16
#include <glusterfs/compat-uuid.h>
19
#include <glusterfs/list.h>
20
#include <glusterfs/dict.h>
21
#include <glusterfs/compat-errno.h>
22
#include <glusterfs/statedump.h>
23
#include "glusterd-op-sm.h"
24
#include "glusterd-utils.h"
25
#include "glusterd-store.h"
26
#include "glusterd-locks.h"
27
#include "glusterd-quota.h"
28
#include <glusterfs/syscall.h>
29
#include "glusterd-snapshot-utils.h"
30
#include "glusterd-svc-mgmt.h"
31
#include "glusterd-svc-helper.h"
32
#include "glusterd-shd-svc-helper.h"
33
#include "glusterd-shd-svc.h"
34
#include "glusterd-quotad-svc.h"
35
#include "glusterd-server-quorum.h"
39
#include "glusterd-gfproxyd-svc-helper.h"
41
/* True iff `key` (whose length is `len`) exactly matches the string
 * literal `str`. The length pre-check (SLEN is the project's compile-time
 * literal-length macro) short-circuits strcmp when sizes already differ. */
#define len_strcmp(key, len, str) \
    ((len == SLEN(str)) && (strcmp(key, str) == 0))
44
/* Hostname of the local node (PATH_MAX-sized buffer); defined in another
 * translation unit. */
extern char local_node_hostname[PATH_MAX];
46
glusterd_set_shared_storage(dict_t *dict, char *key, char *value,
57
const glusterd_all_vol_opts valid_all_vol_opts[] = {
58
{GLUSTERD_QUORUM_RATIO_KEY, "51"},
59
{GLUSTERD_SHARED_STORAGE_KEY, "disable"},
61
{GLUSTERD_GLOBAL_OP_VERSION_KEY, "BUG_NO_OP_VERSION"},
68
{GLUSTERD_MAX_OP_VERSION_KEY, "BUG_NO_MAX_OP_VERSION"},
69
{GLUSTERD_BRICK_MULTIPLEX_KEY, "disable"},
75
{GLUSTERD_BRICKMUX_LIMIT_KEY, GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE},
76
{GLUSTERD_VOL_CNT_PER_THRD, GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE},
77
{GLUSTERD_LOCALTIME_LOGGING_KEY, "disable"},
78
{GLUSTERD_DAEMON_LOG_LEVEL_KEY, "INFO"},
79
{GLUSTER_BRICK_GRACEFUL_CLEANUP, "disable"},
83
static struct cds_list_head gd_op_sm_queue;
84
synclock_t gd_op_sm_lock;
85
glusterd_op_info_t opinfo = {
90
glusterd_txn_opinfo_dict_init(void)
93
xlator_t *this = THIS;
94
glusterd_conf_t *priv = NULL;
99
priv->glusterd_txn_opinfo = dict_new();
100
if (!priv->glusterd_txn_opinfo) {
101
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
106
memset(priv->global_txn_id, '\0', sizeof(uuid_t));
114
glusterd_txn_opinfo_dict_fini(void)
116
glusterd_conf_t *priv = NULL;
118
priv = THIS->private;
121
if (priv->glusterd_txn_opinfo)
122
dict_unref(priv->glusterd_txn_opinfo);
126
glusterd_txn_opinfo_init(glusterd_op_info_t *opinfo,
127
glusterd_op_sm_state_t state, int *op, dict_t *op_ctx,
128
rpcsvc_request_t *req)
130
glusterd_conf_t *conf = NULL;
134
conf = THIS->private;
138
opinfo->state = state;
144
opinfo->op_ctx = dict_ref(op_ctx);
146
opinfo->op_ctx = NULL;
151
opinfo->txn_generation = conf->generation;
158
glusterd_generate_txn_id(dict_t *dict, uuid_t **txn_id)
161
xlator_t *this = THIS;
165
*txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
167
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
171
gf_uuid_generate(**txn_id);
173
ret = dict_set_bin(dict, "transaction_id", *txn_id, sizeof(**txn_id));
175
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
176
"Failed to set transaction id.");
180
gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(**txn_id));
182
if (ret && *txn_id) {
191
glusterd_get_txn_opinfo(uuid_t *txn_id, glusterd_op_info_t *opinfo)
194
glusterd_txn_opinfo_obj *opinfo_obj = NULL;
195
glusterd_conf_t *priv = NULL;
196
xlator_t *this = THIS;
198
priv = this->private;
201
if (!txn_id || !opinfo) {
202
gf_msg_callingfn(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
203
"Empty transaction id or opinfo received.");
208
ret = dict_get_bin(priv->glusterd_txn_opinfo, uuid_utoa(*txn_id),
209
(void **)&opinfo_obj);
213
(*opinfo) = opinfo_obj->opinfo;
215
gf_msg_debug(this->name, 0,
216
"Successfully got opinfo for transaction ID : %s",
221
gf_msg_debug(this->name, 0, "Returning %d", ret);
226
glusterd_set_txn_opinfo(uuid_t *txn_id, glusterd_op_info_t *opinfo)
229
glusterd_txn_opinfo_obj *opinfo_obj = NULL;
230
glusterd_conf_t *priv = NULL;
231
xlator_t *this = THIS;
233
priv = this->private;
237
gf_msg_callingfn(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
238
"Empty transaction id received.");
243
ret = dict_get_bin(priv->glusterd_txn_opinfo, uuid_utoa(*txn_id),
244
(void **)&opinfo_obj);
246
opinfo_obj = GF_CALLOC(1, sizeof(glusterd_txn_opinfo_obj),
247
gf_common_mt_txn_opinfo_obj_t);
253
ret = dict_set_bin(priv->glusterd_txn_opinfo, uuid_utoa(*txn_id),
254
opinfo_obj, sizeof(glusterd_txn_opinfo_obj));
256
gf_msg_callingfn(this->name, GF_LOG_ERROR, -ret,
257
GD_MSG_DICT_SET_FAILED,
258
"Unable to set opinfo for transaction"
265
opinfo_obj->opinfo = (*opinfo);
267
gf_msg_debug(this->name, 0,
268
"Successfully set opinfo for transaction ID : %s",
276
gf_msg_debug(this->name, 0, "Returning %d", ret);
281
glusterd_clear_txn_opinfo(uuid_t *txn_id)
284
glusterd_op_info_t txn_op_info = {
287
glusterd_conf_t *priv = NULL;
288
xlator_t *this = THIS;
290
priv = this->private;
294
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
295
"Empty transaction id received.");
300
ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info);
302
gf_msg_callingfn(this->name, GF_LOG_ERROR, 0,
303
GD_MSG_TRANS_OPINFO_GET_FAIL,
304
"Unable to get transaction opinfo "
305
"for transaction ID : %s",
310
if (txn_op_info.op_ctx)
311
dict_unref(txn_op_info.op_ctx);
313
dict_del(priv->glusterd_txn_opinfo, uuid_utoa(*txn_id));
315
gf_msg_debug(this->name, 0,
316
"Successfully cleared opinfo for transaction ID : %s",
321
gf_msg_debug(this->name, 0, "Returning %d", ret);
325
/* Port number seeded from the compile-time default GLUSTERD_DEFAULT_PORT.
 * NOTE(review): its consumers are not visible in this chunk -- confirm
 * usage before relying on this description. */
static int glusterfs_port = GLUSTERD_DEFAULT_PORT;
326
static char *glusterd_op_sm_state_names[] = {
339
"Brick op Committed",
340
"Brick op Commit failed",
345
/* Printable names for op state-machine events, indexed by event id; the
 * final "GD_OP_EVENT_INVALID" entry serves as the out-of-range fallback
 * used by glusterd_op_sm_event_name_get(). */
static char *glusterd_op_sm_event_names[] = {
    "GD_OP_EVENT_NONE",       "GD_OP_EVENT_START_LOCK",
    "GD_OP_EVENT_LOCK",       "GD_OP_EVENT_RCVD_ACC",
    "GD_OP_EVENT_ALL_ACC",    "GD_OP_EVENT_STAGE_ACC",
    "GD_OP_EVENT_COMMIT_ACC", "GD_OP_EVENT_RCVD_RJT",
    "GD_OP_EVENT_STAGE_OP",   "GD_OP_EVENT_COMMIT_OP",
    "GD_OP_EVENT_UNLOCK",     "GD_OP_EVENT_START_UNLOCK",
    "GD_OP_EVENT_ALL_ACK",    "GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP",
    "GD_OP_EVENT_INVALID"};
356
glusterd_op_sm_state_name_get(int state)
358
if (state < 0 || state >= GD_OP_STATE_MAX)
359
return glusterd_op_sm_state_names[GD_OP_STATE_MAX];
360
return glusterd_op_sm_state_names[state];
364
glusterd_op_sm_event_name_get(int event)
366
if (event < 0 || event >= GD_OP_EVENT_MAX)
367
return glusterd_op_sm_event_names[GD_OP_EVENT_MAX];
368
return glusterd_op_sm_event_names[event];
372
glusterd_destroy_lock_ctx(glusterd_op_lock_ctx_t *ctx)
380
glusterd_set_volume_status(glusterd_volinfo_t *volinfo,
381
glusterd_volume_status status)
384
volinfo->status = status;
388
glusterd_op_sm_inject_all_acc(uuid_t *txn_id)
391
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACC, txn_id, NULL);
392
gf_msg_debug("glusterd", 0, "Returning %d", ret);
397
glusterd_check_bitrot_cmd(char *key, const int keylen, char *errstr,
402
if (len_strcmp(key, keylen, "bitrot") ||
403
len_strcmp(key, keylen, "features.bitrot")) {
404
snprintf(errstr, size,
405
" 'gluster volume set <VOLNAME> %s' is invalid command."
406
" Use 'gluster volume bitrot <VOLNAME> {enable|disable}'"
410
} else if (len_strcmp(key, keylen, "scrub-freq") ||
411
len_strcmp(key, keylen, "features.scrub-freq")) {
412
snprintf(errstr, size,
413
" 'gluster volume set <VOLNAME> %s' is invalid command."
414
" Use 'gluster volume bitrot <VOLNAME> scrub-frequency"
415
" {hourly|daily|weekly|biweekly|monthly}' instead.",
418
} else if (len_strcmp(key, keylen, "scrub") ||
419
len_strcmp(key, keylen, "features.scrub")) {
420
snprintf(errstr, size,
421
" 'gluster volume set <VOLNAME> %s' is invalid command."
422
" Use 'gluster volume bitrot <VOLNAME> scrub {pause|resume}'"
426
} else if (len_strcmp(key, keylen, "scrub-throttle") ||
427
len_strcmp(key, keylen, "features.scrub-throttle")) {
428
snprintf(errstr, size,
429
" 'gluster volume set <VOLNAME> %s' is invalid command."
430
" Use 'gluster volume bitrot <VOLNAME> scrub-throttle "
431
" {lazy|normal|aggressive}' instead.",
442
glusterd_check_quota_cmd(char *key, const int keylen, char *value, char *errstr,
446
gf_boolean_t b = _gf_false;
448
if (len_strcmp(key, keylen, "quota") ||
449
len_strcmp(key, keylen, "features.quota")) {
450
ret = gf_string2boolean(value, &b);
455
snprintf(errstr, size,
456
" 'gluster volume set <VOLNAME> %s %s' is deprecated."
457
" Use 'gluster volume quota <VOLNAME> enable' instead.",
460
snprintf(errstr, size,
461
" 'gluster volume set <VOLNAME> %s %s' is deprecated."
462
" Use 'gluster volume quota <VOLNAME> disable' instead.",
466
} else if (len_strcmp(key, keylen, "inode-quota") ||
467
len_strcmp(key, keylen, "features.inode-quota")) {
468
ret = gf_string2boolean(value, &b);
475
" 'gluster volume set <VOLNAME> %s %s' is deprecated."
476
" Use 'gluster volume inode-quota <VOLNAME> enable' instead.",
482
snprintf(errstr, size,
483
" 'gluster volume set <VOLNAME> %s %s' is deprecated."
484
" Use 'gluster volume quota <VOLNAME> disable' instead.",
496
glusterd_brick_op_build_payload(glusterd_op_t op,
497
glusterd_brickinfo_t *brickinfo,
498
gd1_mgmt_brick_op_req **req, dict_t *dict)
501
gd1_mgmt_brick_op_req *brick_req = NULL;
502
char *volname = NULL;
506
gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
507
xlator_t *this = THIS;
508
glusterd_volinfo_t *volinfo = NULL;
510
GF_ASSERT(op < GD_OP_MAX);
511
GF_ASSERT(op > GD_OP_NONE);
515
case GD_OP_REMOVE_BRICK:
516
case GD_OP_STOP_VOLUME:
517
brick_req = GF_CALLOC(1, sizeof(*brick_req),
518
gf_gld_mt_mop_brick_req_t);
520
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
524
brick_req->op = GLUSTERD_BRICK_TERMINATE;
525
brick_req->name = brickinfo->path;
526
glusterd_set_brick_status(brickinfo, GF_BRICK_STOPPING);
528
case GD_OP_PROFILE_VOLUME:
529
brick_req = GF_CALLOC(1, sizeof(*brick_req),
530
gf_gld_mt_mop_brick_req_t);
533
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
538
brick_req->op = GLUSTERD_BRICK_XLATOR_INFO;
539
brick_req->name = brickinfo->path;
542
case GD_OP_HEAL_VOLUME: {
543
brick_req = GF_CALLOC(1, sizeof(*brick_req),
544
gf_gld_mt_mop_brick_req_t);
546
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
551
brick_req->op = GLUSTERD_BRICK_XLATOR_OP;
552
brick_req->name = "";
553
ret = dict_get_int32(dict, "heal-op", (int32_t *)&heal_op);
555
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
556
"Key=heal-op", NULL);
559
ret = dict_set_int32_sizen(dict, "xl-op", heal_op);
561
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
566
case GD_OP_STATUS_VOLUME: {
567
brick_req = GF_CALLOC(1, sizeof(*brick_req),
568
gf_gld_mt_mop_brick_req_t);
570
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
574
brick_req->op = GLUSTERD_BRICK_STATUS;
575
brick_req->name = "";
576
ret = dict_set_str_sizen(dict, "brick-name", brickinfo->path);
578
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
579
"Key=brick-name", NULL);
583
case GD_OP_REBALANCE:
584
case GD_OP_DEFRAG_BRICK_VOLUME:
585
brick_req = GF_CALLOC(1, sizeof(*brick_req),
586
gf_gld_mt_mop_brick_req_t);
588
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
593
brick_req->op = GLUSTERD_BRICK_XLATOR_DEFRAG;
594
ret = dict_get_str(dict, "volname", &volname);
596
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
597
"Key=volname", NULL);
600
ret = glusterd_volinfo_find(volname, &volinfo);
602
gf_smsg(this->name, GF_LOG_ERROR, errno,
603
GD_MSG_VOLINFO_GET_FAIL, "Volume=%s", volname, NULL);
606
snprintf(name, sizeof(name), "%s-dht", volname);
607
brick_req->name = gf_strdup(name);
612
brick_req = GF_CALLOC(1, sizeof(*brick_req),
613
gf_gld_mt_mop_brick_req_t);
615
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
619
brick_req->op = GLUSTERD_BRICK_BARRIER;
620
brick_req->name = brickinfo->path;
628
brick_req->dict.dict_len = 0;
629
brick_req->dict.dict_val = NULL;
630
ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
631
&brick_req->input.input_len);
633
gf_smsg(this->name, GF_LOG_ERROR, errno,
634
GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
641
if (ret && brick_req)
643
gf_msg_debug(this->name, 0, "Returning %d", ret);
648
glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
652
gd1_mgmt_brick_op_req *brick_req = NULL;
653
char *volname = NULL;
655
GF_ASSERT(op < GD_OP_MAX);
656
GF_ASSERT(op > GD_OP_NONE);
658
xlator_t *this = THIS;
661
case GD_OP_PROFILE_VOLUME:
662
brick_req = GF_CALLOC(1, sizeof(*brick_req),
663
gf_gld_mt_mop_brick_req_t);
665
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
670
brick_req->op = GLUSTERD_NODE_PROFILE;
671
brick_req->name = "";
675
case GD_OP_STATUS_VOLUME:
676
brick_req = GF_CALLOC(1, sizeof(*brick_req),
677
gf_gld_mt_mop_brick_req_t);
679
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
684
brick_req->op = GLUSTERD_NODE_STATUS;
685
brick_req->name = "";
689
case GD_OP_SCRUB_STATUS:
690
case GD_OP_SCRUB_ONDEMAND:
691
brick_req = GF_CALLOC(1, sizeof(*brick_req),
692
gf_gld_mt_mop_brick_req_t);
694
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
699
brick_req->op = GLUSTERD_NODE_BITROT;
701
ret = dict_get_str(dict, "volname", &volname);
703
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
704
"Key=volname", NULL);
708
brick_req->name = gf_strdup(volname);
714
brick_req->dict.dict_len = 0;
715
brick_req->dict.dict_val = NULL;
716
ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
717
&brick_req->input.input_len);
720
gf_smsg(this->name, GF_LOG_ERROR, errno,
721
GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
729
if (ret && brick_req)
731
gf_msg_debug(this->name, 0, "Returning %d", ret);
736
glusterd_validate_quorum_options(xlator_t *this, char *fullkey, char *value,
741
volume_option_t *opt = NULL;
743
if (!glusterd_is_quorum_option(fullkey))
745
key = strchr(fullkey, '.');
747
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
752
opt = xlator_volume_option_get(this, key);
754
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL, NULL);
758
ret = xlator_option_validate(this, key, value, opt, op_errstr);
764
glusterd_validate_brick_mx_options(xlator_t *this, char *fullkey, char *value,
775
glusterd_count_connected_peers(int32_t *count)
777
glusterd_peerinfo_t *peerinfo = NULL;
778
glusterd_conf_t *conf = NULL;
780
xlator_t *this = THIS;
782
conf = this->private;
783
GF_VALIDATE_OR_GOTO(this->name, conf, out);
784
GF_VALIDATE_OR_GOTO(this->name, count, out);
789
cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
792
if ((peerinfo->connected) &&
793
(peerinfo->state == GD_FRIEND_STATE_BEFRIENDED)) {
805
glusterd_validate_shared_storage(char *value, char *errstr)
810
char hook_script[PATH_MAX] = "";
811
xlator_t *this = THIS;
812
glusterd_conf_t *conf = NULL;
814
glusterd_volinfo_t *volinfo = NULL;
816
conf = this->private;
817
GF_VALIDATE_OR_GOTO(this->name, conf, out);
819
GF_VALIDATE_OR_GOTO(this->name, value, out);
820
GF_VALIDATE_OR_GOTO(this->name, errstr, out);
822
if ((strcmp(value, "enable")) && (strcmp(value, "disable"))) {
823
snprintf(errstr, PATH_MAX,
824
"Invalid option(%s). Valid options "
825
"are 'enable' and 'disable'",
827
gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
833
len = snprintf(hook_script, sizeof(hook_script),
834
"%s" GLUSTERD_SHRD_STRG_HOOK_SCRIPT, conf->workdir);
835
if ((len < 0) || (len >= sizeof(hook_script))) {
840
ret = sys_access(hook_script, R_OK | X_OK);
842
len = snprintf(errstr, PATH_MAX,
843
"The hook-script (%s) required "
844
"for this operation is not present. "
845
"Please install the hook-script "
849
strncpy(errstr, "<error>", PATH_MAX);
851
gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED, "%s",
856
if (!strncmp(value, "disable", SLEN("disable"))) {
857
ret = dict_get_str(conf->opts, GLUSTERD_SHARED_STORAGE_KEY, &op);
858
if (ret || !strncmp(op, "disable", SLEN("disable"))) {
859
snprintf(errstr, PATH_MAX,
860
"Shared storage volume "
861
"does not exist. Please enable shared storage"
862
" for creating shared storage volume.");
863
gf_msg(this->name, GF_LOG_ERROR, 0,
864
GD_MSG_SHARED_STORAGE_DOES_NOT_EXIST, "%s", errstr);
871
ret = glusterd_volinfo_find(GLUSTER_SHARED_STORAGE, &volinfo);
873
snprintf(errstr, PATH_MAX,
874
"Shared storage volume(" GLUSTER_SHARED_STORAGE
875
") already exists.");
876
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ALREADY_EXIST, "%s",
882
ret = glusterd_count_connected_peers(&count);
884
snprintf(errstr, PATH_MAX,
885
"Failed to calculate number of connected peers.");
886
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_COUNT_GET_FAIL, "%s",
892
snprintf(errstr, PATH_MAX,
893
"More than one node should "
894
"be up/present in the cluster to enable this option");
895
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INSUFFICIENT_UP_NODES, "%s",
906
glusterd_validate_localtime_logging(char *value, char *errstr)
909
xlator_t *this = THIS;
910
glusterd_conf_t *conf = NULL;
911
int already_enabled = 0;
913
conf = this->private;
914
GF_VALIDATE_OR_GOTO(this->name, conf, out);
915
GF_VALIDATE_OR_GOTO(this->name, value, out);
917
already_enabled = gf_log_get_localtime();
920
if (strcmp(value, "enable") == 0) {
921
gf_log_set_localtime(1);
922
if (!already_enabled)
923
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_LOCALTIME_LOGGING_ENABLE,
924
"localtime logging enable");
925
} else if (strcmp(value, "disable") == 0) {
926
gf_log_set_localtime(0);
928
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_LOCALTIME_LOGGING_DISABLE,
929
"localtime logging disable");
932
GF_VALIDATE_OR_GOTO(this->name, errstr, out);
933
snprintf(errstr, PATH_MAX,
934
"Invalid option(%s). Valid options "
935
"are 'enable' and 'disable'",
937
gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
946
glusterd_validate_daemon_log_level(char *value, char *errstr)
949
xlator_t *this = THIS;
950
glusterd_conf_t *conf = NULL;
952
conf = this->private;
953
GF_VALIDATE_OR_GOTO(this->name, conf, out);
955
GF_VALIDATE_OR_GOTO(this->name, value, out);
959
if ((strcmp(value, "INFO")) && (strcmp(value, "WARNING")) &&
960
(strcmp(value, "DEBUG")) && (strcmp(value, "TRACE")) &&
961
(strcmp(value, "ERROR"))) {
963
GF_VALIDATE_OR_GOTO(this->name, errstr, out);
964
snprintf(errstr, PATH_MAX,
965
"Invalid option(%s). Valid options "
966
"are 'INFO' or 'WARNING' or 'ERROR' or 'DEBUG' or "
969
gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
978
glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
981
char *volname = NULL;
984
char *key_fixed = NULL;
986
char *val_dup = NULL;
992
char *trash_path = NULL;
993
int trash_path_len = 0;
996
char errstr[PATH_MAX] = {
999
glusterd_volinfo_t *volinfo = NULL;
1000
glusterd_brickinfo_t *brickinfo = NULL;
1001
dict_t *val_dict = NULL;
1002
gf_boolean_t global_opt = _gf_false;
1003
gf_boolean_t key_matched = _gf_false;
1004
glusterd_volinfo_t *voliter = NULL;
1005
glusterd_conf_t *priv = NULL;
1006
xlator_t *this = THIS;
1007
uint32_t new_op_version = GD_OP_VERSION_MIN;
1008
uint32_t local_new_op_version = GD_OP_VERSION_MIN;
1009
uint32_t local_new_client_op_version = GD_OP_VERSION_MIN;
1010
uint32_t key_op_version = GD_OP_VERSION_MIN;
1011
uint32_t local_key_op_version = GD_OP_VERSION_MIN;
1012
gf_boolean_t origin_glusterd = _gf_true;
1013
gf_boolean_t check_op_version = _gf_true;
1014
gf_boolean_t trash_enabled = _gf_false;
1015
gf_boolean_t all_vol = _gf_false;
1016
struct volopt_map_entry *vmep = NULL;
1019
priv = this->private;
1026
origin_glusterd = is_origin_glusterd(dict);
1028
if (!origin_glusterd) {
1030
check_op_version = dict_get_str_boolean(dict, "check-op-version",
1033
if (check_op_version) {
1034
ret = dict_get_uint32(dict, "new-op-version", &new_op_version);
1036
gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
1037
"Key=new-op-version", NULL);
1041
if ((new_op_version > GD_OP_VERSION_MAX) ||
1042
(new_op_version < GD_OP_VERSION_MIN)) {
1044
snprintf(errstr, sizeof(errstr),
1045
"Required op_version (%d) is not supported."
1046
" Max supported op version is %d",
1047
new_op_version, priv->op_version);
1048
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION,
1055
ret = dict_get_int32(dict, "count", &dict_count);
1057
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
1058
"Count(dict),not set in Volume-Set");
1062
if (dict_count == 0) {
1064
if (dict_get_sizen(dict, "help")) {
1069
if (dict_get_sizen(dict, "help-xml")) {
1075
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MODULE_NOT_INSTALLED,
1076
"libxml not present in the system");
1077
*op_errstr = gf_strdup(
1078
"Error: xml libraries not present to produce xml-output");
1082
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_OPTIONS_GIVEN,
1083
"No options received ");
1084
*op_errstr = gf_strdup("Options not specified");
1089
ret = dict_get_str(dict, "volname", &volname);
1091
gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
1092
"Key=volname", NULL);
1096
if (strcasecmp(volname, "all") != 0) {
1097
ret = glusterd_volinfo_find(volname, &volinfo);
1099
snprintf(errstr, sizeof(errstr), FMTSTR_CHECK_VOL_EXISTS, volname);
1100
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
1101
FMTSTR_CHECK_VOL_EXISTS, volname);
1105
ret = glusterd_validate_volume_id(dict, volinfo);
1109
local_new_op_version = volinfo->op_version;
1110
local_new_client_op_version = volinfo->client_op_version;
1116
val_dict = dict_new();
1118
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
1122
for (count = 1; ret != 1; count++) {
1123
keystr_len = sprintf(keystr, "key%d", count);
1124
ret = dict_get_strn(dict, keystr, keystr_len, &key);
1126
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
1127
"Key=%s", keystr, NULL);
1131
keystr_len = sprintf(keystr, "value%d", count);
1132
ret = dict_get_strn(dict, keystr, keystr_len, &value);
1134
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
1135
"invalid key,value pair in 'volume set'");
1140
key_matched = _gf_false;
1141
keylen = strlen(key);
1142
if (len_strcmp(key, keylen, "config.memory-accounting")) {
1143
key_matched = _gf_true;
1144
gf_msg_debug(this->name, 0,
1145
"enabling memory accounting for volume %s", volname);
1147
} else if (len_strcmp(key, keylen, "config.transport")) {
1148
key_matched = _gf_true;
1149
gf_msg_debug(this->name, 0, "changing transport-type for volume %s",
1153
if (!((strcasecmp(value, "rdma") == 0) ||
1154
(strcasecmp(value, "tcp") == 0) ||
1155
(strcasecmp(value, "tcp,rdma") == 0) ||
1156
(strcasecmp(value, "rdma,tcp") == 0))) {
1157
ret = snprintf(errstr, sizeof(errstr),
1158
"transport-type %s does not exist", value);
1164
} else if (len_strcmp(key, keylen, "ganesha.enable")) {
1165
key_matched = _gf_true;
1166
if (strcmp(value, "off") == 0) {
1167
ret = ganesha_manage_export(dict, "off", _gf_true, op_errstr);
1174
ret = glusterd_check_bitrot_cmd(key, keylen, errstr,
1178
ret = glusterd_check_quota_cmd(key, keylen, value, errstr,
1184
if (is_key_glusterd_hooks_friendly(key))
1187
ret = glusterd_volopt_validate(volinfo, dict, key, value, op_errstr);
1191
exists = glusterd_check_option_exists(key, &key_fixed);
1198
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
1199
"Option with name: %s does not exist", key);
1200
ret = snprintf(errstr, sizeof(errstr), "option : %s does not exist",
1203
snprintf(errstr + ret, sizeof(errstr) - ret,
1204
"\nDid you mean %s?", key_fixed);
1211
keylen = strlen(key_fixed);
1215
if (len_strcmp(key, keylen, "storage.linux-aio")) {
1216
if (volinfo && volinfo->status == GLUSTERD_STATUS_STARTED) {
1217
snprintf(errstr, sizeof(errstr),
1218
"Changing 'storage.linux-aio' is not"
1219
" supported when the volume is in started"
1220
" state. Please stop the volume first.");
1228
if (len_strcmp(key, keylen, "storage.linux-io_uring")) {
1229
if (volinfo && volinfo->status == GLUSTERD_STATUS_STARTED) {
1230
snprintf(errstr, sizeof(errstr),
1231
"Changing 'storage.linux-io_uring' is not"
1232
" supported when the volume is in started"
1233
" state. Please stop the volume first.");
1240
if (len_strcmp(key, keylen, "cluster.granular-entry-heal")) {
1246
if (volinfo && volinfo->status != GLUSTERD_STATUS_NONE &&
1247
(dict_get_sizen(dict, "is-special-key") == NULL)) {
1248
snprintf(errstr, sizeof(errstr),
1249
" 'gluster volume set <VOLNAME> %s {enable, disable}'"
1250
" is not supported."
1251
" Use 'gluster volume heal <VOLNAME> "
1252
"granular-entry-heal {enable, disable}' instead.",
1257
} else if (len_strcmp(key, keylen, GLUSTERD_GLOBAL_OP_VERSION_KEY)) {
1263
snprintf(errstr, sizeof(errstr),
1264
"Option \"%s\" is not valid for a single volume", key);
1272
snprintf(errstr, sizeof(errstr),
1273
"Option \"%s\" cannot be set along with other options",
1280
ret = gf_string2uint(value, &local_key_op_version);
1282
snprintf(errstr, sizeof(errstr),
1283
"invalid number format \"%s\" in option \"%s\"", value,
1285
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY, "%s",
1290
if (local_key_op_version > GD_OP_VERSION_MAX ||
1291
local_key_op_version < GD_OP_VERSION_MIN) {
1293
snprintf(errstr, sizeof(errstr),
1294
"Required op_version (%d) is not supported."
1295
" Max supported op version is %d",
1296
local_key_op_version, priv->op_version);
1297
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
1301
if (local_key_op_version > priv->op_version) {
1302
local_new_op_version = local_key_op_version;
1305
snprintf(errstr, sizeof(errstr),
1306
"Required op-version (%d) should"
1307
" not be equal or lower than current"
1308
" cluster op-version (%d).",
1309
local_key_op_version, priv->op_version);
1310
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
1318
ALL_VOLUME_OPTION_CHECK(volname, _gf_false, key, ret, op_errstr, out);
1319
ret = glusterd_validate_quorum_options(this, key, value, op_errstr);
1323
ret = glusterd_validate_brick_mx_options(this, key, value, op_errstr);
1327
vmep = gd_get_vmep(key);
1328
local_key_op_version = glusterd_get_op_version_from_vmep(vmep);
1329
if (local_key_op_version > local_new_op_version)
1330
local_new_op_version = local_key_op_version;
1331
if (gd_is_client_option(vmep) &&
1332
(local_key_op_version > local_new_client_op_version))
1333
local_new_client_op_version = local_key_op_version;
1335
sprintf(keystr, "op-version%d", count);
1336
if (origin_glusterd) {
1337
ret = dict_set_uint32(dict, keystr, local_key_op_version);
1339
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
1340
"Failed to set key-op-version in dict");
1343
} else if (check_op_version) {
1344
ret = dict_get_uint32(dict, keystr, &key_op_version);
1346
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
1347
"Failed to get key-op-version from dict");
1350
if (local_key_op_version != key_op_version) {
1352
snprintf(errstr, sizeof(errstr),
1353
"option: %s op-version mismatch", key);
1354
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH,
1355
"%s, required op-version = %" PRIu32
1356
", available op-version = %" PRIu32,
1357
errstr, key_op_version, local_key_op_version);
1362
global_opt = glusterd_check_globaloption(key);
1364
if (len_strcmp(key, keylen, GLUSTERD_SHARED_STORAGE_KEY)) {
1365
ret = glusterd_validate_shared_storage(value, errstr);
1367
gf_msg(this->name, GF_LOG_ERROR, 0,
1368
GD_MSG_SHARED_STRG_VOL_OPT_VALIDATE_FAIL,
1369
"Failed to validate shared storage volume options");
1372
} else if (len_strcmp(key, keylen, GLUSTERD_LOCALTIME_LOGGING_KEY)) {
1373
ret = glusterd_validate_localtime_logging(value, errstr);
1375
gf_msg(this->name, GF_LOG_ERROR, 0,
1376
GD_MSG_LOCALTIME_LOGGING_VOL_OPT_VALIDATE_FAIL,
1377
"Failed to validate localtime logging volume options");
1380
} else if (len_strcmp(key, keylen, GLUSTERD_DAEMON_LOG_LEVEL_KEY)) {
1381
ret = glusterd_validate_daemon_log_level(value, errstr);
1383
gf_msg(this->name, GF_LOG_ERROR, 0,
1384
GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL,
1385
"Failed to validate daemon-log-level volume options");
1388
} else if (len_strcmp(key, keylen, "features.trash-dir")) {
1390
ret = glusterd_volinfo_get(volinfo, VKEY_FEATURES_TRASH,
1392
if (!ret && val_dup) {
1393
ret = gf_string2boolean(val_dup, &trash_enabled);
1398
if (!trash_enabled) {
1399
snprintf(errstr, sizeof(errstr),
1400
"Trash translator is not enabled. "
1401
"Use volume set %s trash on",
1403
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
1404
"Unable to set the options in 'volume set': %s", errstr);
1408
if (strchr(value, '/')) {
1409
snprintf(errstr, sizeof(errstr),
1410
"Path is not allowed as option");
1411
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
1412
"Unable to set the options in 'volume set': %s", errstr);
1417
list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
1420
if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
1421
trash_path_len = strlen(value) + strlen(brickinfo->path) +
1423
trash_path = GF_MALLOC(trash_path_len, gf_common_mt_char);
1424
snprintf(trash_path, trash_path_len, "%s/%s",
1425
brickinfo->path, value);
1429
if (!sys_access(trash_path, R_OK)) {
1430
snprintf(errstr, sizeof(errstr), "Path %s exists",
1432
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
1433
"Unable to set the options in 'volume set': %s",
1438
gf_msg_debug(this->name, 0,
1439
"Directory with given name does not exist,"
1443
if (volinfo->status == GLUSTERD_STATUS_STARTED &&
1444
brickinfo->status != GF_BRICK_STARTED) {
1447
snprintf(errstr, sizeof(errstr),
1448
"One or more bricks are down");
1449
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
1450
"Unable to set the options in 'volume set': %s",
1457
GF_FREE(trash_path);
1463
ret = dict_set_strn(val_dict, key, keylen, value);
1466
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
1467
"Unable to set the options in 'volume set'");
1473
if (!global_opt && !all_vol)
1474
ret = glusterd_validate_reconfopts(volinfo, val_dict, op_errstr);
1475
else if (!all_vol) {
1477
cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
1479
ret = glusterd_validate_globalopts(voliter, val_dict,
1487
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
1488
"Could not create temp volfile, some option failed: %s",
1492
dict_deln(val_dict, key, keylen);
1502
ret = glusterd_check_client_op_version_support(
1503
volname, local_new_client_op_version, op_errstr);
1507
if (origin_glusterd) {
1508
ret = dict_set_uint32(dict, "new-op-version", local_new_op_version);
1510
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
1511
"Failed to set new-op-version in dict");
1520
ret = dict_set_int32_sizen(dict, "check-op-version", 1);
1522
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
1523
"Failed to set check-op-version in dict");
1532
dict_unref(val_dict);
1535
GF_FREE(trash_path);
1538
if (errstr[0] != '\0')
1539
*op_errstr = gf_strdup(errstr);
1542
if (!(*op_errstr)) {
1543
*op_errstr = gf_strdup("Error, Validation Failed");
1544
gf_msg_debug(this->name, 0, "Error, Cannot Validate option :%s",
1547
gf_msg_debug(this->name, 0, "Error, Cannot Validate option");
1554
glusterd_op_stage_reset_volume(dict_t *dict, char **op_errstr)
1557
char *volname = NULL;
1559
char msg[2048] = {0};
1561
char *key_fixed = NULL;
1562
glusterd_volinfo_t *volinfo = NULL;
1563
xlator_t *this = THIS;
1565
ret = dict_get_str(dict, "volname", &volname);
1568
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
1569
"Unable to get volume name");
1573
if (strcasecmp(volname, "all") != 0) {
1574
ret = glusterd_volinfo_find(volname, &volinfo);
1576
snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
1580
ret = glusterd_validate_volume_id(dict, volinfo);
1585
ret = dict_get_str(dict, "key", &key);
1587
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
1588
"Unable to get option key");
1597
if (volinfo && (!strcmp(key, "all") || !strcmp(key, "ganesha.enable"))) {
1598
if (glusterd_check_ganesha_export(volinfo)) {
1599
ret = ganesha_manage_export(dict, "off", _gf_true, op_errstr);
1601
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_GNS_RESET_FAIL,
1602
"Could not reset ganesha.enable key");
1606
if (strcmp(key, "all")) {
1607
exists = glusterd_check_option_exists(key, &key_fixed);
1614
ret = snprintf(msg, sizeof(msg), "Option %s does not exist", key);
1616
snprintf(msg + ret, sizeof(msg) - ret, "\nDid you mean %s?",
1620
} else if (exists > 0) {
1634
if (strcmp(VKEY_FEATURES_INODE_QUOTA, key) == 0 ||
1635
strcmp(VKEY_FEATURES_QUOTA, key) == 0) {
1636
snprintf(msg, sizeof(msg),
1638
"reset <VOLNAME> %s' is deprecated. "
1639
"Use 'gluster volume quota <VOLNAME> "
1640
"disable' instead.",
1645
ALL_VOLUME_OPTION_CHECK(volname, _gf_false, key, ret, op_errstr,
1653
if (msg[0] != '\0') {
1654
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_STAGE_RESET_VOL_FAIL,
1656
*op_errstr = gf_strdup(msg);
1659
gf_msg_debug(this->name, 0, "Returning %d", ret);
1665
glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr)
1668
char *volname = NULL;
1669
char *hostname = NULL;
1670
glusterd_peerinfo_t *peerinfo = NULL;
1674
glusterd_volinfo_t *volinfo = NULL;
1675
xlator_t *this = THIS;
1677
ret = dict_get_str(dict, "hostname", &hostname);
1679
snprintf(msg, sizeof(msg),
1680
"hostname couldn't be "
1681
"retrieved from msg");
1682
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
1683
"Key=hostname", NULL);
1684
*op_errstr = gf_strdup(msg);
1688
if (glusterd_gf_is_local_addr(hostname)) {
1690
ret = dict_get_str(dict, "volname", &volname);
1692
ret = glusterd_volinfo_find(volname, &volinfo);
1694
snprintf(msg, sizeof(msg),
1698
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_NOT_FOUND,
1699
"Volume=%s", volname, NULL);
1700
*op_errstr = gf_strdup(msg);
1707
peerinfo = glusterd_peerinfo_find(NULL, hostname);
1708
if (peerinfo == NULL) {
1711
snprintf(msg, sizeof(msg), "%s, is not a friend", hostname);
1712
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_NOT_FOUND,
1713
"Peer_name=%s", hostname, NULL);
1714
*op_errstr = gf_strdup(msg);
1717
} else if (!peerinfo->connected) {
1720
snprintf(msg, sizeof(msg),
1721
"%s, is not connected at "
1724
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_DISCONNECTED,
1725
"Peer_name=%s", hostname, NULL);
1726
*op_errstr = gf_strdup(msg);
1734
gf_msg_debug("glusterd", 0, "Returning %d", ret);
1740
glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
1747
char *volname = NULL;
1749
xlator_t *this = THIS;
1750
glusterd_conf_t *priv = NULL;
1751
glusterd_brickinfo_t *brickinfo = NULL;
1752
glusterd_volinfo_t *volinfo = NULL;
1753
dict_t *vol_opts = NULL;
1755
gf_boolean_t nfs_disabled = _gf_false;
1757
gf_boolean_t shd_enabled = _gf_false;
1760
priv = this->private;
1763
ret = dict_get_uint32(dict, "cmd", &cmd);
1765
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
1770
if (cmd & GF_CLI_STATUS_ALL)
1773
if ((cmd & GF_CLI_STATUS_QUOTAD) &&
1774
(priv->op_version == GD_OP_VERSION_MIN)) {
1775
snprintf(msg, sizeof(msg),
1776
"The cluster is operating at "
1777
"version 1. Getting the status of quotad is not "
1778
"allowed in this state.");
1779
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_QUOTA_GET_STAT_FAIL,
1785
ret = dict_get_str(dict, "volname", &volname);
1787
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
1788
"Unable to get volume name");
1792
ret = glusterd_volinfo_find(volname, &volinfo);
1794
snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
1795
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL,
1796
"Volume=%s", volname, NULL);
1801
ret = glusterd_validate_volume_id(dict, volinfo);
1803
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VALIDATE_FAILED, NULL);
1807
ret = glusterd_is_volume_started(volinfo);
1809
snprintf(msg, sizeof(msg), "Volume %s is not started", volname);
1810
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_NOT_STARTED,
1811
"Volume=%s", volname, NULL);
1816
vol_opts = volinfo->dict;
1818
if ((cmd & GF_CLI_STATUS_SHD) != 0) {
1819
if (glusterd_is_shd_compatible_volume(volinfo)) {
1820
shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
1823
snprintf(msg, sizeof(msg), "Volume %s is not Self-heal compatible",
1825
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_SHD_NOT_COMP,
1826
"Volume=%s", volname, NULL);
1831
snprintf(msg, sizeof(msg),
1832
"Self-heal Daemon is disabled for volume %s", volname);
1833
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SELF_HEALD_DISABLED,
1834
"Volume=%s", volname, NULL);
1838
} else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
1839
nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
1843
snprintf(msg, sizeof(msg), "NFS server is disabled for volume %s",
1845
gf_smsg(this->name, GF_LOG_ERROR, errno,
1846
GD_MSG_NFS_GANESHA_DISABLED, "Volume=%s", volname, NULL);
1850
} else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
1851
if (!glusterd_is_volume_quota_enabled(volinfo)) {
1853
snprintf(msg, sizeof(msg),
1854
"Volume %s does not have "
1857
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_QUOTA_DISABLED,
1858
"Volume=%s", volname, NULL);
1861
} else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
1862
if (!glusterd_is_bitrot_enabled(volinfo)) {
1864
snprintf(msg, sizeof(msg),
1865
"Volume %s does not have "
1868
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BITROT_NOT_ENABLED,
1869
"Volume=%s", volname, NULL);
1872
} else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
1873
if (!glusterd_is_bitrot_enabled(volinfo)) {
1875
snprintf(msg, sizeof(msg),
1876
"Volume %s does not have "
1877
"bitrot enabled. Scrubber will be enabled "
1878
"automatically if bitrot is enabled",
1881
this->name, GF_LOG_ERROR, errno, GD_MSG_BITROT_NOT_ENABLED,
1882
"Scrubber will be enabled automatically if bitrot is enabled",
1883
"Volume=%s", volname, NULL);
1886
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
1887
if (!glusterd_is_snapd_enabled(volinfo)) {
1889
snprintf(msg, sizeof(msg),
1890
"Volume %s does not have "
1893
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SNAPD_NOT_RUNNING,
1894
"Volume=%s", volname, NULL);
1897
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
1898
ret = dict_get_str(dict, "brick", &brick);
1900
gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
1905
ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
1908
snprintf(msg, sizeof(msg),
1912
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_NOT_FOUND,
1913
"Brick=%s, Volume=%s", brick, volname, NULL);
1924
*op_errstr = gf_strdup(msg);
1926
*op_errstr = gf_strdup("Validation Failed for Status");
1929
gf_msg_debug(this->name, 0, "Returning: %d", ret);
1934
glusterd_op_stage_stats_volume(dict_t *dict, char **op_errstr)
1937
char *volname = NULL;
1941
int32_t stats_op = GF_CLI_STATS_NONE;
1942
glusterd_volinfo_t *volinfo = NULL;
1944
ret = dict_get_str(dict, "volname", &volname);
1946
snprintf(msg, sizeof(msg), "Volume name get failed");
1950
ret = glusterd_volinfo_find(volname, &volinfo);
1952
snprintf(msg, sizeof(msg),
1959
ret = glusterd_validate_volume_id(dict, volinfo);
1963
ret = dict_get_int32(dict, "op", &stats_op);
1965
snprintf(msg, sizeof(msg), "Volume profile op get failed");
1969
if (GF_CLI_STATS_START == stats_op) {
1970
if (_gf_true == glusterd_is_profile_on(volinfo)) {
1971
snprintf(msg, sizeof(msg),
1972
"Profile on Volume %s is"
1978
} else if ((GF_CLI_STATS_STOP == stats_op) ||
1979
(GF_CLI_STATS_INFO == stats_op)) {
1980
if (_gf_false == glusterd_is_profile_on(volinfo)) {
1981
snprintf(msg, sizeof(msg),
1982
"Profile on Volume %s is"
1990
if ((GF_CLI_STATS_TOP == stats_op) || (GF_CLI_STATS_INFO == stats_op)) {
1991
if (_gf_false == glusterd_is_volume_started(volinfo)) {
1992
snprintf(msg, sizeof(msg), "Volume %s is not started.",
1994
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_STARTED, "%s",
2002
if (msg[0] != '\0') {
2003
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_OP_STAGE_STATS_VOL_FAIL,
2005
*op_errstr = gf_strdup(msg);
2007
gf_msg_debug("glusterd", 0, "Returning %d", ret);
2012
_delete_reconfig_opt(dict_t *this, char *key, data_t *value, void *data)
2014
int32_t *is_force = 0;
2017
is_force = (int32_t *)data;
2024
glusterd_check_voloption_flags(key, VOLOPT_FLAG_NEVER_RESET)) {
2026
*is_force = *is_force | GD_OP_PROTECTED;
2030
if (*is_force != 1) {
2032
glusterd_check_voloption_flags(key, VOLOPT_FLAG_FORCE)) {
2036
*is_force = *is_force | GD_OP_PROTECTED;
2039
*is_force = *is_force | GD_OP_UNPROTECTED;
2043
gf_msg_debug("glusterd", 0, "deleting dict with key=%s,value=%s", key,
2045
dict_del(this, key);
2049
if (!strncmp(key, VKEY_FEATURES_BITROT, strlen(VKEY_FEATURES_BITROT))) {
2050
dict_del_sizen(this, VKEY_FEATURES_SCRUB);
2057
_delete_reconfig_global_opt(dict_t *this, char *key, data_t *value, void *data)
2061
if (strcmp(GLUSTERD_GLOBAL_OPT_VERSION, key) == 0)
2064
_delete_reconfig_opt(this, key, value, data);
2070
glusterd_options_reset(glusterd_volinfo_t *volinfo, char *key,
2074
data_t *value = NULL;
2075
char *key_fixed = NULL;
2076
xlator_t *this = THIS;
2077
glusterd_svc_t *svc = NULL;
2079
GF_ASSERT(volinfo->dict);
2082
if (!strncmp(key, "all", 3)) {
2083
dict_foreach(volinfo->dict, _delete_reconfig_opt, is_force);
2084
ret = glusterd_enable_default_options(volinfo, NULL);
2086
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FAIL_DEFAULT_OPT_SET,
2088
"default options on reset for volume %s",
2093
value = dict_get(volinfo->dict, key);
2095
gf_msg_debug(this->name, 0, "no value set for option %s", key);
2098
_delete_reconfig_opt(volinfo->dict, key, value, is_force);
2099
ret = glusterd_enable_default_options(volinfo, key);
2101
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FAIL_DEFAULT_OPT_SET,
2103
"default value for option '%s' on reset for "
2105
key, volinfo->volname);
2110
gd_update_volume_op_versions(volinfo);
2111
if (!volinfo->is_snap_volume) {
2112
svc = &(volinfo->snapd.svc);
2113
ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
2117
svc = &(volinfo->gfproxyd.svc);
2118
ret = svc->reconfigure(volinfo);
2122
svc = &(volinfo->shd.svc);
2123
ret = svc->reconfigure(volinfo);
2127
ret = glusterd_create_volfiles_and_notify_services(volinfo);
2129
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
2130
"Unable to create volfile for"
2136
ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
2140
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
2141
ret = glusterd_svcs_reconfigure(volinfo);
2150
gf_msg_debug(this->name, 0, "Returning %d", ret);
2155
glusterd_op_reset_all_volume_options(xlator_t *this, dict_t *dict)
2158
char *key_fixed = NULL;
2160
int32_t is_force = 0;
2161
glusterd_conf_t *conf = NULL;
2162
dict_t *dup_opt = NULL;
2163
gf_boolean_t all = _gf_false;
2164
char *next_version = NULL;
2165
gf_boolean_t quorum_action = _gf_false;
2167
conf = this->private;
2168
ret = dict_get_str(dict, "key", &key);
2170
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
2171
"Failed to get key");
2175
ret = dict_get_int32(dict, "force", &is_force);
2179
if (strcmp(key, "all")) {
2180
ret = glusterd_check_option_exists(key, &key_fixed);
2182
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
2183
"Option %s does not "
2197
dup_opt = dict_new();
2199
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
2203
dict_copy(conf->opts, dup_opt);
2204
dict_del(dup_opt, key);
2206
ret = glusterd_get_next_global_opt_version_str(conf->opts, &next_version);
2210
ret = dict_set_str_sizen(dup_opt, GLUSTERD_GLOBAL_OPT_VERSION,
2213
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
2214
"Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
2218
ret = glusterd_store_options(this, dup_opt);
2222
if (glusterd_is_quorum_changed(conf->opts, key, NULL))
2223
quorum_action = _gf_true;
2225
ret = dict_set_dynstr_sizen(conf->opts, GLUSTERD_GLOBAL_OPT_VERSION,
2228
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
2229
"Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
2232
next_version = NULL;
2235
dict_del(conf->opts, key);
2237
dict_foreach(conf->opts, _delete_reconfig_global_opt, &is_force);
2242
dict_unref(dup_opt);
2244
gf_msg_debug(this->name, 0, "returning %d", ret);
2246
glusterd_do_quorum_action();
2247
GF_FREE(next_version);
2252
glusterd_op_reset_volume(dict_t *dict, char **op_rspstr)
2254
glusterd_volinfo_t *volinfo = NULL;
2256
char *volname = NULL;
2258
char *key_fixed = NULL;
2259
int32_t is_force = 0;
2260
gf_boolean_t quorum_action = _gf_false;
2261
xlator_t *this = THIS;
2263
ret = dict_get_str(dict, "volname", &volname);
2265
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
2266
"Unable to get volume name");
2270
if (strcasecmp(volname, "all") == 0) {
2271
ret = glusterd_op_reset_all_volume_options(this, dict);
2275
ret = dict_get_int32(dict, "force", &is_force);
2279
ret = dict_get_str(dict, "key", &key);
2281
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
2282
"Unable to get option key");
2286
ret = glusterd_volinfo_find(volname, &volinfo);
2288
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
2289
FMTSTR_CHECK_VOL_EXISTS, volname);
2293
if (strcmp(key, "all") &&
2294
glusterd_check_option_exists(key, &key_fixed) != 1) {
2295
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
2296
"volinfo dict inconsistency: option %s not found", key);
2303
if (glusterd_is_quorum_changed(volinfo->dict, key, NULL))
2304
quorum_action = _gf_true;
2306
ret = glusterd_options_reset(volinfo, key, &is_force);
2308
gf_asprintf(op_rspstr, "Volume reset : failed");
2309
} else if (is_force & GD_OP_PROTECTED) {
2310
if (is_force & GD_OP_UNPROTECTED) {
2311
gf_asprintf(op_rspstr,
2312
"All unprotected fields were"
2313
" reset. To reset the protected fields,"
2317
gf_asprintf(op_rspstr,
2318
"'%s' is protected. To reset"
2324
if (!strcmp(key, "ganesha.enable") || !strcmp(key, "all")) {
2325
if (glusterd_check_ganesha_export(volinfo) &&
2326
is_origin_glusterd(dict)) {
2327
ret = manage_export_config(volname, "off", op_rspstr);
2329
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_GNS_RESET_FAIL,
2330
"Could not reset ganesha.enable key");
2337
glusterd_do_quorum_action();
2339
gf_msg_debug(this->name, 0, "'volume reset' returning %d", ret);
2344
glusterd_stop_bricks(glusterd_volinfo_t *volinfo)
2346
glusterd_brickinfo_t *brickinfo = NULL;
2348
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
2352
if (glusterd_brick_stop(volinfo, brickinfo, _gf_false)) {
2353
gf_event(EVENT_BRICK_STOP_FAILED, "peer=%s;volume=%s;brick=%s",
2354
brickinfo->hostname, volinfo->volname, brickinfo->path);
2363
glusterd_start_bricks(glusterd_volinfo_t *volinfo)
2367
glusterd_brickinfo_t *brickinfo = NULL;
2371
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
2373
if (!brickinfo->start_triggered) {
2374
pthread_mutex_lock(&brickinfo->restart_mutex);
2377
ret = glusterd_brick_start(volinfo, brickinfo, _gf_false,
2380
pthread_mutex_unlock(&brickinfo->restart_mutex);
2382
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_DISCONNECTED,
2383
"Failed to start %s:%s for %s", brickinfo->hostname,
2384
brickinfo->path, volinfo->volname);
2385
gf_event(EVENT_BRICK_START_FAILED, "peer=%s;volume=%s;brick=%s",
2386
brickinfo->hostname, volinfo->volname,
2398
glusterd_update_volumes_dict(glusterd_volinfo_t *volinfo)
2401
xlator_t *this = THIS;
2402
glusterd_conf_t *conf = NULL;
2403
char *address_family_str = NULL;
2405
conf = this->private;
2406
GF_VALIDATE_OR_GOTO(this->name, conf, out);
2416
if (conf->op_version >= GD_OP_VERSION_3_9_0) {
2417
if (dict_get_str_boolean(volinfo->dict, NFS_DISABLE_MAP_KEY, 1)) {
2418
ret = dict_set_dynstr_with_alloc(volinfo->dict, NFS_DISABLE_MAP_KEY,
2421
gf_msg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
2423
"option ' NFS_DISABLE_MAP_KEY ' on "
2429
ret = dict_get_str(volinfo->dict, "transport.address-family",
2430
&address_family_str);
2432
if (volinfo->transport_type == GF_TRANSPORT_TCP) {
2433
ret = dict_set_dynstr_with_alloc(
2434
volinfo->dict, "transport.address-family", "inet");
2436
gf_msg(this->name, GF_LOG_ERROR, -ret,
2437
GD_MSG_DICT_SET_FAILED,
2438
"failed to set transport."
2439
"address-family on %s",
2446
ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
2453
glusterd_set_brick_mx_opts(dict_t *dict, char *key, char *value,
2457
xlator_t *this = THIS;
2458
glusterd_conf_t *priv = NULL;
2460
GF_VALIDATE_OR_GOTO(this->name, dict, out);
2461
GF_VALIDATE_OR_GOTO(this->name, key, out);
2462
GF_VALIDATE_OR_GOTO(this->name, value, out);
2463
GF_VALIDATE_OR_GOTO(this->name, op_errstr, out);
2467
priv = this->private;
2469
if (!strcmp(key, GLUSTERD_BRICK_MULTIPLEX_KEY)) {
2470
ret = dict_set_dynstr_sizen(priv->opts, GLUSTERD_BRICK_MULTIPLEX_KEY,
2479
glusterd_set_brick_graceful_cleanup(dict_t *dict, char *key, char *value,
2480
glusterd_conf_t *priv)
2483
char *dup_value = NULL;
2485
if (!strcmp(key, GLUSTER_BRICK_GRACEFUL_CLEANUP)) {
2486
dup_value = gf_strdup(value);
2491
ret = dict_set_dynstr_sizen(priv->opts, GLUSTER_BRICK_GRACEFUL_CLEANUP,
2496
if (ret && dup_value)
2505
glusterd_dict_set_skip_cliot_key(glusterd_volinfo_t *volinfo)
2507
return dict_set_int32_sizen(volinfo->dict, "skip-CLIOT", 1);
2511
glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
2515
char *key_fixed = NULL;
2517
char *dup_value = NULL;
2519
glusterd_conf_t *conf = NULL;
2520
dict_t *dup_opt = NULL;
2521
char *next_version = NULL;
2522
gf_boolean_t quorum_action = _gf_false;
2523
uint32_t op_version = 0;
2524
glusterd_volinfo_t *volinfo = NULL;
2525
glusterd_svc_t *svc = NULL;
2526
gf_boolean_t svcs_reconfigure = _gf_false;
2528
conf = this->private;
2529
ret = dict_get_str(dict, "key1", &key);
2531
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
2536
ret = dict_get_str(dict, "value1", &value);
2538
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
2539
"invalid key,value pair in 'volume set'");
2543
ret = glusterd_check_option_exists(key, &key_fixed);
2545
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNKNOWN_KEY,
2546
"Invalid key %s", key);
2554
ret = glusterd_set_shared_storage(dict, key, value, op_errstr);
2556
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SHARED_STRG_SET_FAIL,
2557
"Failed to set shared storage option");
2561
ret = glusterd_set_brick_mx_opts(dict, key, value, op_errstr);
2563
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_MX_SET_FAIL,
2564
"Failed to set brick multiplexing option");
2568
ret = glusterd_set_brick_graceful_cleanup(dict, key, value, conf);
2570
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GRACEFUL_CLEANUP_SET_FAIL,
2571
"Failed to set brick graceful option");
2578
if (strcmp(key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0) {
2581
ret = gf_string2uint(value, &op_version);
2585
if (op_version >= conf->op_version) {
2586
conf->op_version = op_version;
2596
cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
2598
ret = glusterd_store_quota_config(
2599
volinfo, NULL, NULL, GF_QUOTA_OPTION_TYPE_UPGRADE, NULL);
2602
ret = glusterd_update_volumes_dict(volinfo);
2606
if (glusterd_dict_set_skip_cliot_key(volinfo))
2609
if (!volinfo->is_snap_volume) {
2610
svc = &(volinfo->snapd.svc);
2611
ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
2616
svc = &(volinfo->gfproxyd.svc);
2617
ret = svc->reconfigure(volinfo);
2621
svc = &(volinfo->shd.svc);
2622
ret = svc->reconfigure(volinfo);
2626
ret = glusterd_create_volfiles_and_notify_services(volinfo);
2628
gf_msg(this->name, GF_LOG_ERROR, 0,
2629
GD_MSG_VOLFILE_CREATE_FAIL,
2630
"Unable to create volfile for"
2634
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
2635
svcs_reconfigure = _gf_true;
2638
if (svcs_reconfigure) {
2639
ret = glusterd_svcs_reconfigure(NULL);
2641
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
2642
"Unable to restart "
2648
ret = glusterd_store_global_info(this);
2650
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERS_STORE_FAIL,
2651
"Failed to store op-version.");
2659
dup_opt = dict_new();
2661
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
2664
dict_copy(conf->opts, dup_opt);
2665
ret = dict_set_str(dup_opt, key, value);
2667
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
2668
"Key=%s", key, NULL);
2672
ret = glusterd_get_next_global_opt_version_str(conf->opts, &next_version);
2676
ret = dict_set_str_sizen(dup_opt, GLUSTERD_GLOBAL_OPT_VERSION,
2679
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
2680
"Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
2684
ret = glusterd_store_options(this, dup_opt);
2688
if (glusterd_is_quorum_changed(conf->opts, key, value))
2689
quorum_action = _gf_true;
2691
ret = dict_set_dynstr_sizen(conf->opts, GLUSTERD_GLOBAL_OPT_VERSION,
2694
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
2695
"Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
2698
next_version = NULL;
2700
dup_value = gf_strdup(value);
2704
ret = dict_set_dynstr(conf->opts, key, dup_value);
2706
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
2707
"Key=%s", key, NULL);
2716
dict_unref(dup_opt);
2718
gf_msg_debug(this->name, 0, "returning %d", ret);
2720
glusterd_do_quorum_action();
2721
GF_FREE(next_version);
2726
glusterd_op_get_max_opversion(char **op_errstr, dict_t *rsp_dict)
2730
GF_VALIDATE_OR_GOTO(THIS->name, rsp_dict, out);
2732
ret = dict_set_int32_sizen(rsp_dict, "max-opversion", GD_OP_VERSION_MAX);
2734
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
2735
"Setting value for max-opversion to dict failed");
2740
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
2745
glusterd_set_shared_storage(dict_t *dict, char *key, char *value,
2749
char hooks_args[PATH_MAX] = {
2752
char errstr[PATH_MAX] = {
2755
xlator_t *this = THIS;
2758
GF_VALIDATE_OR_GOTO(this->name, dict, out);
2759
GF_VALIDATE_OR_GOTO(this->name, key, out);
2760
GF_VALIDATE_OR_GOTO(this->name, value, out);
2761
GF_VALIDATE_OR_GOTO(this->name, op_errstr, out);
2765
if (strcmp(key, GLUSTERD_SHARED_STORAGE_KEY)) {
2772
ret = recursive_rmdir(GLUSTER_SHARED_STORAGE_BRICK_DIR);
2774
snprintf(errstr, PATH_MAX,
2775
"Failed to remove shared "
2776
"storage brick(%s). "
2778
GLUSTER_SHARED_STORAGE_BRICK_DIR, strerror(errno));
2779
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED, "%s",
2785
ret = mkdir_p(GLUSTER_SHARED_STORAGE_BRICK_DIR, 0755, _gf_true);
2787
snprintf(errstr, PATH_MAX,
2788
"Failed to create shared "
2789
"storage brick(%s). "
2791
GLUSTER_SHARED_STORAGE_BRICK_DIR, strerror(errno));
2792
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED, "%s",
2797
if (is_origin_glusterd(dict)) {
2798
len = snprintf(hooks_args, sizeof(hooks_args),
2799
"is_originator=1,local_node_hostname=%s",
2800
local_node_hostname);
2802
len = snprintf(hooks_args, sizeof(hooks_args),
2803
"is_originator=0,local_node_hostname=%s",
2804
local_node_hostname);
2806
if ((len < 0) || (len >= sizeof(hooks_args))) {
2811
ret = dict_set_dynstr_with_alloc(dict, "hooks_args", hooks_args);
2813
gf_msg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
2815
" hooks_args in dict.");
2820
if (ret && strlen(errstr)) {
2821
*op_errstr = gf_strdup(errstr);
2828
glusterd_op_set_volume(dict_t *dict, char **errstr)
2831
glusterd_volinfo_t *volinfo = NULL;
2832
char *volname = NULL;
2833
xlator_t *this = THIS;
2834
glusterd_conf_t *priv = NULL;
2837
char *key_fixed = NULL;
2843
gf_boolean_t global_opt = _gf_false;
2844
gf_boolean_t global_opts_set = _gf_false;
2845
glusterd_volinfo_t *voliter = NULL;
2846
int32_t dict_count = 0;
2847
gf_boolean_t check_op_version = _gf_false;
2848
uint32_t new_op_version = 0;
2849
gf_boolean_t quorum_action = _gf_false;
2850
glusterd_svc_t *svc = NULL;
2851
dict_t *volinfo_dict_orig = NULL;
2853
priv = this->private;
2856
volinfo_dict_orig = dict_new();
2857
if (!volinfo_dict_orig)
2860
ret = dict_get_int32(dict, "count", &dict_count);
2862
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
2863
"Count(dict),not set in Volume-Set");
2867
if (dict_count == 0) {
2868
ret = glusterd_volset_help(NULL, errstr);
2872
ret = dict_get_str(dict, "volname", &volname);
2874
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
2875
"Unable to get volume name");
2879
if (strcasecmp(volname, "all") == 0) {
2880
ret = glusterd_op_set_all_volume_options(this, dict, errstr);
2884
ret = glusterd_volinfo_find(volname, &volinfo);
2886
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
2887
FMTSTR_CHECK_VOL_EXISTS, volname);
2891
if (dict_copy(volinfo->dict, volinfo_dict_orig) == NULL) {
2897
check_op_version = dict_get_str_boolean(dict, "check-op-version",
2900
if (check_op_version) {
2901
ret = dict_get_uint32(dict, "new-op-version", &new_op_version);
2903
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
2904
"Unable to get new op-version from dict");
2909
for (count = 1; ret != -1; count++) {
2910
keylen = snprintf(keystr, sizeof(keystr), "key%d", count);
2911
ret = dict_get_strn(dict, keystr, keylen, &key);
2915
keylen = snprintf(keystr, sizeof(keystr), "value%d", count);
2916
ret = dict_get_strn(dict, keystr, keylen, &value);
2918
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
2919
"invalid key,value pair in 'volume set'");
2924
if (strcmp(key, "config.memory-accounting") == 0) {
2925
ret = gf_string2boolean(value, &volinfo->memory_accounting);
2927
gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
2928
"Invalid value in key-value pair.");
2933
if (strcmp(key, "config.transport") == 0) {
2934
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_VOL_TRANSPORT_TYPE_CHANGE,
2935
"changing transport-type for volume %s to %s", volname,
2938
if (strcasecmp(value, "rdma") == 0) {
2939
volinfo->transport_type = GF_TRANSPORT_RDMA;
2940
} else if (strcasecmp(value, "tcp") == 0) {
2941
volinfo->transport_type = GF_TRANSPORT_TCP;
2942
} else if ((strcasecmp(value, "tcp,rdma") == 0) ||
2943
(strcasecmp(value, "rdma,tcp") == 0)) {
2944
volinfo->transport_type = GF_TRANSPORT_BOTH_TCP_RDMA;
2951
ret = glusterd_check_ganesha_cmd(key, value, errstr, dict);
2955
if (!is_key_glusterd_hooks_friendly(key)) {
2956
ret = glusterd_check_option_exists(key, &key_fixed);
2964
global_opt = _gf_false;
2965
if (glusterd_check_globaloption(key)) {
2966
global_opt = _gf_true;
2967
global_opts_set = _gf_true;
2971
value = gf_strdup(value);
2974
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
2975
"Unable to set the options in 'volume set'");
2983
if (glusterd_is_quorum_changed(volinfo->dict, key, value))
2984
quorum_action = _gf_true;
2987
cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
2989
value = gf_strdup(value);
2990
ret = dict_set_dynstr(voliter->dict, key, value);
2995
ret = dict_set_dynstr(volinfo->dict, key, value);
3007
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_OPTIONS_GIVEN,
3008
"No options received ");
3016
if (new_op_version > priv->op_version) {
3017
priv->op_version = new_op_version;
3018
ret = glusterd_store_global_info(this);
3020
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERS_STORE_FAIL,
3021
"Failed to store op-version");
3025
if (!global_opts_set) {
3026
gd_update_volume_op_versions(volinfo);
3028
if (!volinfo->is_snap_volume) {
3029
svc = &(volinfo->snapd.svc);
3030
ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
3034
svc = &(volinfo->gfproxyd.svc);
3035
ret = svc->reconfigure(volinfo);
3039
svc = &(volinfo->shd.svc);
3040
ret = svc->reconfigure(volinfo);
3044
ret = glusterd_create_volfiles_and_notify_services(volinfo);
3046
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
3047
"Unable to create volfile for"
3053
ret = glusterd_store_volinfo(volinfo,
3054
GLUSTERD_VOLINFO_VER_AC_INCREMENT);
3058
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
3059
ret = glusterd_svcs_reconfigure(volinfo);
3061
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
3062
"Unable to restart services");
3068
cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
3071
gd_update_volume_op_versions(volinfo);
3073
if (!volinfo->is_snap_volume) {
3074
svc = &(volinfo->snapd.svc);
3075
ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
3080
svc = &(volinfo->gfproxyd.svc);
3081
ret = svc->reconfigure(volinfo);
3085
svc = &(volinfo->shd.svc);
3086
ret = svc->reconfigure(volinfo);
3090
ret = glusterd_create_volfiles_and_notify_services(volinfo);
3092
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
3093
"Unable to create volfile for"
3099
ret = glusterd_store_volinfo(volinfo,
3100
GLUSTERD_VOLINFO_VER_AC_INCREMENT);
3104
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
3105
ret = glusterd_svcs_reconfigure(volinfo);
3107
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
3108
"Unable to restart services");
3117
gf_msg_debug(this->name, 0, "returning %d", ret);
3119
glusterd_do_quorum_action();
3120
if (ret < 0 && count > 1) {
3121
if (dict_reset(volinfo->dict) == 0)
3122
dict_copy(volinfo_dict_orig, volinfo->dict);
3124
if (volinfo_dict_orig)
3125
dict_unref(volinfo_dict_orig);
3130
glusterd_op_sync_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
3133
char *volname = NULL;
3134
char *hostname = NULL;
3140
glusterd_conf_t *priv = NULL;
3141
glusterd_volinfo_t *volinfo = NULL;
3142
xlator_t *this = THIS;
3144
priv = this->private;
3147
ret = dict_get_str(dict, "hostname", &hostname);
3149
snprintf(msg, sizeof(msg),
3150
"hostname couldn't be "
3151
"retrieved from msg");
3152
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
3153
"Key=hostname", NULL);
3154
*op_errstr = gf_strdup(msg);
3158
if (!glusterd_gf_is_local_addr(hostname)) {
3164
ret = dict_get_str(dict, "volname", &volname);
3166
ret = glusterd_volinfo_find(volname, &volinfo);
3168
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
3169
"Volume with name: %s "
3178
gf_smsg(this->name, GF_LOG_INFO, errno, GD_MSG_INVALID_ARGUMENT, NULL);
3184
ret = glusterd_add_volume_to_dict(volinfo, rsp_dict, 1, "volume");
3189
cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
3191
ret = glusterd_add_volume_to_dict(volinfo, rsp_dict, count,
3196
vol_count = count++;
3199
ret = dict_set_int32_sizen(rsp_dict, "count", vol_count);
3202
gf_msg_debug("glusterd", 0, "Returning %d", ret);
3208
glusterd_add_profile_volume_options(glusterd_volinfo_t *volinfo)
3214
ret = dict_set_nstrn(volinfo->dict, VKEY_DIAG_LAT_MEASUREMENT,
3215
SLEN(VKEY_DIAG_LAT_MEASUREMENT), "on", SLEN("on"));
3217
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3218
"failed to set the volume %s "
3219
"option %s value %s",
3220
volinfo->volname, VKEY_DIAG_LAT_MEASUREMENT, "on");
3224
ret = dict_set_nstrn(volinfo->dict, VKEY_DIAG_CNT_FOP_HITS,
3225
SLEN(VKEY_DIAG_CNT_FOP_HITS), "on", SLEN("on"));
3227
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3228
"failed to set the volume %s "
3229
"option %s value %s",
3230
volinfo->volname, VKEY_DIAG_CNT_FOP_HITS, "on");
3234
gf_msg_debug("glusterd", 0, "Returning %d", ret);
3239
glusterd_remove_profile_volume_options(glusterd_volinfo_t *volinfo)
3243
dict_del_sizen(volinfo->dict, VKEY_DIAG_LAT_MEASUREMENT);
3244
dict_del_sizen(volinfo->dict, VKEY_DIAG_CNT_FOP_HITS);
3248
glusterd_op_stats_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
3251
char *volname = NULL;
3255
glusterd_volinfo_t *volinfo = NULL;
3256
int32_t stats_op = GF_CLI_STATS_NONE;
3258
ret = dict_get_str(dict, "volname", &volname);
3260
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
3261
"volume name get failed");
3265
ret = glusterd_volinfo_find(volname, &volinfo);
3267
snprintf(msg, sizeof(msg), "Volume %s does not exists", volname);
3269
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
3273
ret = dict_get_int32(dict, "op", &stats_op);
3275
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
3276
"volume profile op get failed");
3281
case GF_CLI_STATS_START:
3282
ret = glusterd_add_profile_volume_options(volinfo);
3286
case GF_CLI_STATS_STOP:
3287
glusterd_remove_profile_volume_options(volinfo);
3289
case GF_CLI_STATS_INFO:
3290
case GF_CLI_STATS_TOP:
3298
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
3299
"Invalid profile op: %d", stats_op);
3304
ret = glusterd_create_volfiles_and_notify_services(volinfo);
3307
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
3308
"Unable to create volfile for"
3314
ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
3318
if (GLUSTERD_STATUS_STARTED == volinfo->status) {
3319
ret = glusterd_svcs_reconfigure(volinfo);
3327
gf_msg_debug("glusterd", 0, "Returning %d", ret);
3333
_add_remove_bricks_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo,
3339
char brick_key[16] = {
3342
char dict_key[64] = {
3348
xlator_t *this = THIS;
3354
ret = dict_get_int32(volinfo->rebal.dict, "count", &count);
3356
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
3357
"Failed to get brick count");
3361
keylen = snprintf(dict_key, sizeof(dict_key), "%s.count", prefix);
3362
ret = dict_set_int32n(dict, dict_key, keylen, count);
3364
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3365
"Failed to set brick count in dict");
3369
for (i = 1; i <= count; i++) {
3370
keylen = snprintf(brick_key, sizeof(brick_key), "brick%d", i);
3372
ret = dict_get_strn(volinfo->rebal.dict, brick_key, keylen, &brick);
3374
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
3375
"Unable to get %s", brick_key);
3379
keylen = snprintf(dict_key, sizeof(dict_key), "%s.%s", prefix,
3381
if ((keylen < 0) || (keylen >= sizeof(dict_key))) {
3385
ret = dict_set_strn(dict, dict_key, keylen, brick);
3387
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3388
"Failed to add brick to dict");
3402
_add_task_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
3409
char *uuid_str = NULL;
3411
xlator_t *this = THIS;
3417
case GD_OP_REMOVE_BRICK:
3418
snprintf(key, sizeof(key), "task%d", index);
3419
ret = _add_remove_bricks_to_dict(dict, volinfo, key);
3421
gf_msg(this->name, GF_LOG_ERROR, 0,
3422
GD_MSG_ADD_REMOVE_BRICK_FAIL,
3423
"Failed to add remove bricks to dict");
3426
case GD_OP_REBALANCE:
3427
uuid_str = gf_strdup(uuid_utoa(volinfo->rebal.rebalance_id));
3428
status = volinfo->rebal.defrag_status;
3433
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_TASK_ID,
3434
"%s operation doesn't have a"
3440
keylen = snprintf(key, sizeof(key), "task%d.type", index);
3441
ret = dict_set_strn(dict, key, keylen, (char *)gd_op_list[op]);
3443
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3444
"Error setting task type in dict");
3448
keylen = snprintf(key, sizeof(key), "task%d.id", index);
3452
ret = dict_set_dynstrn(dict, key, keylen, uuid_str);
3454
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3455
"Error setting task id in dict");
3460
keylen = snprintf(key, sizeof(key), "task%d.status", index);
3461
ret = dict_set_int32n(dict, key, keylen, status);
3463
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3464
"Error setting task status in dict");
3475
glusterd_aggregate_task_status(dict_t *rsp_dict, glusterd_volinfo_t *volinfo)
3479
xlator_t *this = THIS;
3481
if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) {
3482
ret = _add_task_to_dict(rsp_dict, volinfo, volinfo->rebal.op, tasks);
3485
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3486
"Failed to add task details to dict");
3491
ret = dict_set_int32_sizen(rsp_dict, "tasks", tasks);
3493
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3494
"Error setting tasks count in dict");
3502
glusterd_add_node_to_dict(char *server, dict_t *dict, int count,
3506
char pidfile[PATH_MAX] = "";
3507
gf_boolean_t running = _gf_false;
3510
glusterd_svc_t *svc = NULL;
3513
xlator_t *this = THIS;
3514
glusterd_conf_t *priv = NULL;
3516
priv = this->private;
3519
if (!strcmp(server, "")) {
3524
glusterd_svc_build_pidfile_path(server, priv->rundir, pidfile,
3527
if (strcmp(server, priv->quotad_svc.name) == 0)
3528
svc = &(priv->quotad_svc);
3530
else if (strcmp(server, priv->nfs_svc.name) == 0)
3531
svc = &(priv->nfs_svc);
3533
else if (strcmp(server, priv->bitd_svc.name) == 0)
3534
svc = &(priv->bitd_svc);
3535
else if (strcmp(server, priv->scrub_svc.name) == 0)
3536
svc = &(priv->scrub_svc);
3544
running = gf_is_service_running(pidfile, &pid);
3556
keylen = snprintf(key, sizeof(key), "brick%d.hostname", count);
3557
if (!strcmp(server, priv->quotad_svc.name))
3558
ret = dict_set_nstrn(dict, key, keylen, "Quota Daemon",
3559
SLEN("Quota Daemon"));
3561
else if (!strcmp(server, priv->nfs_svc.name))
3562
ret = dict_set_nstrn(dict, key, keylen, "NFS Server",
3563
SLEN("NFS Server"));
3565
else if (!strcmp(server, priv->bitd_svc.name))
3566
ret = dict_set_nstrn(dict, key, keylen, "Bitrot Daemon",
3567
SLEN("Bitrot Daemon"));
3568
else if (!strcmp(server, priv->scrub_svc.name))
3569
ret = dict_set_nstrn(dict, key, keylen, "Scrubber Daemon",
3570
SLEN("Scrubber Daemon"));
3572
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
3573
"Key=%s", key, NULL);
3577
keylen = snprintf(key, sizeof(key), "brick%d.path", count);
3578
ret = dict_set_dynstrn(dict, key, keylen, gf_strdup(uuid_utoa(MY_UUID)));
3580
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
3581
"Key=%s", key, NULL);
3590
if (!strcmp(server, priv->nfs_svc.name)) {
3591
if (dict_get_sizen(vol_opts, "nfs.port")) {
3592
ret = dict_get_int32(vol_opts, "nfs.port", &port);
3594
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
3595
"Key=nfs.port", NULL);
3599
port = GF_NFS3_PORT;
3602
keylen = snprintf(key, sizeof(key), "brick%d.port", count);
3603
ret = dict_set_int32n(dict, key, keylen, port);
3605
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
3606
"Key=%s", key, NULL);
3610
keylen = snprintf(key, sizeof(key), "brick%d.pid", count);
3611
ret = dict_set_int32n(dict, key, keylen, pid);
3613
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
3614
"Key=%s", key, NULL);
3618
keylen = snprintf(key, sizeof(key), "brick%d.status", count);
3619
ret = dict_set_int32n(dict, key, keylen, running);
3621
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
3622
"Key=%s", key, NULL);
3627
gf_msg_debug(this->name, 0, "Returning %d", ret);
3632
glusterd_get_all_volnames(dict_t *dict)
3635
int32_t vol_count = 0;
3638
glusterd_volinfo_t *entry = NULL;
3639
glusterd_conf_t *priv = NULL;
3641
priv = THIS->private;
3644
cds_list_for_each_entry(entry, &priv->volumes, vol_list)
3646
keylen = snprintf(key, sizeof(key), "vol%d", vol_count);
3647
ret = dict_set_strn(dict, key, keylen, entry->volname);
3654
ret = dict_set_int32_sizen(dict, "vol_count", vol_count);
3658
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3659
"failed to get all "
3660
"volume names for status");
3665
glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
3670
int32_t brick_online = -1;
3673
char *pidfile = NULL;
3674
xlator_t *this = THIS;
3675
char *uuid_str = NULL;
3677
GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
3678
GF_VALIDATE_OR_GOTO(this->name, dict, out);
3680
keylen = snprintf(key, sizeof(key), "brick%d.hostname", count);
3681
ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon",
3682
SLEN("Self-heal Daemon"));
3684
gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
3689
keylen = snprintf(key, sizeof(key), "brick%d.path", count);
3690
uuid_str = gf_strdup(uuid_utoa(MY_UUID));
3695
ret = dict_set_dynstrn(dict, key, keylen, uuid_str);
3697
gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
3707
keylen = snprintf(key, sizeof(key), "brick%d.port", count);
3708
ret = dict_set_int32n(dict, key, keylen, 0);
3710
gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
3715
pidfile = volinfo->shd.svc.proc.pidfile;
3717
brick_online = gf_is_service_running(pidfile, &pid);
3722
keylen = snprintf(key, sizeof(key), "brick%d.pid", count);
3723
ret = dict_set_int32n(dict, key, keylen, pid);
3725
gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
3730
keylen = snprintf(key, sizeof(key), "brick%d.status", count);
3731
ret = dict_set_int32n(dict, key, keylen, brick_online);
3737
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3738
"Returning %d. adding values to dict failed", ret);
3744
glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
3748
int brick_index = -1;
3749
int other_count = 0;
3750
int other_index = 0;
3752
char *volname = NULL;
3754
xlator_t *this = THIS;
3755
glusterd_volinfo_t *volinfo = NULL;
3756
glusterd_brickinfo_t *brickinfo = NULL;
3757
glusterd_conf_t *priv = NULL;
3758
dict_t *vol_opts = NULL;
3760
gf_boolean_t nfs_disabled = _gf_false;
3762
gf_boolean_t shd_enabled = _gf_false;
3763
gf_boolean_t origin_glusterd = _gf_false;
3764
int snapd_enabled, bitrot_enabled, volume_quota_enabled;
3766
priv = this->private;
3772
origin_glusterd = is_origin_glusterd(dict);
3774
ret = dict_get_uint32(dict, "cmd", &cmd);
3778
if (origin_glusterd) {
3780
if ((cmd & GF_CLI_STATUS_ALL)) {
3781
ret = glusterd_get_all_volnames(rsp_dict);
3783
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAMES_GET_FAIL,
3784
"failed to get all volume "
3785
"names for status");
3789
ret = dict_set_uint32(rsp_dict, "cmd", cmd);
3793
if (cmd & GF_CLI_STATUS_ALL)
3796
ret = dict_get_str(dict, "volname", &volname);
3800
ret = glusterd_volinfo_find(volname, &volinfo);
3802
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
3803
"Volume with name: %s "
3808
vol_opts = volinfo->dict;
3810
if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
3811
ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict, 0,
3818
} else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
3819
ret = glusterd_add_node_to_dict(priv->nfs_svc.name, rsp_dict, 0,
3826
} else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
3827
ret = glusterd_add_node_to_dict(priv->bitd_svc.name, rsp_dict, 0,
3833
} else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
3834
ret = glusterd_add_node_to_dict(priv->scrub_svc.name, rsp_dict, 0,
3840
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
3841
ret = glusterd_add_snapd_to_dict(volinfo, rsp_dict, other_index);
3846
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
3847
ret = glusterd_add_shd_to_dict(volinfo, rsp_dict, other_index);
3852
} else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
3853
ret = dict_get_str(dict, "brick", &brick);
3857
ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
3862
if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
3865
glusterd_add_brick_to_dict(volinfo, brickinfo, rsp_dict, ++brick_index);
3866
if (cmd & GF_CLI_STATUS_DETAIL)
3867
glusterd_add_brick_detail_to_dict(volinfo, brickinfo, rsp_dict,
3871
} else if ((cmd & GF_CLI_STATUS_TASKS) != 0) {
3872
ret = glusterd_aggregate_task_status(rsp_dict, volinfo);
3876
snapd_enabled = glusterd_is_snapd_enabled(volinfo);
3877
shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
3879
nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
3882
volume_quota_enabled = glusterd_is_volume_quota_enabled(volinfo);
3883
bitrot_enabled = glusterd_is_bitrot_enabled(volinfo);
3885
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
3888
if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
3891
glusterd_add_brick_to_dict(volinfo, brickinfo, rsp_dict,
3894
if (cmd & GF_CLI_STATUS_DETAIL) {
3895
glusterd_add_brick_detail_to_dict(volinfo, brickinfo, rsp_dict,
3901
if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
3902
other_index = brick_index + 1;
3903
if (snapd_enabled) {
3904
ret = glusterd_add_snapd_to_dict(volinfo, rsp_dict,
3913
if (glusterd_is_shd_compatible_volume(volinfo)) {
3915
ret = glusterd_add_shd_to_dict(volinfo, rsp_dict,
3925
if (!nfs_disabled) {
3926
ret = glusterd_add_node_to_dict(priv->nfs_svc.name, rsp_dict,
3927
other_index, vol_opts);
3935
if (volume_quota_enabled) {
3936
ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict,
3937
other_index, vol_opts);
3945
if (bitrot_enabled) {
3946
ret = glusterd_add_node_to_dict(priv->bitd_svc.name, rsp_dict,
3947
other_index, vol_opts);
3955
ret = glusterd_add_node_to_dict(priv->scrub_svc.name, rsp_dict,
3956
other_index, vol_opts);
3965
ret = dict_set_int32_sizen(rsp_dict, "type", volinfo->type);
3967
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
3972
ret = dict_set_int32_sizen(rsp_dict, "brick-index-max", brick_index);
3974
gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3975
"Key=brick-index-max", NULL);
3978
ret = dict_set_int32_sizen(rsp_dict, "other-count", other_count);
3980
gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3981
"Key=other-count", NULL);
3984
ret = dict_set_int32_sizen(rsp_dict, "count", node_count);
3986
gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3995
if (!glusterd_status_has_tasks(cmd))
3998
ret = glusterd_aggregate_task_status(rsp_dict, volinfo);
4004
gf_msg_debug(this->name, 0, "Returning %d", ret);
4010
glusterd_op_ac_none(glusterd_op_sm_event_t *event, void *ctx)
4014
gf_msg_debug(THIS->name, 0, "Returning with %d", ret);
4020
glusterd_op_sm_locking_failed(uuid_t *txn_id)
4025
opinfo.op_errstr = gf_strdup("locking failed for one of the peer.");
4027
ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
4029
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
4031
"transaction's opinfo");
4033
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT, txn_id, NULL);
4039
glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
4042
rpc_clnt_procedure_t *proc = NULL;
4043
glusterd_conf_t *priv = NULL;
4044
xlator_t *this = THIS;
4045
glusterd_peerinfo_t *peerinfo = NULL;
4046
uint32_t pending_count = 0;
4047
dict_t *dict = NULL;
4049
priv = this->private;
4053
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
4058
if (peerinfo->generation > opinfo.txn_generation)
4061
if (!peerinfo->connected || !peerinfo->mgmt)
4063
if ((peerinfo->state != GD_FRIEND_STATE_BEFRIENDED) &&
4064
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
4067
dict = glusterd_op_get_ctx();
4070
proc = &peerinfo->mgmt_v3->proctable[GLUSTERD_MGMT_V3_LOCK];
4072
ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
4075
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
4076
"failed to set peerinfo");
4081
ret = proc->fn(NULL, this, dict);
4084
gf_msg(this->name, GF_LOG_WARNING, 0,
4085
GD_MSG_MGMTV3_LOCK_REQ_SEND_FAIL,
4086
"Failed to send mgmt_v3 lock "
4087
"request for operation "
4088
"'Volume %s' to peer %s",
4089
gd_op_list[opinfo.op], peerinfo->hostname);
4094
peerinfo->locked = _gf_true;
4100
opinfo.pending_count = pending_count;
4102
ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
4104
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
4106
"transaction's opinfo");
4108
if (!opinfo.pending_count)
4109
ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
4113
ret = glusterd_op_sm_locking_failed(&event->txn_id);
4115
gf_msg_debug(this->name, 0, "Returning with %d", ret);
4120
glusterd_op_ac_send_unlock(glusterd_op_sm_event_t *event, void *ctx)
4123
rpc_clnt_procedure_t *proc = NULL;
4124
glusterd_conf_t *priv = NULL;
4125
xlator_t *this = THIS;
4126
glusterd_peerinfo_t *peerinfo = NULL;
4127
uint32_t pending_count = 0;
4128
dict_t *dict = NULL;
4130
priv = this->private;
4134
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
4139
if (peerinfo->generation > opinfo.txn_generation)
4142
if (!peerinfo->connected || !peerinfo->mgmt || !peerinfo->locked)
4144
if ((peerinfo->state != GD_FRIEND_STATE_BEFRIENDED) &&
4145
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
4147
dict = glusterd_op_get_ctx();
4150
proc = &peerinfo->mgmt_v3->proctable[GLUSTERD_MGMT_V3_UNLOCK];
4152
ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
4154
opinfo.op_errstr = gf_strdup(
4155
"Unlocking failed for one of the "
4157
gf_msg(this->name, GF_LOG_ERROR, 0,
4158
GD_MSG_CLUSTER_UNLOCK_FAILED,
4159
"Unlocking failed for operation"
4160
" volume %s on peer %s",
4161
gd_op_list[opinfo.op], peerinfo->hostname);
4166
ret = proc->fn(NULL, this, dict);
4168
opinfo.op_errstr = gf_strdup(
4169
"Unlocking failed for one of the "
4171
gf_msg(this->name, GF_LOG_ERROR, 0,
4172
GD_MSG_CLUSTER_UNLOCK_FAILED,
4173
"Unlocking failed for operation"
4174
" volume %s on peer %s",
4175
gd_op_list[opinfo.op], peerinfo->hostname);
4180
peerinfo->locked = _gf_false;
4185
opinfo.pending_count = pending_count;
4187
ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
4189
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
4191
"transaction's opinfo");
4193
if (!opinfo.pending_count)
4194
ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
4196
gf_msg_debug(this->name, 0, "Returning with %d", ret);
4201
glusterd_op_ac_ack_drain(glusterd_op_sm_event_t *event, void *ctx)
4205
if (opinfo.pending_count > 0)
4206
opinfo.pending_count--;
4208
ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
4210
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
4212
"transaction's opinfo");
4214
if (!opinfo.pending_count)
4215
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
4218
gf_msg_debug(THIS->name, 0, "Returning with %d", ret);
4224
glusterd_op_ac_send_unlock_drain(glusterd_op_sm_event_t *event, void *ctx)
4226
return glusterd_op_ac_ack_drain(event, ctx);
4230
glusterd_op_ac_lock(glusterd_op_sm_event_t *event, void *ctx)
4233
char *volname = NULL;
4234
char *globalname = NULL;
4235
glusterd_op_lock_ctx_t *lock_ctx = NULL;
4236
xlator_t *this = THIS;
4237
uint32_t op_errno = 0;
4238
glusterd_conf_t *conf = NULL;
4244
conf = this->private;
4247
lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
4252
if (lock_ctx->dict == NULL) {
4253
ret = glusterd_lock(lock_ctx->uuid);
4254
glusterd_op_lock_send_resp(lock_ctx->req, ret);
4261
ret = dict_get_time(lock_ctx->dict, "timeout", &timeout);
4263
conf->mgmt_v3_lock_timeout = timeout + 120;
4265
ret = dict_get_str(lock_ctx->dict, "volname", &volname);
4267
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
4268
"Unable to acquire volname");
4270
ret = glusterd_mgmt_v3_lock(volname, lock_ctx->uuid, &op_errno,
4273
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
4274
"Unable to acquire lock for %s", volname);
4277
ret = dict_get_str(lock_ctx->dict, "globalname", &globalname);
4279
ret = glusterd_mgmt_v3_lock(globalname, lock_ctx->uuid, &op_errno,
4282
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
4283
"Unable to acquire lock for %s", globalname);
4286
glusterd_op_mgmt_v3_lock_send_resp(lock_ctx->req, &event->txn_id, ret);
4288
dict_unref(lock_ctx->dict);
4291
gf_msg_debug(THIS->name, 0, "Lock Returned %d", ret);
4296
glusterd_op_ac_unlock(glusterd_op_sm_event_t *event, void *ctx)
4299
char *volname = NULL;
4300
char *globalname = NULL;
4301
glusterd_op_lock_ctx_t *lock_ctx = NULL;
4302
glusterd_conf_t *priv = NULL;
4303
xlator_t *this = THIS;
4308
priv = this->private;
4310
lock_ctx = (glusterd_op_lock_ctx_t *)ctx;
4315
if (lock_ctx->dict == NULL) {
4316
ret = glusterd_unlock(lock_ctx->uuid);
4317
glusterd_op_unlock_send_resp(lock_ctx->req, ret);
4319
ret = dict_get_str(lock_ctx->dict, "volname", &volname);
4321
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
4322
"Unable to acquire volname");
4324
ret = glusterd_mgmt_v3_unlock(volname, lock_ctx->uuid, "vol");
4326
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
4327
"Unable to release lock for %s", volname);
4331
ret = dict_get_str(lock_ctx->dict, "globalname", &globalname);
4333
ret = glusterd_mgmt_v3_unlock(globalname, lock_ctx->uuid, "global");
4335
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
4336
"Unable to release lock for %s", globalname);
4339
glusterd_op_mgmt_v3_unlock_send_resp(lock_ctx->req, &event->txn_id,
4342
dict_unref(lock_ctx->dict);
4345
gf_msg_debug(this->name, 0, "Unlock Returned %d", ret);
4347
if (priv->pending_quorum_action)
4348
glusterd_do_quorum_action();
4353
glusterd_op_ac_local_unlock(glusterd_op_sm_event_t *event, void *ctx)
4356
uuid_t *originator = NULL;
4361
originator = (uuid_t *)ctx;
4363
ret = glusterd_unlock(*originator);
4365
gf_msg_debug(THIS->name, 0, "Unlock Returned %d", ret);
4371
glusterd_op_ac_rcvd_lock_acc(glusterd_op_sm_event_t *event, void *ctx)
4377
if (opinfo.pending_count > 0)
4378
opinfo.pending_count--;
4380
ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
4382
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
4384
"transaction's opinfo");
4386
if (opinfo.pending_count > 0)
4389
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACC, &event->txn_id,
4392
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
4399
glusterd_dict_set_volid(dict_t *dict, char *volname, char **op_errstr)
4402
glusterd_volinfo_t *volinfo = NULL;
4407
xlator_t *this = THIS;
4409
if (!dict || !volname) {
4410
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
4414
ret = glusterd_volinfo_find(volname, &volinfo);
4416
snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
4419
volid = gf_strdup(uuid_utoa(volinfo->volume_id));
4424
ret = dict_set_dynstr_sizen(dict, "vol-id", volid);
4426
snprintf(msg, sizeof(msg),
4427
"Failed to set volume id of volume"
4434
if (msg[0] != '\0') {
4435
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ID_SET_FAIL, "%s", msg);
4436
*op_errstr = gf_strdup(msg);
4442
gd_set_commit_hash(dict_t *dict)
4458
gettimeofday(&tv, NULL);
4459
hash = tv.tv_sec << 3;
4466
hash |= 1 << ((tv.tv_usec >> 10) % 3);
4468
return dict_set_uint32(dict, "commit-hash", hash);
4472
glusterd_op_build_payload(dict_t **req, char **op_errstr, dict_t *op_ctx)
4476
dict_t *dict = NULL;
4477
dict_t *req_dict = NULL;
4478
glusterd_op_t op = GD_OP_NONE;
4479
char *volname = NULL;
4480
uint32_t status_cmd = GF_CLI_STATUS_NONE;
4481
xlator_t *this = THIS;
4482
gf_boolean_t do_common = _gf_false;
4486
req_dict = dict_new();
4491
op = glusterd_op_get_op();
4492
ctx = (void *)glusterd_op_get_ctx();
4494
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_OPTIONS_GIVEN,
4503
#define GD_SYNC_OPCODE_KEY "sync-mgmt-operation"
4504
ret = dict_get_int32(op_ctx, GD_SYNC_OPCODE_KEY, (int32_t *)&op);
4506
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
4507
"Failed to get volume"
4512
#undef GD_SYNC_OPCODE_KEY
4517
case GD_OP_CREATE_VOLUME: {
4519
ret = dict_set_int32_sizen(dict, "port", glusterfs_port);
4521
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
4522
"Failed to set port in "
4526
dict_copy(dict, req_dict);
4529
case GD_OP_GSYNC_CREATE:
4530
case GD_OP_GSYNC_SET: {
4531
ret = glusterd_op_gsync_args_get(dict, op_errstr, &volname, NULL,
4534
ret = glusterd_dict_set_volid(dict, volname, op_errstr);
4538
dict_copy(dict, req_dict);
4541
case GD_OP_SET_VOLUME: {
4542
ret = dict_get_str(dict, "volname", &volname);
4544
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_DICT_GET_FAILED,
4545
"volname is not present in "
4549
if (strcmp(volname, "help") && strcmp(volname, "help-xml") &&
4550
strcasecmp(volname, "all")) {
4551
ret = glusterd_dict_set_volid(dict, volname, op_errstr);
4555
dict_unref(req_dict);
4556
req_dict = dict_ref(dict);
4559
case GD_OP_REMOVE_BRICK: {
4561
ret = dict_get_str(dict, "volname", &volname);
4563
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_DICT_GET_FAILED,
4564
"volname is not present in "
4569
ret = glusterd_dict_set_volid(dict, volname, op_errstr);
4573
ret = gd_set_commit_hash(dict);
4577
dict_unref(req_dict);
4578
req_dict = dict_ref(dict);
4581
case GD_OP_STATUS_VOLUME: {
4582
ret = dict_get_uint32(dict, "cmd", &status_cmd);
4584
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
4585
"Status command not present "
4589
if (GF_CLI_STATUS_ALL & status_cmd) {
4590
dict_copy(dict, req_dict);
4593
do_common = _gf_true;
4596
case GD_OP_DELETE_VOLUME:
4597
case GD_OP_START_VOLUME:
4598
case GD_OP_STOP_VOLUME:
4599
case GD_OP_ADD_BRICK:
4600
case GD_OP_REPLACE_BRICK:
4601
case GD_OP_RESET_VOLUME:
4602
case GD_OP_LOG_ROTATE:
4604
case GD_OP_PROFILE_VOLUME:
4605
case GD_OP_HEAL_VOLUME:
4606
case GD_OP_STATEDUMP_VOLUME:
4607
case GD_OP_CLEARLOCKS_VOLUME:
4608
case GD_OP_DEFRAG_BRICK_VOLUME:
4611
case GD_OP_SCRUB_STATUS:
4612
case GD_OP_SCRUB_ONDEMAND:
4613
case GD_OP_RESET_BRICK: {
4614
do_common = _gf_true;
4617
case GD_OP_REBALANCE: {
4618
if (gd_set_commit_hash(dict) != 0) {
4621
do_common = _gf_true;
4624
case GD_OP_SYNC_VOLUME:
4625
case GD_OP_COPY_FILE:
4626
case GD_OP_SYS_EXEC:
4627
case GD_OP_GANESHA: {
4628
dict_copy(dict, req_dict);
4640
ret = dict_get_str(dict, "volname", &volname);
4642
gf_msg(this->name, GF_LOG_CRITICAL, -ret, GD_MSG_DICT_GET_FAILED,
4643
"volname is not present in "
4648
if (strcasecmp(volname, "all")) {
4649
ret = glusterd_dict_set_volid(dict, volname, op_errstr);
4653
dict_copy(dict, req_dict);
4660
if (ret && req_dict)
4661
dict_unref(req_dict);
4666
glusterd_op_ac_send_stage_op(glusterd_op_sm_event_t *event, void *ctx)
4670
rpc_clnt_procedure_t *proc = NULL;
4671
glusterd_conf_t *priv = NULL;
4672
xlator_t *this = THIS;
4673
glusterd_peerinfo_t *peerinfo = NULL;
4674
dict_t *dict = NULL;
4675
dict_t *rsp_dict = NULL;
4676
char *op_errstr = NULL;
4677
glusterd_op_t op = GD_OP_NONE;
4678
uint32_t pending_count = 0;
4680
priv = this->private;
4683
op = glusterd_op_get_op();
4685
rsp_dict = dict_new();
4687
gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
4688
"Failed to create rsp_dict");
4693
ret = glusterd_op_build_payload(&dict, &op_errstr, NULL);
4695
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
4696
LOGSTR_BUILD_PAYLOAD, gd_op_list[op]);
4697
if (op_errstr == NULL)
4698
gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
4699
opinfo.op_errstr = op_errstr;
4703
ret = glusterd_validate_quorum(this, op, dict, &op_errstr);
4705
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
4706
"Server quorum not met. Rejecting operation.");
4707
opinfo.op_errstr = op_errstr;
4711
ret = glusterd_op_stage_validate(op, dict, &op_errstr, rsp_dict);
4713
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VALIDATE_FAILED,
4714
LOGSTR_STAGE_FAIL, gd_op_list[op], "localhost",
4715
(op_errstr) ? ":" : " ", (op_errstr) ? op_errstr : " ");
4716
if (op_errstr == NULL)
4717
gf_asprintf(&op_errstr, OPERRSTR_STAGE_FAIL, "localhost");
4718
opinfo.op_errstr = op_errstr;
4723
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
4728
if (peerinfo->generation > opinfo.txn_generation)
4731
if (!peerinfo->connected || !peerinfo->mgmt)
4733
if ((peerinfo->state != GD_FRIEND_STATE_BEFRIENDED) &&
4734
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
4737
proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_STAGE_OP];
4740
ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
4743
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
4749
ret = proc->fn(NULL, this, dict);
4751
gf_msg(this->name, GF_LOG_WARNING, 0,
4752
GD_MSG_STAGE_REQ_SEND_FAIL,
4754
"send stage request for operation "
4755
"'Volume %s' to peer %s",
4756
gd_op_list[op], peerinfo->hostname);
4764
opinfo.pending_count = pending_count;
4767
opinfo.op_ret = ret;
4769
ret1 = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
4771
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
4773
"transaction's opinfo");
4776
dict_unref(rsp_dict);
4781
glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT, &event->txn_id, NULL);
4782
opinfo.op_ret = ret;
4785
gf_msg_debug(this->name, 0,
4786
"Sent stage op request for "
4787
"'Volume %s' to %d peers",
4788
gd_op_list[op], opinfo.pending_count);
4790
if (!opinfo.pending_count)
4791
ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
4793
gf_msg_debug(this->name, 0, "Returning with %d", ret);
4802
glusterd_op_volume_dict_uuid_to_hostname(dict_t *dict, const char *key_fmt,
4803
int idx_min, int idx_max)
4809
char *uuid_str = NULL;
4813
char *hostname = NULL;
4814
xlator_t *this = THIS;
4819
for (i = idx_min; i < idx_max; i++) {
4820
keylen = snprintf(key, sizeof(key), key_fmt, i);
4821
ret = dict_get_strn(dict, key, keylen, &uuid_str);
4827
gf_msg_debug(this->name, 0, "Got uuid %s", uuid_str);
4829
ret = gf_uuid_parse(uuid_str, uuid);
4838
hostname = glusterd_uuid_to_hostname(uuid);
4840
gf_msg_debug(this->name, 0, "%s -> %s", uuid_str, hostname);
4841
ret = dict_set_dynstrn(dict, key, keylen, hostname);
4843
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
4844
"Error setting hostname %s to dict", hostname);
4852
gf_msg_debug(this->name, 0, "Returning %d", ret);
4857
reassign_defrag_status(dict_t *dict, char *key, int keylen,
4858
gf_defrag_status_t *status)
4866
case GF_DEFRAG_STATUS_STARTED:
4867
*status = GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED;
4870
case GF_DEFRAG_STATUS_STOPPED:
4871
*status = GF_DEFRAG_STATUS_LAYOUT_FIX_STOPPED;
4874
case GF_DEFRAG_STATUS_COMPLETE:
4875
*status = GF_DEFRAG_STATUS_LAYOUT_FIX_COMPLETE;
4878
case GF_DEFRAG_STATUS_FAILED:
4879
*status = GF_DEFRAG_STATUS_LAYOUT_FIX_FAILED;
4885
ret = dict_set_int32n(dict, key, keylen, *status);
4887
gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_DICT_SET_FAILED,
4888
"failed to reset defrag %s in dict", key);
4898
glusterd_op_check_peer_defrag_status(dict_t *dict, int count)
4900
glusterd_volinfo_t *volinfo = NULL;
4901
gf_defrag_status_t status = GF_DEFRAG_STATUS_NOT_STARTED;
4906
char *volname = NULL;
4910
ret = dict_get_str(dict, "volname", &volname);
4912
gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
4913
"Unable to get volume name");
4917
ret = glusterd_volinfo_find(volname, &volinfo);
4919
gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND,
4920
FMTSTR_CHECK_VOL_EXISTS, volname);
4924
if (volinfo->rebal.defrag_cmd != GF_DEFRAG_CMD_START_LAYOUT_FIX) {
4932
keylen = snprintf(key, sizeof(key), "status-%d", i);
4933
ret = dict_get_int32n(dict, key, keylen, (int32_t *)&status);
4935
gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
4936
"failed to get defrag %s", key);
4939
ret = reassign_defrag_status(dict, key, keylen, &status);
4943
} while (i <= count);
4971
glusterd_is_volume_status_modify_op_ctx(uint32_t cmd)
4973
if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
4974
if (cmd & GF_CLI_STATUS_BRICK)
4976
if (cmd & GF_CLI_STATUS_ALL)
4984
glusterd_op_modify_port_key(dict_t *op_ctx, int brick_index_max)
4991
char old_key[64] = {0};
4994
for (i = 0; i <= brick_index_max; i++) {
4995
keylen = snprintf(key, sizeof(key), "brick%d.rdma_port", i);
4996
ret = dict_get_strn(op_ctx, key, keylen, &port);
4999
old_keylen = snprintf(old_key, sizeof(old_key), "brick%d.port", i);
5000
ret = dict_get_strn(op_ctx, old_key, old_keylen, &port);
5004
ret = dict_set_strn(op_ctx, key, keylen, port);
5007
ret = dict_set_nstrn(op_ctx, old_key, old_keylen, "\0", SLEN("\0"));
5021
glusterd_op_modify_op_ctx(glusterd_op_t op, void *ctx)
5024
dict_t *op_ctx = NULL;
5025
int brick_index_max = -1;
5026
int other_count = 0;
5028
uint32_t cmd = GF_CLI_STATUS_NONE;
5029
xlator_t *this = THIS;
5030
char *volname = NULL;
5031
glusterd_volinfo_t *volinfo = NULL;
5042
op_ctx = glusterd_op_get_ctx();
5045
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_OPCTX_NULL,
5046
"Operation context is not present.");
5051
case GD_OP_STATUS_VOLUME:
5052
ret = dict_get_uint32(op_ctx, "cmd", &cmd);
5054
gf_msg_debug(this->name, 0, "Failed to get status cmd");
5058
if (!glusterd_is_volume_status_modify_op_ctx(cmd)) {
5059
gf_msg_debug(this->name, 0,
5060
"op_ctx modification not required for status "
5061
"operation being performed");
5065
ret = dict_get_int32(op_ctx, "brick-index-max", &brick_index_max);
5067
gf_msg_debug(this->name, 0, "Failed to get brick-index-max");
5071
ret = dict_get_int32(op_ctx, "other-count", &other_count);
5073
gf_msg_debug(this->name, 0, "Failed to get other-count");
5077
count = brick_index_max + other_count + 1;
5084
ret = dict_get_str(op_ctx, "volname", &volname);
5088
for (i = 0; i <= brick_index_max; i++) {
5089
keylen = snprintf(key, sizeof(key), "brick%d.rdma_port", i);
5090
ret = dict_get_strn(op_ctx, key, keylen, &port);
5092
ret = dict_set_nstrn(op_ctx, key, keylen, "\0", SLEN("\0"));
5097
ret = glusterd_volinfo_find(volname, &volinfo);
5103
char *uuid_str = NULL;
5107
for (i = brick_index_max + 1; i < count; i++) {
5108
keylen = snprintf(key, sizeof(key), "brick%d.path", i);
5109
ret = dict_get_strn(op_ctx, key, keylen, &uuid_str);
5111
keylen = snprintf(key, sizeof(key), "brick%d.peerid",
5113
uuid = gf_strdup(uuid_str);
5115
gf_msg_debug(this->name, 0,
5116
"unable to create dup of"
5120
ret = dict_set_dynstrn(op_ctx, key, keylen, uuid);
5128
ret = glusterd_op_volume_dict_uuid_to_hostname(
5129
op_ctx, "brick%d.path", 0, count);
5131
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_CONVERSION_FAILED,
5132
"Failed uuid to hostname conversion");
5136
case GD_OP_PROFILE_VOLUME:
5137
ret = dict_get_str_boolean(op_ctx, "nfs", _gf_false);
5141
ret = dict_get_int32(op_ctx, "count", &count);
5143
gf_msg_debug(this->name, 0, "Failed to get brick count");
5147
ret = glusterd_op_volume_dict_uuid_to_hostname(op_ctx, "%d-brick",
5150
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_CONVERSION_FAILED,
5151
"Failed uuid to hostname conversion");
5158
case GD_OP_DEFRAG_BRICK_VOLUME:
5159
case GD_OP_SCRUB_STATUS:
5160
case GD_OP_SCRUB_ONDEMAND:
5161
ret = dict_get_int32(op_ctx, "count", &count);
5163
gf_msg_debug(this->name, 0, "Failed to get count");
5170
char *uuid_str = NULL;
5174
for (i = 1; i <= count; i++) {
5175
keylen = snprintf(key, sizeof(key), "node-uuid-%d", i);
5176
ret = dict_get_strn(op_ctx, key, keylen, &uuid_str);
5178
keylen = snprintf(key, sizeof(key), "node-name-%d", i);
5179
uuid = gf_strdup(uuid_str);
5181
gf_msg_debug(this->name, 0,
5182
"unable to create dup of"
5186
ret = dict_set_dynstrn(op_ctx, key, keylen, uuid);
5194
ret = glusterd_op_volume_dict_uuid_to_hostname(
5195
op_ctx, "node-name-%d", 1, (count + 1));
5197
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_CONVERSION_FAILED,
5198
"Failed uuid to hostname conversion");
5204
if (op == GD_OP_SCRUB_STATUS || op == GD_OP_SCRUB_ONDEMAND) {
5208
ret = glusterd_op_check_peer_defrag_status(op_ctx, count);
5210
gf_msg(this->name, GF_LOG_ERROR, 0,
5211
GD_MSG_DEFRAG_STATUS_UPDATE_FAIL,
5212
"Failed to reset defrag status for fix-layout");
5217
gf_msg_debug(this->name, 0, "op_ctx modification not required");
5223
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_OPCTX_UPDATE_FAIL,
5224
"op_ctx modification failed");
5229
glusterd_op_commit_hook(glusterd_op_t op, dict_t *op_ctx,
5230
glusterd_commit_hook_type_t type)
5232
glusterd_conf_t *priv = NULL;
5233
char hookdir[PATH_MAX] = {
5236
char scriptdir[PATH_MAX] = {
5239
char *type_subdir = "";
5240
char *cmd_subdir = NULL;
5244
priv = THIS->private;
5246
case GD_COMMIT_HOOK_NONE:
5247
case GD_COMMIT_HOOK_MAX:
5251
case GD_COMMIT_HOOK_PRE:
5252
type_subdir = "pre";
5254
case GD_COMMIT_HOOK_POST:
5255
type_subdir = "post";
5259
cmd_subdir = glusterd_hooks_get_hooks_cmd_subdir(op);
5260
if (strlen(cmd_subdir) == 0)
5263
GLUSTERD_GET_HOOKS_DIR(hookdir, GLUSTERD_HOOK_VER, priv);
5264
len = snprintf(scriptdir, sizeof(scriptdir), "%s/%s/%s", hookdir,
5265
cmd_subdir, type_subdir);
5266
if ((len < 0) || (len >= sizeof(scriptdir))) {
5271
case GD_COMMIT_HOOK_NONE:
5272
case GD_COMMIT_HOOK_MAX:
5276
case GD_COMMIT_HOOK_PRE:
5277
ret = glusterd_hooks_run_hooks(scriptdir, op, op_ctx, type);
5279
case GD_COMMIT_HOOK_POST:
5280
ret = glusterd_hooks_post_stub_enqueue(scriptdir, op, op_ctx);
5288
glusterd_op_ac_send_commit_op(glusterd_op_sm_event_t *event, void *ctx)
5292
rpc_clnt_procedure_t *proc = NULL;
5293
glusterd_conf_t *priv = NULL;
5294
xlator_t *this = THIS;
5295
dict_t *dict = NULL;
5296
glusterd_peerinfo_t *peerinfo = NULL;
5297
char *op_errstr = NULL;
5298
glusterd_op_t op = GD_OP_NONE;
5299
uint32_t pending_count = 0;
5301
priv = this->private;
5304
op = glusterd_op_get_op();
5306
ret = glusterd_op_build_payload(&dict, &op_errstr, NULL);
5308
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
5309
LOGSTR_BUILD_PAYLOAD, gd_op_list[op]);
5310
if (op_errstr == NULL)
5311
gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
5312
opinfo.op_errstr = op_errstr;
5316
ret = glusterd_op_commit_perform(op, dict, &op_errstr,
5319
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
5320
LOGSTR_COMMIT_FAIL, gd_op_list[op], "localhost",
5321
(op_errstr) ? ":" : " ", (op_errstr) ? op_errstr : " ");
5322
if (op_errstr == NULL)
5323
gf_asprintf(&op_errstr, OPERRSTR_COMMIT_FAIL, "localhost");
5324
opinfo.op_errstr = op_errstr;
5329
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
5334
if (peerinfo->generation > opinfo.txn_generation)
5337
if (!peerinfo->connected || !peerinfo->mgmt)
5339
if ((peerinfo->state != GD_FRIEND_STATE_BEFRIENDED) &&
5340
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
5343
proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_COMMIT_OP];
5346
ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
5349
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
5350
"failed to set peerinfo");
5353
ret = proc->fn(NULL, this, dict);
5355
gf_msg(this->name, GF_LOG_WARNING, 0,
5356
GD_MSG_COMMIT_REQ_SEND_FAIL,
5358
"send commit request for operation "
5359
"'Volume %s' to peer %s",
5360
gd_op_list[op], peerinfo->hostname);
5368
opinfo.pending_count = pending_count;
5369
gf_msg_debug(this->name, 0,
5370
"Sent commit op req for 'Volume %s' "
5372
gd_op_list[op], opinfo.pending_count);
5378
opinfo.op_ret = ret;
5380
ret1 = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
5382
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
5384
"transaction's opinfo");
5387
glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT, &event->txn_id, NULL);
5388
opinfo.op_ret = ret;
5391
if (!opinfo.pending_count) {
5392
if (op == GD_OP_REPLACE_BRICK) {
5393
ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
5395
glusterd_op_modify_op_ctx(op, NULL);
5396
ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
5402
gf_msg_debug(this->name, 0, "Returning with %d", ret);
5408
/*
 * State-machine action: a peer acknowledged the STAGE phase.
 * Decrements the pending-ack count, persists the updated transaction
 * opinfo, and once no acks remain injects GD_OP_EVENT_STAGE_ACC to
 * advance the transaction.
 *
 * NOTE(review): this span was corrupted in the extract (interleaved
 * original-line numbers; dropped lines). Elided braces, 'if (ret)'
 * checks, 'goto out' and 'return' were reconstructed — verify against
 * the upstream file before merging.
 */
static int
glusterd_op_ac_rcvd_stage_op_acc(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    GF_ASSERT(event);

    if (opinfo.pending_count > 0)
        opinfo.pending_count--;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    if (opinfo.pending_count > 0)
        goto out;

    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_STAGE_ACC, &event->txn_id,
                                      NULL);

out:
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);
    return ret;
}
5436
/*
 * State-machine action: a peer reported failure of the STAGE phase.
 * Decrements the pending-ack count, persists opinfo, and once all
 * responses are in injects GD_OP_EVENT_ALL_ACK so the transaction can
 * wind down.
 *
 * NOTE(review): reconstructed from a lossy extract (dropped lines
 * evidenced by interleaved original-line numbers) — verify against
 * upstream before merging.
 */
static int
glusterd_op_ac_stage_op_failed(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    GF_ASSERT(event);

    if (opinfo.pending_count > 0)
        opinfo.pending_count--;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    if (opinfo.pending_count > 0)
        goto out;

    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
                                      NULL);

out:
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);
    return ret;
}
5464
/*
 * State-machine action: a peer reported failure of the COMMIT phase.
 * Decrements the pending-ack count, persists opinfo, and once all
 * responses are in injects GD_OP_EVENT_ALL_ACK.
 *
 * NOTE(review): reconstructed from a lossy extract (dropped lines
 * evidenced by interleaved original-line numbers) — verify against
 * upstream before merging.
 */
static int
glusterd_op_ac_commit_op_failed(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    GF_ASSERT(event);

    if (opinfo.pending_count > 0)
        opinfo.pending_count--;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    if (opinfo.pending_count > 0)
        goto out;

    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
                                      NULL);

out:
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);
    return ret;
}
5492
glusterd_remove_pending_entry(struct cds_list_head *list, void *elem)
5494
glusterd_pending_node_t *pending_node = NULL;
5495
glusterd_pending_node_t *tmp = NULL;
5498
cds_list_for_each_entry_safe(pending_node, tmp, list, list)
5500
if (elem == pending_node->node) {
5501
cds_list_del_init(&pending_node->list);
5502
GF_FREE(pending_node);
5508
gf_msg_debug(THIS->name, 0, "returning %d", ret);
5513
glusterd_op_ac_brick_op_failed(glusterd_op_sm_event_t *event, void *ctx)
5516
glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
5517
gf_boolean_t free_errstr = _gf_false;
5518
xlator_t *this = THIS;
5524
ret = glusterd_remove_pending_entry(&opinfo.pending_bricks,
5525
ev_ctx->pending_node->node);
5527
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNKNOWN_RESPONSE,
5528
"unknown response received ");
5530
free_errstr = _gf_true;
5533
if (opinfo.brick_pending_count > 0)
5534
opinfo.brick_pending_count--;
5535
if (opinfo.op_ret == 0)
5536
opinfo.op_ret = ev_ctx->op_ret;
5538
if (opinfo.op_errstr == NULL)
5539
opinfo.op_errstr = ev_ctx->op_errstr;
5541
free_errstr = _gf_true;
5543
ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
5545
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
5547
"transaction's opinfo");
5549
if (opinfo.brick_pending_count > 0)
5552
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
5553
ev_ctx->commit_ctx);
5556
if (ev_ctx->rsp_dict)
5557
dict_unref(ev_ctx->rsp_dict);
5558
if (free_errstr && ev_ctx->op_errstr)
5559
GF_FREE(ev_ctx->op_errstr);
5561
gf_msg_debug(this->name, 0, "Returning %d", ret);
5567
glusterd_op_ac_rcvd_commit_op_acc(glusterd_op_sm_event_t *event, void *ctx)
5570
gf_boolean_t commit_ack_inject = _gf_true;
5571
glusterd_op_t op = GD_OP_NONE;
5572
xlator_t *this = THIS;
5574
op = glusterd_op_get_op();
5577
if (opinfo.pending_count > 0)
5578
opinfo.pending_count--;
5580
ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
5582
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
5584
"transaction's opinfo");
5586
if (opinfo.pending_count > 0)
5589
if (op == GD_OP_REPLACE_BRICK) {
5590
ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
5592
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RBOP_START_FAIL,
5594
"replace-brick operation.");
5598
commit_ack_inject = _gf_false;
5603
if (commit_ack_inject) {
5605
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT,
5606
&event->txn_id, NULL);
5607
else if (!opinfo.pending_count) {
5608
glusterd_op_modify_op_ctx(op, NULL);
5609
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_COMMIT_ACC,
5610
&event->txn_id, NULL);
5619
/*
 * State-machine action: a peer acknowledged the UNLOCK phase.
 * Decrements the pending-ack count, persists opinfo, and once all
 * acks are in injects GD_OP_EVENT_ALL_ACC.
 *
 * NOTE(review): reconstructed from a lossy extract (dropped lines
 * evidenced by interleaved original-line numbers) — verify against
 * upstream before merging.
 */
static int
glusterd_op_ac_rcvd_unlock_acc(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    GF_ASSERT(event);

    if (opinfo.pending_count > 0)
        opinfo.pending_count--;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    if (opinfo.pending_count > 0)
        goto out;

    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACC, &event->txn_id,
                                      NULL);

out:
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);
    return ret;
}
5647
glusterd_op_clear_errstr(void)
5649
opinfo.op_errstr = NULL;
5654
glusterd_op_set_ctx(void *ctx)
5656
opinfo.op_ctx = ctx;
5662
glusterd_op_reset_ctx(void)
5664
glusterd_op_set_ctx(NULL);
5670
glusterd_op_txn_complete(uuid_t *txn_id)
5673
glusterd_conf_t *priv = NULL;
5676
int32_t op_errno = 0;
5677
rpcsvc_request_t *req = NULL;
5679
char *op_errstr = NULL;
5680
char *volname = NULL;
5681
xlator_t *this = THIS;
5683
priv = this->private;
5686
op = glusterd_op_get_op();
5687
ctx = glusterd_op_get_ctx();
5688
op_ret = opinfo.op_ret;
5689
op_errno = opinfo.op_errno;
5691
if (opinfo.op_errstr)
5692
op_errstr = opinfo.op_errstr;
5695
opinfo.op_errno = 0;
5697
ret = dict_get_str(ctx, "volname", &volname);
5699
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
5700
"No Volume name present. "
5701
"Locks have not been held.");
5704
ret = glusterd_mgmt_v3_unlock(volname, MY_UUID, "vol");
5706
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
5707
"Unable to release lock for %s", volname);
5710
ret = glusterd_op_send_cli_response(op, op_ret, op_errno, req, ctx,
5714
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_CLI_RESP,
5715
"Responding to cli failed, "
5722
if (op_errstr && (strcmp(op_errstr, "")))
5725
if (priv->pending_quorum_action)
5726
glusterd_do_quorum_action();
5729
ret = glusterd_clear_txn_opinfo(txn_id);
5731
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_CLEAR_FAIL,
5732
"Unable to clear transaction's opinfo");
5734
gf_msg_debug(this->name, 0, "Returning %d", ret);
5739
/*
 * State-machine action: all peers have released their locks — finish
 * the transaction (respond to CLI, release local lock, clear opinfo).
 *
 * NOTE(review): reconstructed from a lossy extract; elided declaration
 * and return lines restored — verify against upstream.
 */
static int
glusterd_op_ac_unlocked_all(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    GF_ASSERT(event);

    ret = glusterd_op_txn_complete(&event->txn_id);

    gf_msg_debug(THIS->name, 0, "Returning %d", ret);

    return ret;
}
5753
glusterd_op_ac_stage_op(glusterd_op_sm_event_t *event, void *ctx)
5756
glusterd_req_ctx_t *req_ctx = NULL;
5758
dict_t *rsp_dict = NULL;
5759
char *op_errstr = NULL;
5760
dict_t *dict = NULL;
5761
xlator_t *this = THIS;
5762
uuid_t *txn_id = NULL;
5763
glusterd_op_info_t txn_op_info = {
5764
GD_OP_STATE_DEFAULT,
5766
glusterd_conf_t *priv = NULL;
5768
priv = this->private;
5775
dict = req_ctx->dict;
5777
rsp_dict = dict_new();
5779
gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
5780
"Failed to get new dictionary");
5784
status = glusterd_op_stage_validate(req_ctx->op, dict, &op_errstr,
5788
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VALIDATE_FAILED,
5789
"Stage failed on operation"
5790
" 'Volume %s', Status : %d",
5791
gd_op_list[req_ctx->op], status);
5794
txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
5797
gf_uuid_copy(*txn_id, event->txn_id);
5802
ret = glusterd_get_txn_opinfo(&event->txn_id, &txn_op_info);
5804
ret = dict_set_bin(rsp_dict, "transaction_id", txn_id, sizeof(*txn_id));
5806
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
5807
"Failed to set transaction id.");
5813
ret = glusterd_op_stage_send_resp(req_ctx->req, req_ctx->op, status,
5814
op_errstr, rsp_dict);
5817
if (op_errstr && (strcmp(op_errstr, "")))
5820
gf_msg_debug(this->name, 0, "Returning with %d", ret);
5828
if (txn_op_info.skip_locking && priv->op_version >= GD_OP_VERSION_6_0 &&
5830
ret = glusterd_clear_txn_opinfo(txn_id);
5833
dict_unref(rsp_dict);
5839
glusterd_need_brick_op(glusterd_op_t op)
5841
gf_boolean_t ret = _gf_false;
5843
GF_ASSERT(GD_OP_NONE < op && op < GD_OP_MAX);
5846
case GD_OP_PROFILE_VOLUME:
5847
case GD_OP_STATUS_VOLUME:
5848
case GD_OP_DEFRAG_BRICK_VOLUME:
5849
case GD_OP_HEAL_VOLUME:
5850
case GD_OP_SCRUB_STATUS:
5851
case GD_OP_SCRUB_ONDEMAND:
5862
glusterd_op_init_commit_rsp_dict(glusterd_op_t op)
5864
dict_t *rsp_dict = NULL;
5865
dict_t *op_ctx = NULL;
5867
GF_ASSERT(GD_OP_NONE < op && op < GD_OP_MAX);
5869
if (glusterd_need_brick_op(op)) {
5870
op_ctx = glusterd_op_get_ctx();
5872
rsp_dict = dict_ref(op_ctx);
5874
rsp_dict = dict_new();
5881
glusterd_op_ac_commit_op(glusterd_op_sm_event_t *event, void *ctx)
5884
glusterd_req_ctx_t *req_ctx = NULL;
5886
char *op_errstr = NULL;
5887
dict_t *dict = NULL;
5888
dict_t *rsp_dict = NULL;
5889
xlator_t *this = THIS;
5890
uuid_t *txn_id = NULL;
5891
glusterd_op_info_t txn_op_info = {
5892
GD_OP_STATE_DEFAULT,
5894
gf_boolean_t need_cleanup = _gf_true;
5900
dict = req_ctx->dict;
5902
rsp_dict = glusterd_op_init_commit_rsp_dict(req_ctx->op);
5903
if (NULL == rsp_dict)
5906
if (GD_OP_CLEARLOCKS_VOLUME == req_ctx->op) {
5912
status = glusterd_op_commit_perform(req_ctx->op, dict, &op_errstr,
5917
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
5918
"Commit of operation "
5919
"'Volume %s' failed: %d",
5920
gd_op_list[req_ctx->op], status);
5922
txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
5925
gf_uuid_copy(*txn_id, event->txn_id);
5930
ret = glusterd_get_txn_opinfo(&event->txn_id, &txn_op_info);
5932
gf_msg_callingfn(this->name, GF_LOG_ERROR, 0,
5933
GD_MSG_TRANS_OPINFO_GET_FAIL,
5934
"Unable to get transaction opinfo "
5935
"for transaction ID : %s",
5936
uuid_utoa(event->txn_id));
5940
ret = dict_set_bin(rsp_dict, "transaction_id", txn_id, sizeof(*txn_id));
5942
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
5943
"Failed to set transaction id.");
5944
if (txn_op_info.skip_locking)
5945
ret = glusterd_clear_txn_opinfo(txn_id);
5946
need_cleanup = _gf_false;
5951
ret = glusterd_op_commit_send_resp(req_ctx->req, req_ctx->op, status,
5952
op_errstr, rsp_dict);
5955
if (op_errstr && (strcmp(op_errstr, "")))
5959
dict_unref(rsp_dict);
5963
if (need_cleanup && txn_id && txn_op_info.skip_locking)
5964
ret = glusterd_clear_txn_opinfo(txn_id);
5965
gf_msg_debug(this->name, 0, "Returning with %d", ret);
5971
glusterd_op_ac_send_commit_failed(glusterd_op_sm_event_t *event, void *ctx)
5974
glusterd_req_ctx_t *req_ctx = NULL;
5975
dict_t *op_ctx = NULL;
5981
op_ctx = glusterd_op_get_ctx();
5983
ret = glusterd_op_commit_send_resp(req_ctx->req, req_ctx->op, opinfo.op_ret,
5984
opinfo.op_errstr, op_ctx);
5986
if (opinfo.op_errstr && (strcmp(opinfo.op_errstr, ""))) {
5987
GF_FREE(opinfo.op_errstr);
5988
opinfo.op_errstr = NULL;
5991
ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
5993
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
5995
"transaction's opinfo");
5997
gf_msg_debug(THIS->name, 0, "Returning with %d", ret);
6002
glusterd_op_sm_transition_state(glusterd_op_info_t *opinfo,
6003
glusterd_op_sm_t *state,
6004
glusterd_op_sm_event_type_t event_type)
6006
glusterd_conf_t *conf = NULL;
6011
conf = THIS->private;
6014
(void)glusterd_sm_tr_log_transition_add(&conf->op_sm_log, opinfo->state,
6015
state[event_type].next_state,
6018
opinfo->state = state[event_type].next_state;
6023
glusterd_op_stage_validate(glusterd_op_t op, dict_t *dict, char **op_errstr,
6027
xlator_t *this = THIS;
6030
case GD_OP_CREATE_VOLUME:
6031
ret = glusterd_op_stage_create_volume(dict, op_errstr, rsp_dict);
6034
case GD_OP_START_VOLUME:
6035
ret = glusterd_op_stage_start_volume(dict, op_errstr, rsp_dict);
6038
case GD_OP_STOP_VOLUME:
6039
ret = glusterd_op_stage_stop_volume(dict, op_errstr);
6042
case GD_OP_DELETE_VOLUME:
6043
ret = glusterd_op_stage_delete_volume(dict, op_errstr);
6046
case GD_OP_ADD_BRICK:
6047
ret = glusterd_op_stage_add_brick(dict, op_errstr, rsp_dict);
6050
case GD_OP_REPLACE_BRICK:
6051
ret = glusterd_op_stage_replace_brick(dict, op_errstr, rsp_dict);
6054
case GD_OP_SET_VOLUME:
6055
ret = glusterd_op_stage_set_volume(dict, op_errstr);
6059
ret = glusterd_op_stage_set_ganesha(dict, op_errstr);
6062
case GD_OP_RESET_VOLUME:
6063
ret = glusterd_op_stage_reset_volume(dict, op_errstr);
6065
case GD_OP_REMOVE_BRICK:
6066
ret = glusterd_op_stage_remove_brick(dict, op_errstr);
6069
case GD_OP_LOG_ROTATE:
6070
ret = glusterd_op_stage_log_rotate(dict, op_errstr);
6073
case GD_OP_SYNC_VOLUME:
6074
ret = glusterd_op_stage_sync_volume(dict, op_errstr);
6077
case GD_OP_GSYNC_CREATE:
6078
ret = glusterd_op_stage_gsync_create(dict, op_errstr);
6081
case GD_OP_GSYNC_SET:
6082
ret = glusterd_op_stage_gsync_set(dict, op_errstr);
6085
case GD_OP_PROFILE_VOLUME:
6086
ret = glusterd_op_stage_stats_volume(dict, op_errstr);
6090
ret = glusterd_op_stage_quota(dict, op_errstr, rsp_dict);
6093
case GD_OP_STATUS_VOLUME:
6094
ret = glusterd_op_stage_status_volume(dict, op_errstr);
6097
case GD_OP_REBALANCE:
6098
case GD_OP_DEFRAG_BRICK_VOLUME:
6099
ret = glusterd_op_stage_rebalance(dict, op_errstr);
6102
case GD_OP_HEAL_VOLUME:
6103
ret = glusterd_op_stage_heal_volume(dict, op_errstr);
6106
case GD_OP_STATEDUMP_VOLUME:
6107
ret = glusterd_op_stage_statedump_volume(dict, op_errstr);
6109
case GD_OP_CLEARLOCKS_VOLUME:
6110
ret = glusterd_op_stage_clearlocks_volume(dict, op_errstr);
6113
case GD_OP_COPY_FILE:
6114
ret = glusterd_op_stage_copy_file(dict, op_errstr);
6117
case GD_OP_SYS_EXEC:
6118
ret = glusterd_op_stage_sys_exec(dict, op_errstr);
6122
ret = glusterd_op_stage_barrier(dict, op_errstr);
6126
case GD_OP_SCRUB_STATUS:
6127
case GD_OP_SCRUB_ONDEMAND:
6128
ret = glusterd_op_stage_bitrot(dict, op_errstr, rsp_dict);
6132
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
6133
"Unknown op %s", gd_op_list[op]);
6136
gf_msg_debug(this->name, 0, "OP = %d. Returning %d", op, ret);
6141
/*
 * Block the calling synctask until all outstanding blockers have
 * drained. synccond_wait releases big_lock while sleeping and
 * re-acquires it before re-testing the counter.
 *
 * NOTE(review): braces/return type were elided in the lossy extract
 * and reconstructed — verify against upstream.
 */
static void
glusterd_wait_for_blockers(glusterd_conf_t *priv)
{
    while (GF_ATOMIC_GET(priv->blockers)) {
        synccond_wait(&priv->cond_blockers, &priv->big_lock);
    }
}
6149
glusterd_op_commit_perform(glusterd_op_t op, dict_t *dict, char **op_errstr,
6153
xlator_t *this = THIS;
6155
glusterd_op_commit_hook(op, dict, GD_COMMIT_HOOK_PRE);
6157
case GD_OP_CREATE_VOLUME:
6158
ret = glusterd_op_create_volume(dict, op_errstr);
6161
case GD_OP_START_VOLUME:
6162
ret = glusterd_op_start_volume(dict, op_errstr);
6165
case GD_OP_STOP_VOLUME:
6166
ret = glusterd_op_stop_volume(dict);
6169
case GD_OP_DELETE_VOLUME:
6170
glusterd_wait_for_blockers(this->private);
6171
ret = glusterd_op_delete_volume(dict);
6174
case GD_OP_ADD_BRICK:
6175
glusterd_wait_for_blockers(this->private);
6176
ret = glusterd_op_add_brick(dict, op_errstr);
6179
case GD_OP_REPLACE_BRICK:
6180
glusterd_wait_for_blockers(this->private);
6181
ret = glusterd_op_replace_brick(dict, rsp_dict);
6184
case GD_OP_SET_VOLUME:
6185
ret = glusterd_op_set_volume(dict, op_errstr);
6188
ret = glusterd_op_set_ganesha(dict, op_errstr);
6190
case GD_OP_RESET_VOLUME:
6191
ret = glusterd_op_reset_volume(dict, op_errstr);
6194
case GD_OP_REMOVE_BRICK:
6195
glusterd_wait_for_blockers(this->private);
6196
ret = glusterd_op_remove_brick(dict, op_errstr);
6199
case GD_OP_LOG_ROTATE:
6200
ret = glusterd_op_log_rotate(dict);
6203
case GD_OP_SYNC_VOLUME:
6204
ret = glusterd_op_sync_volume(dict, op_errstr, rsp_dict);
6207
case GD_OP_GSYNC_CREATE:
6208
ret = glusterd_op_gsync_create(dict, op_errstr, rsp_dict);
6211
case GD_OP_GSYNC_SET:
6212
ret = glusterd_op_gsync_set(dict, op_errstr, rsp_dict);
6215
case GD_OP_PROFILE_VOLUME:
6216
ret = glusterd_op_stats_volume(dict, op_errstr, rsp_dict);
6220
ret = glusterd_op_quota(dict, op_errstr, rsp_dict);
6223
case GD_OP_STATUS_VOLUME:
6224
ret = glusterd_op_status_volume(dict, op_errstr, rsp_dict);
6227
case GD_OP_REBALANCE:
6228
case GD_OP_DEFRAG_BRICK_VOLUME:
6229
ret = glusterd_op_rebalance(dict, op_errstr, rsp_dict);
6232
case GD_OP_HEAL_VOLUME:
6233
ret = glusterd_op_heal_volume(dict, op_errstr);
6236
case GD_OP_STATEDUMP_VOLUME:
6237
ret = glusterd_op_statedump_volume(dict, op_errstr);
6240
case GD_OP_CLEARLOCKS_VOLUME:
6241
ret = glusterd_op_clearlocks_volume(dict, op_errstr, rsp_dict);
6244
case GD_OP_COPY_FILE:
6245
ret = glusterd_op_copy_file(dict, op_errstr);
6248
case GD_OP_SYS_EXEC:
6249
ret = glusterd_op_sys_exec(dict, op_errstr, rsp_dict);
6253
ret = glusterd_op_barrier(dict, op_errstr);
6257
case GD_OP_SCRUB_STATUS:
6258
case GD_OP_SCRUB_ONDEMAND:
6259
ret = glusterd_op_bitrot(dict, op_errstr, rsp_dict);
6263
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
6264
"Unknown op %s", gd_op_list[op]);
6269
glusterd_op_commit_hook(op, dict, GD_COMMIT_HOOK_POST);
6271
gf_msg_debug(this->name, 0, "Returning %d", ret);
6276
glusterd_bricks_select_stop_volume(dict_t *dict, char **op_errstr,
6277
struct cds_list_head *selected)
6281
char *volname = NULL;
6282
glusterd_volinfo_t *volinfo = NULL;
6283
glusterd_brickinfo_t *brickinfo = NULL;
6284
glusterd_pending_node_t *pending_node = NULL;
6286
ret = glusterd_op_stop_volume_args_get(dict, &volname, &flags);
6290
ret = glusterd_volinfo_find(volname, &volinfo);
6292
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
6293
FMTSTR_CHECK_VOL_EXISTS, volname);
6294
gf_asprintf(op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
6298
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
6300
if (glusterd_is_brick_started(brickinfo)) {
6301
pending_node = GF_CALLOC(1, sizeof(*pending_node),
6302
gf_gld_mt_pending_node_t);
6303
if (!pending_node) {
6307
pending_node->node = brickinfo;
6308
pending_node->type = GD_NODE_BRICK;
6309
cds_list_add_tail(&pending_node->list, selected);
6310
pending_node = NULL;
6317
brickinfo->status = GF_BRICK_STOPPED;
6326
glusterd_bricks_select_remove_brick(dict_t *dict, char **op_errstr,
6327
struct cds_list_head *selected)
6330
char *volname = NULL;
6331
glusterd_volinfo_t *volinfo = NULL;
6332
glusterd_brickinfo_t *brickinfo = NULL;
6340
glusterd_pending_node_t *pending_node = NULL;
6341
int32_t command = 0;
6344
ret = dict_get_str(dict, "volname", &volname);
6347
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
6348
"Unable to get volume name");
6352
ret = glusterd_volinfo_find(volname, &volinfo);
6355
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
6356
"Unable to allocate memory");
6360
ret = dict_get_int32(dict, "count", &count);
6362
gf_msg("glusterd", GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
6363
"Unable to get count");
6367
ret = dict_get_int32(dict, "command", &command);
6369
gf_msg("glusterd", GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
6370
"Unable to get command");
6374
ret = dict_get_int32(dict, "force", &force);
6376
gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
6377
"force flag is not set");
6382
while (i <= count) {
6383
keylen = snprintf(key, sizeof(key), "brick%d", i);
6385
ret = dict_get_strn(dict, key, keylen, &brick);
6387
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
6388
"Unable to get brick");
6392
ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
6398
if (glusterd_is_brick_started(brickinfo)) {
6399
pending_node = GF_CALLOC(1, sizeof(*pending_node),
6400
gf_gld_mt_pending_node_t);
6401
if (!pending_node) {
6405
pending_node->node = brickinfo;
6406
pending_node->type = GD_NODE_BRICK;
6407
cds_list_add_tail(&pending_node->list, selected);
6408
pending_node = NULL;
6415
brickinfo->status = GF_BRICK_STOPPED;
6425
glusterd_bricks_select_profile_volume(dict_t *dict, char **op_errstr,
6426
struct cds_list_head *selected)
6429
char *volname = NULL;
6433
glusterd_conf_t *priv = NULL;
6434
glusterd_volinfo_t *volinfo = NULL;
6435
xlator_t *this = THIS;
6436
int32_t stats_op = GF_CLI_STATS_NONE;
6437
glusterd_brickinfo_t *brickinfo = NULL;
6438
glusterd_pending_node_t *pending_node = NULL;
6441
char pidfile[PATH_MAX] = {0};
6443
priv = this->private;
6446
ret = dict_get_str(dict, "volname", &volname);
6448
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
6449
"volume name get failed");
6453
ret = glusterd_volinfo_find(volname, &volinfo);
6455
snprintf(msg, sizeof(msg), "Volume %s does not exists", volname);
6457
*op_errstr = gf_strdup(msg);
6458
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
6462
ret = dict_get_int32(dict, "op", &stats_op);
6464
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
6465
"volume profile op get failed");
6470
case GF_CLI_STATS_START:
6471
case GF_CLI_STATS_STOP:
6474
case GF_CLI_STATS_INFO:
6476
ret = dict_get_str_boolean(dict, "nfs", _gf_false);
6478
if (!priv->nfs_svc.online) {
6480
gf_msg(this->name, GF_LOG_ERROR, 0,
6481
GD_MSG_NFS_SERVER_NOT_RUNNING,
6486
pending_node = GF_CALLOC(1, sizeof(*pending_node),
6487
gf_gld_mt_pending_node_t);
6488
if (!pending_node) {
6492
pending_node->node = &(priv->nfs_svc);
6493
pending_node->type = GD_NODE_NFS;
6494
cds_list_add_tail(&pending_node->list, selected);
6495
pending_node = NULL;
6501
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
6503
if (glusterd_is_brick_started(brickinfo)) {
6511
GLUSTERD_GET_BRICK_PIDFILE(pidfile, volinfo, brickinfo,
6513
if (!gf_is_service_running(pidfile, &pid)) {
6516
pending_node = GF_CALLOC(1, sizeof(*pending_node),
6517
gf_gld_mt_pending_node_t);
6518
if (!pending_node) {
6522
pending_node->node = brickinfo;
6523
pending_node->type = GD_NODE_BRICK;
6524
cds_list_add_tail(&pending_node->list, selected);
6525
pending_node = NULL;
6531
case GF_CLI_STATS_TOP:
6533
ret = dict_get_str_boolean(dict, "nfs", _gf_false);
6535
if (!priv->nfs_svc.online) {
6537
gf_msg(this->name, GF_LOG_ERROR, 0,
6538
GD_MSG_NFS_SERVER_NOT_RUNNING,
6543
pending_node = GF_CALLOC(1, sizeof(*pending_node),
6544
gf_gld_mt_pending_node_t);
6545
if (!pending_node) {
6549
pending_node->node = &(priv->nfs_svc);
6550
pending_node->type = GD_NODE_NFS;
6551
cds_list_add_tail(&pending_node->list, selected);
6552
pending_node = NULL;
6558
ret = dict_get_str(dict, "brick", &brick);
6560
ret = glusterd_volume_brickinfo_get_by_brick(
6561
brick, volinfo, &brickinfo, _gf_true);
6565
if (!glusterd_is_brick_started(brickinfo))
6568
pending_node = GF_CALLOC(1, sizeof(*pending_node),
6569
gf_gld_mt_pending_node_t);
6570
if (!pending_node) {
6574
pending_node->node = brickinfo;
6575
pending_node->type = GD_NODE_BRICK;
6576
cds_list_add_tail(&pending_node->list, selected);
6577
pending_node = NULL;
6582
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
6584
if (glusterd_is_brick_started(brickinfo)) {
6585
pending_node = GF_CALLOC(1, sizeof(*pending_node),
6586
gf_gld_mt_pending_node_t);
6587
if (!pending_node) {
6591
pending_node->node = brickinfo;
6592
pending_node->type = GD_NODE_BRICK;
6593
cds_list_add_tail(&pending_node->list, selected);
6594
pending_node = NULL;
6602
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
6603
"Invalid profile op: %d", stats_op);
6610
gf_msg_debug("glusterd", 0, "Returning %d", ret);
6616
/*
 * Number of bricks per heal-xlator (hxl) subvolume: the disperse
 * count for disperse-type volumes, the replica count otherwise.
 *
 * NOTE(review): 'else' wrapper and return type were elided in the
 * lossy extract and reconstructed — verify against upstream.
 */
static int
_get_hxl_children_count(glusterd_volinfo_t *volinfo)
{
    if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
        return volinfo->disperse_count;
    } else {
        return volinfo->replica_count;
    }
}
6626
_add_hxlator_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int index,
6637
if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
6638
xl_type = "disperse";
6640
xl_type = "replicate";
6642
keylen = snprintf(key, sizeof(key), "xl-%d", count);
6643
ret = gf_asprintf(&xname, "%s-%s-%d", volinfo->volname, xl_type, index);
6647
ret = dict_set_dynstrn(dict, key, keylen, xname);
6651
ret = dict_set_int32(dict, xname, index);
6657
get_replica_index_for_per_replica_cmd(glusterd_volinfo_t *volinfo, dict_t *dict)
6660
char *hostname = NULL;
6663
glusterd_brickinfo_t *brickinfo = NULL;
6664
int cmd_replica_index = -1;
6665
int replica_count = -1;
6672
ret = dict_get_str(dict, "per-replica-cmd-hostname", &hostname);
6675
ret = dict_get_str(dict, "per-replica-cmd-path", &path);
6679
replica_count = volinfo->replica_count;
6681
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
6683
if (gf_uuid_is_null(brickinfo->uuid))
6684
(void)glusterd_resolve_brick(brickinfo);
6685
if (!strcmp(brickinfo->path, path) &&
6686
!strcmp(brickinfo->hostname, hostname)) {
6687
cmd_replica_index = index / (replica_count);
6695
cmd_replica_index = -1;
6697
return cmd_replica_index;
6701
_select_hxlator_with_matching_brick(xlator_t *this, glusterd_volinfo_t *volinfo,
6702
dict_t *dict, int *index)
6705
glusterd_brickinfo_t *brickinfo = NULL;
6706
int hxl_children = 0;
6708
if (!dict || dict_get_str(dict, "per-replica-cmd-path", &path))
6711
hxl_children = _get_hxl_children_count(volinfo);
6715
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
6717
if (gf_uuid_is_null(brickinfo->uuid))
6718
(void)glusterd_resolve_brick(brickinfo);
6720
if ((!gf_uuid_compare(MY_UUID, brickinfo->uuid)) &&
6721
(!strncmp(brickinfo->path, path, strlen(path)))) {
6722
_add_hxlator_to_dict(dict, volinfo, ((*index) - 1) / hxl_children,
6732
_select_hxlators_with_local_bricks(xlator_t *this, glusterd_volinfo_t *volinfo,
6733
dict_t *dict, int *index, int *hxlator_count)
6735
glusterd_brickinfo_t *brickinfo = NULL;
6736
int hxl_children = 0;
6737
gf_boolean_t add = _gf_false;
6739
hxl_children = _get_hxl_children_count(volinfo);
6744
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
6746
if (gf_uuid_is_null(brickinfo->uuid))
6747
(void)glusterd_resolve_brick(brickinfo);
6749
if (!gf_uuid_compare(MY_UUID, brickinfo->uuid))
6752
if ((*index) % hxl_children == 0) {
6754
_add_hxlator_to_dict(dict, volinfo,
6755
((*index) - 1) / hxl_children,
6767
_select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo,
6768
dict_t *dict, int *index,
6771
glusterd_brickinfo_t *brickinfo = NULL;
6772
int hxl_children = 0;
6773
uuid_t candidate = {0};
6774
int brick_index = 0;
6775
glusterd_peerinfo_t *peerinfo = NULL;
6777
uuid_t candidate_max = {0};
6781
if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
6782
hxl_children = volinfo->disperse_count;
6784
hxl_children = volinfo->replica_count;
6787
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
6789
if (gf_uuid_compare(brickinfo->uuid, candidate_max) > 0) {
6790
if (!gf_uuid_compare(MY_UUID, brickinfo->uuid)) {
6791
gf_uuid_copy(candidate_max, brickinfo->uuid);
6793
peerinfo = glusterd_peerinfo_find(brickinfo->uuid, NULL);
6794
if (peerinfo && peerinfo->connected) {
6795
gf_uuid_copy(candidate_max, brickinfo->uuid);
6801
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
6803
if (gf_uuid_is_null(brickinfo->uuid))
6804
(void)glusterd_resolve_brick(brickinfo);
6806
delta %= hxl_children;
6807
if ((*index + delta) == (brick_index + hxl_children)) {
6808
if (!gf_uuid_compare(MY_UUID, brickinfo->uuid)) {
6809
gf_uuid_copy(candidate, brickinfo->uuid);
6811
peerinfo = glusterd_peerinfo_find(brickinfo->uuid, NULL);
6812
if (peerinfo && peerinfo->connected) {
6813
gf_uuid_copy(candidate, brickinfo->uuid);
6814
} else if (peerinfo &&
6815
(!gf_uuid_compare(candidate_max, MY_UUID))) {
6816
_add_hxlator_to_dict(dict, volinfo,
6817
((*index) - 1) / hxl_children,
6823
if (!gf_uuid_compare(MY_UUID, candidate)) {
6824
_add_hxlator_to_dict(dict, volinfo,
6825
((*index) - 1) / hxl_children,
6829
gf_uuid_clear(candidate);
6830
brick_index += hxl_children;
6836
return *hxlator_count;
6840
glusterd_bricks_select_snap(dict_t *dict, char **op_errstr,
6841
struct cds_list_head *selected)
6844
xlator_t *this = THIS;
6845
glusterd_pending_node_t *pending_node = NULL;
6846
glusterd_volinfo_t *volinfo = NULL;
6847
char *volname = NULL;
6848
glusterd_brickinfo_t *brickinfo = NULL;
6849
int brick_index = -1;
6851
ret = dict_get_str(dict, "volname", &volname);
6853
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
6858
ret = glusterd_volinfo_find(volname, &volinfo);
6862
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
6865
if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
6866
!glusterd_is_brick_started(brickinfo)) {
6869
pending_node = GF_CALLOC(1, sizeof(*pending_node),
6870
gf_gld_mt_pending_node_t);
6871
if (!pending_node) {
6875
pending_node->node = brickinfo;
6876
pending_node->type = GD_NODE_BRICK;
6877
pending_node->index = brick_index;
6878
cds_list_add_tail(&pending_node->list, selected);
6879
pending_node = NULL;
6885
gf_msg_debug(this->name, 0, "Returning ret %d", ret);
6890
fill_shd_status_for_local_bricks(dict_t *dict, glusterd_volinfo_t *volinfo,
6891
cli_cmd_type type, int *index,
6894
glusterd_brickinfo_t *brickinfo = NULL;
6895
static char *msg = "self-heal-daemon is not running on";
6904
xlator_t *this = THIS;
6905
int cmd_replica_index = -1;
6907
if (type == PER_HEAL_XL) {
6908
cmd_replica_index = get_replica_index_for_per_replica_cmd(volinfo,
6910
if (cmd_replica_index == -1) {
6911
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REPLICA_INDEX_GET_FAIL,
6912
"Could not find the "
6913
"replica index for per replica type command");
6919
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
6921
if (gf_uuid_is_null(brickinfo->uuid))
6922
(void)glusterd_resolve_brick(brickinfo);
6924
if (gf_uuid_compare(MY_UUID, brickinfo->uuid)) {
6929
if (type == PER_HEAL_XL) {
6930
if (cmd_replica_index != ((*index) / volinfo->replica_count)) {
6935
keylen = snprintf(key, sizeof(key), "%d-status", (*index));
6936
snprintf(value, sizeof(value), "%s %s", msg, uuid_utoa(MY_UUID));
6937
ret = dict_set_dynstrn(dict, key, keylen, gf_strdup(value));
6939
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
6941
"set the dictionary for shd status msg");
6944
keylen = snprintf(key, sizeof(key), "%d-shd-status", (*index));
6945
ret = dict_set_nstrn(dict, key, keylen, "off", SLEN("off"));
6947
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
6949
" set dictionary for shd status msg");
6960
glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op,
6961
glusterd_volinfo_t *volinfo, int *index,
6962
int *hxlator_count, dict_t *rsp_dict)
6965
xlator_t *this = THIS;
6966
glusterd_svc_t *svc = NULL;
6968
svc = &(volinfo->shd.svc);
6971
case GF_SHD_OP_INDEX_SUMMARY:
6972
case GF_SHD_OP_STATISTICS_HEAL_COUNT:
6975
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
6981
ret = fill_shd_status_for_local_bricks(
6982
rsp_dict, volinfo, ALL_HEAL_XL, index, dict);
6984
gf_msg(this->name, GF_LOG_ERROR, 0,
6985
GD_MSG_SHD_STATUS_SET_FAIL,
6987
"fill the shd status for the local "
6993
case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
6996
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
7001
ret = fill_shd_status_for_local_bricks(
7002
rsp_dict, volinfo, PER_HEAL_XL, index, dict);
7004
gf_msg(this->name, GF_LOG_ERROR, 0,
7005
GD_MSG_SHD_STATUS_SET_FAIL,
7007
"fill the shd status for the local"
7018
case GF_SHD_OP_HEAL_FULL:
7019
_select_hxlators_for_full_self_heal(this, volinfo, dict, index,
7022
case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
7023
(*hxlator_count) += _select_hxlator_with_matching_brick(
7024
this, volinfo, dict, index);
7027
_select_hxlators_with_local_bricks(this, volinfo, dict, index,
7031
ret = (*hxlator_count);
7037
glusterd_bricks_select_heal_volume(dict_t *dict, char **op_errstr,
7038
struct cds_list_head *selected,
7042
char *volname = NULL;
7043
glusterd_volinfo_t *volinfo = NULL;
7044
xlator_t *this = THIS;
7048
glusterd_pending_node_t *pending_node = NULL;
7049
gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
7050
int hxlator_count = 0;
7053
ret = dict_get_str(dict, "volname", &volname);
7055
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
7056
"volume name get failed");
7060
ret = glusterd_volinfo_find(volname, &volinfo);
7062
snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
7064
*op_errstr = gf_strdup(msg);
7065
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
7069
ret = dict_get_int32(dict, "heal-op", (int32_t *)&heal_op);
7070
if (ret || (heal_op == GF_SHD_OP_INVALID)) {
7071
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
7075
ret = glusterd_shd_select_brick_xlator(dict, heal_op, volinfo, &index,
7076
&hxlator_count, rsp_dict);
7083
if (hxlator_count == -1) {
7084
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_XLATOR_COUNT_GET_FAIL,
7085
"Could not determine the"
7086
"translator count");
7091
ret = dict_set_int32_sizen(dict, "count", hxlator_count);
7094
pending_node = GF_CALLOC(1, sizeof(*pending_node),
7095
gf_gld_mt_pending_node_t);
7096
if (!pending_node) {
7100
pending_node->node = &(volinfo->shd.svc);
7101
pending_node->type = GD_NODE_SHD;
7102
cds_list_add_tail(&pending_node->list, selected);
7103
pending_node = NULL;
7107
gf_msg_debug(this->name, 0, "Returning ret %d", ret);
7112
glusterd_bricks_select_rebalance_volume(dict_t *dict, char **op_errstr,
7113
struct cds_list_head *selected)
7116
char *volname = NULL;
7117
glusterd_volinfo_t *volinfo = NULL;
7121
glusterd_pending_node_t *pending_node = NULL;
7123
ret = dict_get_str(dict, "volname", &volname);
7125
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
7126
"volume name get failed");
7130
ret = glusterd_volinfo_find(volname, &volinfo);
7132
snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
7134
*op_errstr = gf_strdup(msg);
7135
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
7138
pending_node = GF_CALLOC(1, sizeof(*pending_node),
7139
gf_gld_mt_pending_node_t);
7140
if (!pending_node) {
7144
pending_node->node = volinfo;
7145
pending_node->type = GD_NODE_REBALANCE;
7146
cds_list_add_tail(&pending_node->list, selected);
7147
pending_node = NULL;
7155
glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
7156
struct cds_list_head *selected)
7160
int brick_index = -1;
7161
char *volname = NULL;
7162
char *brickname = NULL;
7163
glusterd_volinfo_t *volinfo = NULL;
7164
glusterd_brickinfo_t *brickinfo = NULL;
7165
glusterd_pending_node_t *pending_node = NULL;
7166
xlator_t *this = THIS;
7167
glusterd_conf_t *priv = NULL;
7168
glusterd_svc_t *svc = NULL;
7172
priv = this->private;
7175
ret = dict_get_int32(dict, "cmd", &cmd);
7177
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
7178
"Unable to get status type");
7182
if (cmd & GF_CLI_STATUS_ALL)
7185
switch (cmd & GF_CLI_STATUS_MASK) {
7186
case GF_CLI_STATUS_MEM:
7187
case GF_CLI_STATUS_CLIENTS:
7188
case GF_CLI_STATUS_INODE:
7189
case GF_CLI_STATUS_FD:
7190
case GF_CLI_STATUS_CALLPOOL:
7191
case GF_CLI_STATUS_NFS:
7192
case GF_CLI_STATUS_SHD:
7193
case GF_CLI_STATUS_QUOTAD:
7194
case GF_CLI_STATUS_SNAPD:
7195
case GF_CLI_STATUS_BITD:
7196
case GF_CLI_STATUS_SCRUB:
7197
case GF_CLI_STATUS_CLIENT_LIST:
7202
ret = dict_get_str(dict, "volname", &volname);
7204
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
7205
"Unable to get volname");
7208
ret = glusterd_volinfo_find(volname, &volinfo);
7213
if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
7214
ret = dict_get_str(dict, "brick", &brickname);
7216
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
7217
"Unable to get brick");
7220
ret = glusterd_volume_brickinfo_get_by_brick(brickname, volinfo,
7221
&brickinfo, _gf_false);
7225
if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
7226
!glusterd_is_brick_started(brickinfo))
7229
pending_node = GF_CALLOC(1, sizeof(*pending_node),
7230
gf_gld_mt_pending_node_t);
7231
if (!pending_node) {
7235
pending_node->node = brickinfo;
7236
pending_node->type = GD_NODE_BRICK;
7237
pending_node->index = 0;
7238
cds_list_add_tail(&pending_node->list, selected);
7242
} else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
7243
if (!priv->nfs_svc.online) {
7245
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NFS_SERVER_NOT_RUNNING,
7246
"NFS server is not running");
7249
pending_node = GF_CALLOC(1, sizeof(*pending_node),
7250
gf_gld_mt_pending_node_t);
7251
if (!pending_node) {
7255
pending_node->node = &(priv->nfs_svc);
7256
pending_node->type = GD_NODE_NFS;
7257
pending_node->index = 0;
7258
cds_list_add_tail(&pending_node->list, selected);
7262
} else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
7263
svc = &(volinfo->shd.svc);
7266
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SELF_HEALD_DISABLED,
7267
"Self-heal daemon is not running");
7270
pending_node = GF_CALLOC(1, sizeof(*pending_node),
7271
gf_gld_mt_pending_node_t);
7272
if (!pending_node) {
7276
pending_node->node = svc;
7277
pending_node->type = GD_NODE_SHD;
7278
pending_node->index = 0;
7279
cds_list_add_tail(&pending_node->list, selected);
7282
} else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
7283
if (!priv->quotad_svc.online) {
7284
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_QUOTAD_NOT_RUNNING,
7290
pending_node = GF_CALLOC(1, sizeof(*pending_node),
7291
gf_gld_mt_pending_node_t);
7292
if (!pending_node) {
7296
pending_node->node = &(priv->quotad_svc);
7297
pending_node->type = GD_NODE_QUOTAD;
7298
pending_node->index = 0;
7299
cds_list_add_tail(&pending_node->list, selected);
7302
} else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
7303
if (!priv->bitd_svc.online) {
7304
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BITROT_NOT_RUNNING,
7310
pending_node = GF_CALLOC(1, sizeof(*pending_node),
7311
gf_gld_mt_pending_node_t);
7312
if (!pending_node) {
7316
pending_node->node = &(priv->bitd_svc);
7317
pending_node->type = GD_NODE_BITD;
7318
pending_node->index = 0;
7319
cds_list_add_tail(&pending_node->list, selected);
7322
} else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
7323
if (!priv->scrub_svc.online) {
7324
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SCRUBBER_NOT_RUNNING,
7330
pending_node = GF_CALLOC(1, sizeof(*pending_node),
7331
gf_gld_mt_pending_node_t);
7332
if (!pending_node) {
7336
pending_node->node = &(priv->scrub_svc);
7337
pending_node->type = GD_NODE_SCRUB;
7338
pending_node->index = 0;
7339
cds_list_add_tail(&pending_node->list, selected);
7342
} else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
7343
if (!volinfo->snapd.svc.online) {
7344
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_NOT_RUNNING,
7350
pending_node = GF_CALLOC(1, sizeof(*pending_node),
7351
gf_gld_mt_pending_node_t);
7352
if (!pending_node) {
7353
gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
7354
"failed to allocate "
7355
"memory for pending node");
7360
pending_node->node = (void *)(&volinfo->snapd);
7361
pending_node->type = GD_NODE_SNAPD;
7362
pending_node->index = 0;
7363
cds_list_add_tail(&pending_node->list, selected);
7367
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
7370
if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
7371
!glusterd_is_brick_started(brickinfo)) {
7374
pending_node = GF_CALLOC(1, sizeof(*pending_node),
7375
gf_gld_mt_pending_node_t);
7376
if (!pending_node) {
7378
gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
7379
"Unable to allocate memory");
7382
pending_node->node = brickinfo;
7383
pending_node->type = GD_NODE_BRICK;
7384
pending_node->index = brick_index;
7385
cds_list_add_tail(&pending_node->list, selected);
7386
pending_node = NULL;
7394
glusterd_bricks_select_scrub(dict_t *dict, char **op_errstr,
7395
struct cds_list_head *selected)
7398
char *volname = NULL;
7402
xlator_t *this = THIS;
7403
glusterd_conf_t *priv = NULL;
7404
glusterd_volinfo_t *volinfo = NULL;
7405
glusterd_pending_node_t *pending_node = NULL;
7407
priv = this->private;
7412
ret = dict_get_str(dict, "volname", &volname);
7414
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
7420
ret = glusterd_volinfo_find(volname, &volinfo);
7422
snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
7424
*op_errstr = gf_strdup(msg);
7425
gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s",
7430
if (!priv->scrub_svc.online) {
7432
snprintf(msg, sizeof(msg), "Scrubber daemon is not running");
7434
gf_msg_debug(this->name, 0, "%s", msg);
7438
pending_node = GF_CALLOC(1, sizeof(*pending_node),
7439
gf_gld_mt_pending_node_t);
7440
if (!pending_node) {
7445
pending_node->node = &(priv->scrub_svc);
7446
pending_node->type = GD_NODE_SCRUB;
7447
cds_list_add_tail(&pending_node->list, selected);
7448
pending_node = NULL;
7450
gf_msg_debug(this->name, 0, "Returning %d", ret);
7458
glusterd_bricks_select_barrier(dict_t *dict, struct cds_list_head *selected)
7461
char *volname = NULL;
7462
glusterd_volinfo_t *volinfo = NULL;
7463
glusterd_brickinfo_t *brickinfo = NULL;
7464
glusterd_pending_node_t *pending_node = NULL;
7468
ret = dict_get_str(dict, "volname", &volname);
7470
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
7471
"Failed to get volname");
7475
ret = glusterd_volinfo_find(volname, &volinfo);
7477
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
7478
"Failed to find volume %s", volname);
7482
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
7484
if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
7485
!glusterd_is_brick_started(brickinfo)) {
7488
pending_node = GF_CALLOC(1, sizeof(*pending_node),
7489
gf_gld_mt_pending_node_t);
7490
if (!pending_node) {
7494
pending_node->node = brickinfo;
7495
pending_node->type = GD_NODE_BRICK;
7496
cds_list_add_tail(&pending_node->list, selected);
7497
pending_node = NULL;
7501
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
7506
glusterd_clear_pending_nodes(struct cds_list_head *list)
7508
glusterd_pending_node_t *pending_node = NULL;
7509
glusterd_pending_node_t *tmp = NULL;
7511
cds_list_for_each_entry_safe(pending_node, tmp, list, list)
7513
cds_list_del_init(&pending_node->list);
7514
GF_FREE(pending_node);
7521
glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
7524
rpc_clnt_procedure_t *proc = NULL;
7525
glusterd_conf_t *priv = NULL;
7526
xlator_t *this = THIS;
7527
glusterd_op_t op = GD_OP_NONE;
7528
glusterd_req_ctx_t *req_ctx = NULL;
7529
char *op_errstr = NULL;
7530
gf_boolean_t free_req_ctx = _gf_false;
7532
priv = this->private;
7537
req_ctx = GF_CALLOC(1, sizeof(*req_ctx), gf_gld_mt_op_allack_ctx_t);
7540
free_req_ctx = _gf_true;
7541
op = glusterd_op_get_op();
7543
gf_uuid_copy(req_ctx->uuid, MY_UUID);
7544
ret = glusterd_op_build_payload(&req_ctx->dict, &op_errstr, NULL);
7546
gf_msg(this->name, GF_LOG_ERROR, 0,
7547
GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL, LOGSTR_BUILD_PAYLOAD,
7549
if (op_errstr == NULL)
7550
gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
7551
opinfo.op_errstr = op_errstr;
7556
proc = &priv->gfs_mgmt->proctable[GLUSTERD_BRICK_OP];
7558
ret = proc->fn(NULL, this, req_ctx);
7563
if (!opinfo.pending_count && !opinfo.brick_pending_count) {
7564
glusterd_clear_pending_nodes(&opinfo.pending_bricks);
7565
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
7570
if (ret && free_req_ctx)
7572
gf_msg_debug(this->name, 0, "Returning with %d", ret);
7578
glusterd_op_ac_rcvd_brick_op_acc(glusterd_op_sm_event_t *event, void *ctx)
7581
glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
7582
char *op_errstr = NULL;
7583
glusterd_op_t op = GD_OP_NONE;
7584
gd_node_type type = GD_NODE_NONE;
7585
dict_t *op_ctx = NULL;
7586
glusterd_req_ctx_t *req_ctx = NULL;
7587
void *pending_entry = NULL;
7588
xlator_t *this = THIS;
7590
GF_VALIDATE_OR_GOTO(this->name, event, out);
7591
GF_VALIDATE_OR_GOTO(this->name, ctx, out);
7593
GF_VALIDATE_OR_GOTO(this->name, ev_ctx, out);
7595
req_ctx = ev_ctx->commit_ctx;
7596
GF_VALIDATE_OR_GOTO(this->name, req_ctx, out);
7599
op_ctx = glusterd_op_get_ctx();
7600
pending_entry = ev_ctx->pending_node->node;
7601
type = ev_ctx->pending_node->type;
7603
ret = glusterd_remove_pending_entry(&opinfo.pending_bricks, pending_entry);
7605
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNKNOWN_RESPONSE,
7606
"unknown response received ");
7611
if (opinfo.brick_pending_count > 0)
7612
opinfo.brick_pending_count--;
7614
ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
7616
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
7618
"transaction's opinfo");
7620
glusterd_handle_node_rsp(req_ctx->dict, pending_entry, op, ev_ctx->rsp_dict,
7621
op_ctx, &op_errstr, type);
7623
if (opinfo.brick_pending_count > 0)
7626
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
7627
ev_ctx->commit_ctx);
7630
if (ev_ctx && ev_ctx->rsp_dict)
7631
dict_unref(ev_ctx->rsp_dict);
7633
gf_msg_debug(this->name, 0, "Returning %d", ret);
7638
glusterd_op_bricks_select(glusterd_op_t op, dict_t *dict, char **op_errstr,
7639
struct cds_list_head *selected, dict_t *rsp_dict)
7644
GF_ASSERT(op_errstr);
7645
GF_ASSERT(op > GD_OP_NONE);
7646
GF_ASSERT(op < GD_OP_MAX);
7649
case GD_OP_STOP_VOLUME:
7650
ret = glusterd_bricks_select_stop_volume(dict, op_errstr, selected);
7652
case GD_OP_REMOVE_BRICK:
7653
ret = glusterd_bricks_select_remove_brick(dict, op_errstr,
7657
case GD_OP_PROFILE_VOLUME:
7658
ret = glusterd_bricks_select_profile_volume(dict, op_errstr,
7662
case GD_OP_HEAL_VOLUME:
7663
ret = glusterd_bricks_select_heal_volume(dict, op_errstr, selected,
7667
case GD_OP_STATUS_VOLUME:
7668
ret = glusterd_bricks_select_status_volume(dict, op_errstr,
7671
case GD_OP_DEFRAG_BRICK_VOLUME:
7672
ret = glusterd_bricks_select_rebalance_volume(dict, op_errstr,
7677
ret = glusterd_bricks_select_barrier(dict, selected);
7680
ret = glusterd_bricks_select_snap(dict, op_errstr, selected);
7682
case GD_OP_SCRUB_STATUS:
7683
case GD_OP_SCRUB_ONDEMAND:
7684
ret = glusterd_bricks_select_scrub(dict, op_errstr, selected);
7690
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
7695
glusterd_op_sm_t glusterd_op_state_default[] = {
7696
{GD_OP_STATE_DEFAULT, glusterd_op_ac_none},
7697
{GD_OP_STATE_LOCK_SENT, glusterd_op_ac_send_lock},
7698
{GD_OP_STATE_LOCKED, glusterd_op_ac_lock},
7699
{GD_OP_STATE_DEFAULT, glusterd_op_ac_none},
7700
{GD_OP_STATE_DEFAULT, glusterd_op_ac_none},
7701
{GD_OP_STATE_DEFAULT, glusterd_op_ac_none},
7702
{GD_OP_STATE_DEFAULT, glusterd_op_ac_none},
7703
{GD_OP_STATE_DEFAULT, glusterd_op_ac_none},
7704
{GD_OP_STATE_DEFAULT, glusterd_op_ac_none},
7705
{GD_OP_STATE_DEFAULT, glusterd_op_ac_none},
7706
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7707
{GD_OP_STATE_DEFAULT, glusterd_op_ac_none},
7708
{GD_OP_STATE_DEFAULT, glusterd_op_ac_none},
7709
{GD_OP_STATE_DEFAULT, glusterd_op_ac_none},
7710
{GD_OP_STATE_DEFAULT, glusterd_op_ac_none},
7713
glusterd_op_sm_t glusterd_op_state_lock_sent[] = {
7714
{GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},
7715
{GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},
7716
{GD_OP_STATE_LOCK_SENT, glusterd_op_ac_lock},
7717
{GD_OP_STATE_LOCK_SENT, glusterd_op_ac_rcvd_lock_acc},
7718
{GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_send_stage_op},
7719
{GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},
7720
{GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},
7721
{GD_OP_STATE_ACK_DRAIN,
7722
glusterd_op_ac_send_unlock_drain},
7723
{GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},
7724
{GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},
7725
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7726
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7727
{GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},
7728
{GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},
7729
{GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},
7732
glusterd_op_sm_t glusterd_op_state_locked[] = {
7733
{GD_OP_STATE_LOCKED, glusterd_op_ac_none},
7734
{GD_OP_STATE_LOCKED, glusterd_op_ac_none},
7735
{GD_OP_STATE_LOCKED, glusterd_op_ac_lock},
7736
{GD_OP_STATE_LOCKED, glusterd_op_ac_none},
7737
{GD_OP_STATE_LOCKED, glusterd_op_ac_none},
7738
{GD_OP_STATE_LOCKED, glusterd_op_ac_none},
7739
{GD_OP_STATE_LOCKED, glusterd_op_ac_none},
7740
{GD_OP_STATE_LOCKED, glusterd_op_ac_none},
7741
{GD_OP_STATE_STAGED, glusterd_op_ac_stage_op},
7742
{GD_OP_STATE_LOCKED, glusterd_op_ac_none},
7743
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7744
{GD_OP_STATE_LOCKED, glusterd_op_ac_none},
7745
{GD_OP_STATE_LOCKED, glusterd_op_ac_none},
7746
{GD_OP_STATE_DEFAULT,
7747
glusterd_op_ac_local_unlock},
7748
{GD_OP_STATE_LOCKED, glusterd_op_ac_none},
7751
glusterd_op_sm_t glusterd_op_state_stage_op_sent[] = {
7752
{GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},
7753
{GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},
7754
{GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_lock},
7755
{GD_OP_STATE_STAGE_OP_SENT,
7756
glusterd_op_ac_rcvd_stage_op_acc},
7757
{GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_send_brick_op},
7758
{GD_OP_STATE_BRICK_OP_SENT,
7759
glusterd_op_ac_send_brick_op},
7760
{GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},
7761
{GD_OP_STATE_STAGE_OP_FAILED,
7762
glusterd_op_ac_stage_op_failed},
7763
{GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},
7764
{GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},
7765
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7766
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7767
{GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},
7768
{GD_OP_STATE_STAGE_OP_SENT,
7769
glusterd_op_ac_none},
7770
{GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},
7773
glusterd_op_sm_t glusterd_op_state_stage_op_failed[] = {
7774
{GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},
7775
{GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},
7776
{GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_lock},
7777
{GD_OP_STATE_STAGE_OP_FAILED,
7778
glusterd_op_ac_stage_op_failed},
7779
{GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},
7780
{GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},
7781
{GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},
7782
{GD_OP_STATE_STAGE_OP_FAILED,
7783
glusterd_op_ac_stage_op_failed},
7784
{GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},
7785
{GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},
7786
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7787
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7788
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock},
7789
{GD_OP_STATE_STAGE_OP_FAILED,
7790
glusterd_op_ac_none},
7791
{GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},
7794
glusterd_op_sm_t glusterd_op_state_staged[] = {
7795
{GD_OP_STATE_STAGED, glusterd_op_ac_none},
7796
{GD_OP_STATE_STAGED, glusterd_op_ac_none},
7797
{GD_OP_STATE_STAGED, glusterd_op_ac_lock},
7798
{GD_OP_STATE_STAGED, glusterd_op_ac_none},
7799
{GD_OP_STATE_STAGED, glusterd_op_ac_none},
7800
{GD_OP_STATE_STAGED, glusterd_op_ac_none},
7801
{GD_OP_STATE_STAGED, glusterd_op_ac_none},
7802
{GD_OP_STATE_STAGED, glusterd_op_ac_none},
7803
{GD_OP_STATE_STAGED, glusterd_op_ac_none},
7804
{GD_OP_STATE_BRICK_COMMITTED,
7805
glusterd_op_ac_send_brick_op},
7806
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7807
{GD_OP_STATE_STAGED, glusterd_op_ac_none},
7808
{GD_OP_STATE_STAGED, glusterd_op_ac_none},
7809
{GD_OP_STATE_DEFAULT,
7810
glusterd_op_ac_local_unlock},
7811
{GD_OP_STATE_STAGED, glusterd_op_ac_none},
7814
glusterd_op_sm_t glusterd_op_state_brick_op_sent[] = {
7815
{GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},
7816
{GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},
7817
{GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_lock},
7818
{GD_OP_STATE_BRICK_OP_SENT,
7819
glusterd_op_ac_rcvd_brick_op_acc},
7820
{GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},
7821
{GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},
7822
{GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},
7823
{GD_OP_STATE_BRICK_OP_FAILED,
7824
glusterd_op_ac_brick_op_failed},
7825
{GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},
7826
{GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},
7827
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7828
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7829
{GD_OP_STATE_COMMIT_OP_SENT,
7830
glusterd_op_ac_send_commit_op},
7831
{GD_OP_STATE_BRICK_OP_SENT,
7832
glusterd_op_ac_none},
7833
{GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},
7836
glusterd_op_sm_t glusterd_op_state_brick_op_failed[] = {
7837
{GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},
7838
{GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},
7839
{GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_lock},
7840
{GD_OP_STATE_BRICK_OP_FAILED,
7841
glusterd_op_ac_brick_op_failed},
7842
{GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},
7843
{GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},
7844
{GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},
7845
{GD_OP_STATE_BRICK_OP_FAILED,
7846
glusterd_op_ac_brick_op_failed},
7847
{GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},
7848
{GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},
7849
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7850
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7851
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock},
7852
{GD_OP_STATE_BRICK_OP_FAILED,
7853
glusterd_op_ac_none},
7854
{GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},
7857
glusterd_op_sm_t glusterd_op_state_brick_committed[] = {
7858
{GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},
7859
{GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},
7860
{GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_lock},
7861
{GD_OP_STATE_BRICK_COMMITTED,
7862
glusterd_op_ac_rcvd_brick_op_acc},
7863
{GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},
7864
{GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},
7865
{GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},
7866
{GD_OP_STATE_BRICK_COMMIT_FAILED,
7867
glusterd_op_ac_brick_op_failed},
7868
{GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},
7869
{GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},
7870
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7871
{GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},
7872
{GD_OP_STATE_COMMITED, glusterd_op_ac_commit_op},
7873
{GD_OP_STATE_DEFAULT,
7874
glusterd_op_ac_local_unlock},
7875
{GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},
7878
glusterd_op_sm_t glusterd_op_state_brick_commit_failed[] = {
7879
{GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},
7880
{GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},
7881
{GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_lock},
7882
{GD_OP_STATE_BRICK_COMMIT_FAILED,
7883
glusterd_op_ac_brick_op_failed},
7884
{GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},
7885
{GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},
7886
{GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},
7887
{GD_OP_STATE_BRICK_COMMIT_FAILED,
7888
glusterd_op_ac_brick_op_failed},
7889
{GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},
7890
{GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},
7891
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7892
{GD_OP_STATE_BRICK_COMMIT_FAILED,
7893
glusterd_op_ac_none},
7894
{GD_OP_STATE_BRICK_COMMIT_FAILED,
7895
glusterd_op_ac_send_commit_failed},
7896
{GD_OP_STATE_DEFAULT,
7897
glusterd_op_ac_local_unlock},
7898
{GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},
7901
glusterd_op_sm_t glusterd_op_state_commit_op_failed[] = {
7902
{GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},
7903
{GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},
7904
{GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_lock},
7905
{GD_OP_STATE_COMMIT_OP_FAILED,
7906
glusterd_op_ac_commit_op_failed},
7907
{GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},
7908
{GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},
7909
{GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},
7910
{GD_OP_STATE_COMMIT_OP_FAILED,
7911
glusterd_op_ac_commit_op_failed},
7912
{GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},
7913
{GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},
7914
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7915
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7916
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock},
7917
{GD_OP_STATE_COMMIT_OP_FAILED,
7918
glusterd_op_ac_none},
7919
{GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},
7922
glusterd_op_sm_t glusterd_op_state_commit_op_sent[] = {
7923
{GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},
7924
{GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},
7925
{GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_lock},
7926
{GD_OP_STATE_COMMIT_OP_SENT,
7927
glusterd_op_ac_rcvd_commit_op_acc},
7928
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock},
7929
{GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},
7930
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock},
7931
{GD_OP_STATE_COMMIT_OP_FAILED,
7932
glusterd_op_ac_commit_op_failed},
7933
{GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},
7934
{GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},
7935
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7936
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7937
{GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},
7938
{GD_OP_STATE_COMMIT_OP_SENT,
7939
glusterd_op_ac_none},
7940
{GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},
7943
glusterd_op_sm_t glusterd_op_state_committed[] = {
7944
{GD_OP_STATE_COMMITED, glusterd_op_ac_none},
7945
{GD_OP_STATE_COMMITED, glusterd_op_ac_none},
7946
{GD_OP_STATE_COMMITED, glusterd_op_ac_lock},
7947
{GD_OP_STATE_COMMITED, glusterd_op_ac_none},
7948
{GD_OP_STATE_COMMITED, glusterd_op_ac_none},
7949
{GD_OP_STATE_COMMITED, glusterd_op_ac_none},
7950
{GD_OP_STATE_COMMITED, glusterd_op_ac_none},
7951
{GD_OP_STATE_COMMITED, glusterd_op_ac_none},
7952
{GD_OP_STATE_COMMITED, glusterd_op_ac_none},
7953
{GD_OP_STATE_COMMITED, glusterd_op_ac_none},
7954
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7955
{GD_OP_STATE_COMMITED, glusterd_op_ac_none},
7956
{GD_OP_STATE_COMMITED, glusterd_op_ac_none},
7957
{GD_OP_STATE_DEFAULT,
7958
glusterd_op_ac_local_unlock},
7959
{GD_OP_STATE_COMMITED, glusterd_op_ac_none},
7962
glusterd_op_sm_t glusterd_op_state_unlock_sent[] = {
7963
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},
7964
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},
7965
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_lock},
7966
{GD_OP_STATE_UNLOCK_SENT,
7967
glusterd_op_ac_rcvd_unlock_acc},
7968
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlocked_all},
7969
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},
7970
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},
7971
{GD_OP_STATE_UNLOCK_SENT,
7972
glusterd_op_ac_rcvd_unlock_acc},
7973
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},
7974
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},
7975
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7976
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7977
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},
7978
{GD_OP_STATE_UNLOCK_SENT,
7979
glusterd_op_ac_none},
7980
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},
7983
glusterd_op_sm_t glusterd_op_state_ack_drain[] = {
7984
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7985
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7986
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_lock},
7987
{GD_OP_STATE_ACK_DRAIN,
7988
glusterd_op_ac_send_unlock_drain},
7989
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7990
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7991
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7992
{GD_OP_STATE_ACK_DRAIN,
7993
glusterd_op_ac_send_unlock_drain},
7994
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7995
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7996
{GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},
7997
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
7998
{GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock},
7999
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
8000
{GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},
8003
glusterd_op_sm_t *glusterd_op_state_table[] = {
8004
glusterd_op_state_default, glusterd_op_state_lock_sent,
8005
glusterd_op_state_locked, glusterd_op_state_stage_op_sent,
8006
glusterd_op_state_staged, glusterd_op_state_commit_op_sent,
8007
glusterd_op_state_committed, glusterd_op_state_unlock_sent,
8008
glusterd_op_state_stage_op_failed, glusterd_op_state_commit_op_failed,
8009
glusterd_op_state_brick_op_sent, glusterd_op_state_brick_op_failed,
8010
glusterd_op_state_brick_committed, glusterd_op_state_brick_commit_failed,
8011
glusterd_op_state_ack_drain};
8014
glusterd_op_sm_new_event(glusterd_op_sm_event_type_t event_type,
8015
glusterd_op_sm_event_t **new_event)
8017
glusterd_op_sm_event_t *event = NULL;
8019
GF_ASSERT(new_event);
8020
GF_ASSERT(GD_OP_EVENT_NONE <= event_type && GD_OP_EVENT_MAX > event_type);
8022
event = GF_CALLOC(1, sizeof(*event), gf_gld_mt_op_sm_event_t);
8028
event->event = event_type;
8029
CDS_INIT_LIST_HEAD(&event->list);
8035
glusterd_op_sm_inject_event(glusterd_op_sm_event_type_t event_type,
8036
uuid_t *txn_id, void *ctx)
8039
glusterd_op_sm_event_t *event = NULL;
8041
GF_ASSERT(event_type < GD_OP_EVENT_MAX && event_type >= GD_OP_EVENT_NONE);
8043
ret = glusterd_op_sm_new_event(event_type, &event);
8051
gf_uuid_copy(event->txn_id, *txn_id);
8053
gf_msg_debug(THIS->name, 0, "Enqueue event: '%s'",
8054
glusterd_op_sm_event_name_get(event->event));
8055
cds_list_add_tail(&event->list, &gd_op_sm_queue);
8062
glusterd_destroy_req_ctx(glusterd_req_ctx_t *ctx)
8067
dict_unref(ctx->dict);
8072
glusterd_destroy_local_unlock_ctx(uuid_t *ctx)
8080
glusterd_destroy_op_event_ctx(glusterd_op_sm_event_t *event)
8085
switch (event->event) {
8086
case GD_OP_EVENT_LOCK:
8087
case GD_OP_EVENT_UNLOCK:
8088
glusterd_destroy_lock_ctx(event->ctx);
8090
case GD_OP_EVENT_STAGE_OP:
8091
case GD_OP_EVENT_ALL_ACK:
8092
glusterd_destroy_req_ctx(event->ctx);
8094
case GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP:
8095
glusterd_destroy_local_unlock_ctx(event->ctx);
8105
glusterd_op_sm_event_t *event = NULL;
8106
glusterd_op_sm_event_t *tmp = NULL;
8109
glusterd_op_sm_ac_fn handler = NULL;
8110
glusterd_op_sm_t *state = NULL;
8111
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
8112
xlator_t *this = THIS;
8113
glusterd_op_info_t txn_op_info;
8114
glusterd_conf_t *priv = NULL;
8116
priv = this->private;
8119
ret = synclock_trylock(&gd_op_sm_lock);
8122
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_LOCK_FAIL,
8123
"lock failed due to %s", strerror(lock_err));
8127
while (!cds_list_empty(&gd_op_sm_queue)) {
8128
cds_list_for_each_entry_safe(event, tmp, &gd_op_sm_queue, list)
8130
cds_list_del_init(&event->list);
8131
event_type = event->event;
8132
gf_msg_debug(this->name, 0,
8133
"Dequeued event of "
8135
glusterd_op_sm_event_name_get(event_type));
8137
gf_msg_debug(this->name, 0, "transaction ID = %s",
8138
uuid_utoa(event->txn_id));
8140
ret = glusterd_get_txn_opinfo(&event->txn_id, &txn_op_info);
8142
gf_msg_callingfn(this->name, GF_LOG_ERROR, 0,
8143
GD_MSG_TRANS_OPINFO_GET_FAIL,
8144
"Unable to get transaction "
8145
"opinfo for transaction ID :"
8147
uuid_utoa(event->txn_id));
8148
glusterd_destroy_op_event_ctx(event);
8152
opinfo = txn_op_info;
8154
state = glusterd_op_state_table[opinfo.state];
8158
handler = state[event_type].handler;
8161
ret = handler(event, event->ctx);
8164
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HANDLER_RETURNED,
8165
"handler returned: %d", ret);
8166
glusterd_destroy_op_event_ctx(event);
8171
ret = glusterd_op_sm_transition_state(&opinfo, state, event_type);
8174
gf_msg(this->name, GF_LOG_ERROR, 0,
8175
GD_MSG_EVENT_STATE_TRANSITION_FAIL,
8176
"Unable to transition"
8177
"state from '%s' to '%s'",
8178
glusterd_op_sm_state_name_get(opinfo.state),
8179
glusterd_op_sm_state_name_get(
8180
state[event_type].next_state));
8181
(void)synclock_unlock(&gd_op_sm_lock);
8185
if ((state[event_type].next_state == GD_OP_STATE_DEFAULT) &&
8186
(event_type == GD_OP_EVENT_UNLOCK)) {
8188
ret = glusterd_clear_txn_opinfo(&event->txn_id);
8190
gf_msg(this->name, GF_LOG_ERROR, 0,
8191
GD_MSG_TRANS_OPINFO_CLEAR_FAIL,
8193
"transaction's opinfo");
8195
if ((priv->op_version < GD_OP_VERSION_6_0) ||
8196
!(event_type == GD_OP_EVENT_STAGE_OP &&
8197
opinfo.state == GD_OP_STATE_STAGED &&
8198
opinfo.skip_locking)) {
8199
ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
8201
gf_msg(this->name, GF_LOG_ERROR, 0,
8202
GD_MSG_TRANS_OPINFO_SET_FAIL,
8204
"transaction's opinfo");
8208
glusterd_destroy_op_event_ctx(event);
8213
(void)synclock_unlock(&gd_op_sm_lock);
8222
glusterd_op_set_op(glusterd_op_t op)
8224
GF_ASSERT(op < GD_OP_MAX);
8225
GF_ASSERT(op > GD_OP_NONE);
8233
glusterd_op_get_op(void)
8239
glusterd_op_set_req(rpcsvc_request_t *req)
8247
glusterd_op_clear_op(void)
8249
opinfo.op = GD_OP_NONE;
8255
glusterd_op_free_ctx(glusterd_op_t op, void *ctx)
8259
case GD_OP_CREATE_VOLUME:
8260
case GD_OP_DELETE_VOLUME:
8261
case GD_OP_STOP_VOLUME:
8262
case GD_OP_ADD_BRICK:
8263
case GD_OP_REMOVE_BRICK:
8264
case GD_OP_REPLACE_BRICK:
8265
case GD_OP_LOG_ROTATE:
8266
case GD_OP_SYNC_VOLUME:
8267
case GD_OP_SET_VOLUME:
8268
case GD_OP_START_VOLUME:
8269
case GD_OP_RESET_VOLUME:
8270
case GD_OP_GSYNC_SET:
8272
case GD_OP_PROFILE_VOLUME:
8273
case GD_OP_STATUS_VOLUME:
8274
case GD_OP_REBALANCE:
8275
case GD_OP_HEAL_VOLUME:
8276
case GD_OP_STATEDUMP_VOLUME:
8277
case GD_OP_CLEARLOCKS_VOLUME:
8278
case GD_OP_DEFRAG_BRICK_VOLUME:
8279
case GD_OP_MAX_OPVERSION:
8288
glusterd_op_reset_ctx();
8293
glusterd_op_get_ctx(void)
8295
return opinfo.op_ctx;
8299
glusterd_op_sm_init(void)
8301
CDS_INIT_LIST_HEAD(&gd_op_sm_queue);
8302
synclock_init(&gd_op_sm_lock, SYNC_LOCK_DEFAULT);