#include <glusterfs/common-utils.h>
#include <glusterfs/syscall.h>
#include "glusterd-op-sm.h"
#include "glusterd-geo-rep.h"
#include "glusterd-store.h"
#include "glusterd-utils.h"
#include "glusterd-volgen.h"
#include "glusterd-messages.h"
#include <glusterfs/run.h>
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
#include "glusterd-shd-svc.h"
#include "glusterd-snapd-svc.h"
#include "glusterd-mgmt.h"
#include "glusterd-server-quorum.h"
#include <sys/socket.h>
#include <netinet/in.h>
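/* "volume start" and "volume stop" take the same (volname, flags) argument
 * pair from the CLI dictionary, so the start-side getter below simply
 * aliases the stop-side helper defined later in this file. */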
#define glusterd_op_start_volume_args_get(dict, volname, flags)               \
    glusterd_op_stop_volume_args_get(dict, volname, flags)
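/* Handler for "gluster volume create". Decodes the CLI request, validates
 * volname/count/type/transport/bricks, rejects thin-arbiter volumes on
 * clusters below GD_OP_VERSION_7_0, then generates a volume-id plus internal
 * username/password and hands GD_OP_CREATE_VOLUME to a synctask. The public
 * glusterd_handle_* wrappers run these __-prefixed handlers under the big
 * lock via glusterd_big_locked_handler(). */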
__glusterd_handle_create_volume(rpcsvc_request_t *req)
    gf_cli_req cli_req = {{0}};
    int thin_arbiter_count = 0;
    char err_str[2048] = {0};
    xlator_t *this = THIS;
    glusterd_conf_t *conf = NULL;
    char *free_ptr = NULL;
    char *trans_type = NULL;
    char *address_family_str = NULL;
    uuid_t tmp_uuid = {0};
    char *username = NULL;
    char *password = NULL;
#ifdef IPV6_DEFAULT
    char *addr_family = "inet6";
#else
    char *addr_family = "inet";
#endif
    glusterd_volinfo_t *volinfo = NULL;

    GF_VALIDATE_OR_GOTO(this->name, conf, out);

    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
        req->rpc_err = GARBAGE_ARGS;
        snprintf(err_str, sizeof(err_str),
                 "Failed to decode request "
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",

    gf_msg_debug(this->name, 0, "Received create volume req");

    if (cli_req.dict.dict_len) {
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "unserialize req-buffer to dictionary");
            snprintf(err_str, sizeof(err_str),
        dict->extra_stdfree = cli_req.dict.dict_val;

    ret = dict_get_str(dict, "volname", &volname);
        snprintf(err_str, sizeof(err_str),
                 "Unable to get volume "
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",

    ret = glusterd_volinfo_find(volname, &volinfo);
        snprintf(err_str, sizeof(err_str), "Volume %s already exists", volname);
        gf_msg(this->name, GF_LOG_ERROR, EEXIST, GD_MSG_VOL_ALREADY_EXIST, "%s",

    ret = dict_get_int32(dict, "count", &brick_count);
        snprintf(err_str, sizeof(err_str),
                 "Unable to get brick count"
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",

    ret = dict_get_int32(dict, "type", &type);
        snprintf(err_str, sizeof(err_str),
                 "Unable to get type of "
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",

    ret = dict_get_str(dict, "transport", &trans_type);
        snprintf(err_str, sizeof(err_str),
                 "transport-type of volume %s",
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",

    ret = dict_get_str(this->options, "transport.address-family",
                       &address_family_str);
        ret = dict_set_dynstr_with_alloc(dict, "transport.address-family",
            gf_log(this->name, GF_LOG_ERROR,
                   "failed to set transport.address-family");
    } else if (!strcmp(trans_type, "tcp")) {
        if (conf->op_version >= GD_OP_VERSION_3_8_0) {
            ret = dict_set_dynstr_with_alloc(dict, "transport.address-family",
                gf_log(this->name, GF_LOG_ERROR,
                       "transport.address-family "

    ret = dict_get_str(dict, "bricks", &bricks);
        snprintf(err_str, sizeof(err_str),
                 "Unable to get bricks for "
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",

    ret = dict_get_int32(dict, "thin-arbiter-count", &thin_arbiter_count);
    if (thin_arbiter_count && conf->op_version < GD_OP_VERSION_7_0) {
        snprintf(err_str, sizeof(err_str),
                 "Cannot execute command. "
                 "The cluster is operating at version %d. "
                 "Thin-arbiter volume creation is unavailable in "
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_OP_FAILED, "%s",

    if (!dict_get_sizen(dict, "force")) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Failed to get 'force' flag");

    gf_uuid_generate(volume_id);
    free_ptr = gf_strdup(uuid_utoa(volume_id));
    ret = dict_set_dynstr_sizen(dict, "volume-id", free_ptr);
        snprintf(err_str, sizeof(err_str),
                 "Unable to set volume "
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "%s",

    gf_uuid_generate(tmp_uuid);
    username = gf_strdup(uuid_utoa(tmp_uuid));
    ret = dict_set_dynstr_sizen(dict, "internal-username", username);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set username for "

    gf_uuid_generate(tmp_uuid);
    password = gf_strdup(uuid_utoa(tmp_uuid));
    ret = dict_set_dynstr_sizen(dict, "internal-password", password);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set password for "

    ret = glusterd_op_begin_synctask(req, GD_OP_CREATE_VOLUME, dict);

    if (err_str[0] == '\0')
        snprintf(err_str, sizeof(err_str), "Operation failed");
    rsp.op_errstr = err_str;
    glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp,

glusterd_handle_create_volume(rpcsvc_request_t *req)
    return glusterd_big_locked_handler(req, __glusterd_handle_create_volume);
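/* Handler for "gluster volume start". Unlike create, this dispatches
 * directly through the mgmt_v3 framework
 * (glusterd_mgmt_v3_initiate_all_phases). */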
__glusterd_handle_cli_start_volume(rpcsvc_request_t *req)
    gf_cli_req cli_req = {{0}};
    char *volname = NULL;
    glusterd_op_t cli_op = GD_OP_START_VOLUME;
    char errstr[2048] = {0};
    xlator_t *this = THIS;

    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
        snprintf(errstr, sizeof(errstr),
                 "Failed to decode message "
                 "received from cli");
        req->rpc_err = GARBAGE_ARGS;
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",

    if (cli_req.dict.dict_len) {
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "unserialize req-buffer to dictionary");
            snprintf(errstr, sizeof(errstr),

    ret = dict_get_str(dict, "volname", &volname);
        snprintf(errstr, sizeof(errstr), "Unable to get volume name");
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",

    gf_msg_debug(this->name, 0,
                 "Received start vol req"

    ret = glusterd_mgmt_v3_initiate_all_phases(req, GD_OP_START_VOLUME, dict);

    free(cli_req.dict.dict_val);

    if (errstr[0] == '\0')
        snprintf(errstr, sizeof(errstr), "Operation failed");
    ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, errstr);

glusterd_handle_cli_start_volume(rpcsvc_request_t *req)
    return glusterd_big_locked_handler(req, __glusterd_handle_cli_start_volume);
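/* Handler for "gluster volume stop". Falls back to the older syncop
 * framework when the cluster op-version predates GD_OP_VERSION_4_1_0;
 * otherwise it uses the mgmt_v3 phases, like start. */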
__glusterd_handle_cli_stop_volume(rpcsvc_request_t *req)
    gf_cli_req cli_req = {{0}};
    char *dup_volname = NULL;
    glusterd_op_t cli_op = GD_OP_STOP_VOLUME;
    xlator_t *this = THIS;
    glusterd_conf_t *conf = NULL;

    conf = this->private;

    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
        snprintf(err_str, sizeof(err_str),
                 "Failed to decode message "
                 "received from cli");
        req->rpc_err = GARBAGE_ARGS;
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",

    if (cli_req.dict.dict_len) {
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "unserialize req-buffer to dictionary");
            snprintf(err_str, sizeof(err_str),

    ret = dict_get_str(dict, "volname", &dup_volname);
        snprintf(err_str, sizeof(err_str),
                 "Failed to get volume "
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",

    gf_msg_debug(this->name, 0,
                 "Received stop vol req "

    if (conf->op_version < GD_OP_VERSION_4_1_0) {
        gf_msg_debug(this->name, 0,
                     "The cluster is operating at "
                     "version less than %d. Volume stop "
                     "falling back to syncop framework.",
                     GD_OP_VERSION_4_1_0);
        ret = glusterd_op_begin_synctask(req, GD_OP_STOP_VOLUME, dict);
        ret = glusterd_mgmt_v3_initiate_all_phases(req, GD_OP_STOP_VOLUME,

    free(cli_req.dict.dict_val);

    if (err_str[0] == '\0')
        snprintf(err_str, sizeof(err_str), "Operation failed");
    ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);

glusterd_handle_cli_stop_volume(rpcsvc_request_t *req)
    return glusterd_big_locked_handler(req, __glusterd_handle_cli_stop_volume);
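/* Handler for "gluster volume delete"; deletion itself runs through the
 * syncop framework as GD_OP_DELETE_VOLUME. */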
__glusterd_handle_cli_delete_volume(rpcsvc_request_t *req)
    gf_cli_req cli_req = {{0}};
    glusterd_op_t cli_op = GD_OP_DELETE_VOLUME;
    char *volname = NULL;

    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
        snprintf(err_str, sizeof(err_str),
                 "Failed to decode request "
                 "received from cli");
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
        req->rpc_err = GARBAGE_ARGS;

    if (cli_req.dict.dict_len) {
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
            gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "unserialize req-buffer to dictionary");
            snprintf(err_str, sizeof(err_str),

    ret = dict_get_str(dict, "volname", &volname);
        snprintf(err_str, sizeof(err_str),
                 "Failed to get volume "
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
        req->rpc_err = GARBAGE_ARGS;

    gf_msg_debug(THIS->name, 0,
                 "Received delete vol req"

    ret = glusterd_op_begin_synctask(req, GD_OP_DELETE_VOLUME, dict);

    free(cli_req.dict.dict_val);

    if (err_str[0] == '\0')
        snprintf(err_str, sizeof(err_str), "Operation failed");
    ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);

glusterd_handle_cli_delete_volume(rpcsvc_request_t *req)
    return glusterd_big_locked_handler(req,
                                       __glusterd_handle_cli_delete_volume);
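/* Translates "volume heal <vol> [granular-entry-]heal enable/disable" into
 * an equivalent volume-set operation: picks the right option key (the shd
 * key for plain enable/disable, cluster.granular-entry-heal otherwise),
 * marks it as a special key, and submits GD_OP_SET_VOLUME. */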
glusterd_handle_heal_options_enable_disable(rpcsvc_request_t *req, dict_t *dict,
                                            glusterd_volinfo_t *volinfo)
    gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;

    ret = dict_get_int32(dict, "heal-op", (int32_t *)&heal_op);
    if (ret || (heal_op == GF_SHD_OP_INVALID)) {
        gf_smsg(THIS->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                "Key=heal-op", NULL);

    if ((heal_op != GF_SHD_OP_HEAL_ENABLE) &&
        (heal_op != GF_SHD_OP_HEAL_DISABLE) &&
        (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) &&
        (heal_op != GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {

    if (((heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE) ||
         (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) &&
        (volinfo->type != GF_CLUSTER_TYPE_REPLICATE)) {

    if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
        (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE)) {
    } else if ((heal_op == GF_SHD_OP_HEAL_DISABLE) ||
               (heal_op == GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE)) {

    if ((heal_op == GF_SHD_OP_HEAL_ENABLE) ||
        (heal_op == GF_SHD_OP_HEAL_DISABLE)) {
        key = volgen_get_shd_key(volinfo->type);
        key = "cluster.granular-entry-heal";
        ret = dict_set_int8(dict, "is-special-key", 1);
            gf_smsg(THIS->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                    "Key=is-special-key", NULL);

    ret = dict_set_str_sizen(dict, "key1", key);
        gf_smsg(THIS->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,

    ret = dict_set_str_sizen(dict, "value1", value);
        gf_smsg(THIS->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,

    ret = dict_set_int32_sizen(dict, "count", 1);
        gf_smsg(THIS->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,

    ret = glusterd_op_begin_synctask(req, GD_OP_SET_VOLUME, dict);
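/* Flattens every brick of the volume into the dict as "<index>-hostname" /
 * "<index>-path" pairs, so the heal response can name each brick. */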
glusterd_add_bricks_hname_path_to_dict(dict_t *dict,
                                       glusterd_volinfo_t *volinfo)
    glusterd_brickinfo_t *brickinfo = NULL;

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
        ret = snprintf(key, sizeof(key), "%d-hostname", index);
        ret = dict_set_strn(dict, key, ret, brickinfo->hostname);
            gf_smsg("glusterd", GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                    "Key=%s", key, NULL);

        ret = snprintf(key, sizeof(key), "%d-path", index);
        ret = dict_set_strn(dict, key, ret, brickinfo->path);
            gf_smsg("glusterd", GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                    "Key=%s", key, NULL);
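/* Handler for "gluster volume heal". Enable/disable subcommands are
 * diverted to glusterd_handle_heal_options_enable_disable() above; real
 * heal commands get the brick list and count added to the dict and are run
 * as GD_OP_HEAL_VOLUME. */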
__glusterd_handle_cli_heal_volume(rpcsvc_request_t *req)
    gf_cli_req cli_req = {{0}};
    glusterd_op_t cli_op = GD_OP_HEAL_VOLUME;
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    xlator_t *this = THIS;
    char op_errstr[2048] = {0};

    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
        req->rpc_err = GARBAGE_ARGS;
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);

    if (cli_req.dict.dict_len) {
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "unserialize req-buffer to dictionary");
            snprintf(op_errstr, sizeof(op_errstr),
                     "Unable to decode the command");
        dict->extra_stdfree = cli_req.dict.dict_val;

    ret = dict_get_str(dict, "volname", &volname);
        snprintf(op_errstr, sizeof(op_errstr),
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",

    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_HEAL_VOL_REQ_RCVD,
           "Received heal vol req "

    ret = glusterd_volinfo_find(volname, &volinfo);
        snprintf(op_errstr, sizeof(op_errstr), "Volume %s does not exist",

    ret = glusterd_handle_heal_options_enable_disable(req, dict, volinfo);
    if (ret == -EINVAL) {

    ret = glusterd_add_bricks_hname_path_to_dict(dict, volinfo);

    ret = dict_set_int32_sizen(dict, "count", volinfo->brick_count);
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,

    ret = glusterd_op_begin_synctask(req, GD_OP_HEAL_VOLUME, dict);

    if (op_errstr[0] == '\0')
        snprintf(op_errstr, sizeof(op_errstr), "operation failed");
    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GLUSTERD_OP_FAILED, "%s",
    ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict,

glusterd_handle_cli_heal_volume(rpcsvc_request_t *req)
    return glusterd_big_locked_handler(req, __glusterd_handle_cli_heal_volume);
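/* Handler for "gluster volume statedump". Validates volname/options/
 * option_cnt and refuses quotad statedumps on op-version-1 clusters. */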
__glusterd_handle_cli_statedump_volume(rpcsvc_request_t *req)
    gf_cli_req cli_req = {{0}};
    char *volname = NULL;
    char *options = NULL;
    int32_t option_cnt = 0;
    glusterd_op_t cli_op = GD_OP_STATEDUMP_VOLUME;
    char err_str[128] = {0};
    glusterd_conf_t *priv = NULL;

    priv = THIS->private;

    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
        req->rpc_err = GARBAGE_ARGS;
        gf_smsg(THIS->name, GF_LOG_ERROR, errno, GD_MSG_GARBAGE_ARGS, NULL);

    if (cli_req.dict.dict_len) {
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
            gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "unserialize req-buffer to dictionary");
            snprintf(err_str, sizeof(err_str),
                     "decode the command");

    ret = dict_get_str(dict, "volname", &volname);
        snprintf(err_str, sizeof(err_str), "Unable to get the volume name");
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",

    ret = dict_get_str(dict, "options", &options);
        snprintf(err_str, sizeof(err_str), "Unable to get options");
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",

    ret = dict_get_int32(dict, "option_cnt", &option_cnt);
        snprintf(err_str, sizeof(err_str),
                 "Unable to get option "
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",

    if (priv->op_version == GD_OP_VERSION_MIN && strstr(options, "quotad")) {
        snprintf(err_str, sizeof(err_str),
                 "The cluster is operating "
                 "at op-version 1. Taking quotad's statedump is "
                 "disallowed in this state");

    gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_STATEDUMP_VOL_REQ_RCVD,
           "Received statedump request for "
           "volume %s with options %s",

    ret = glusterd_op_begin_synctask(req, GD_OP_STATEDUMP_VOLUME, dict);

    if (err_str[0] == '\0')
        snprintf(err_str, sizeof(err_str), "Operation failed");
    ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
    free(cli_req.dict.dict_val);

glusterd_handle_cli_statedump_volume(rpcsvc_request_t *req)
    return glusterd_big_locked_handler(req,
                                       __glusterd_handle_cli_statedump_volume);
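/* Staging (validation) phase of volume create: re-checks the payload on
 * every peer, verifies brick order for replicate/disperse volumes on the
 * originator, and validates/creates local brick paths, recording each
 * local brick's mount_dir in rsp_dict for the commit phase. */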
glusterd_op_stage_create_volume(dict_t *dict, char **op_errstr,
                                dict_t *rsp_dict)
    char *volname = NULL;
    char *brick_list = NULL;
    char *free_ptr = NULL;
    glusterd_brickinfo_t *brick_info = NULL;
    int32_t brick_count = 0;
    int32_t local_brick_count = 0;
    int32_t replica_count = 0;
    int32_t disperse_count = 0;
    xlator_t *this = THIS;
    glusterd_conf_t *priv = NULL;
    char msg[2048] = {0};
    char *volume_uuid_str;
    gf_boolean_t is_force = _gf_false;
    glusterd_volinfo_t *volinfo = NULL;

    priv = this->private;

    ret = dict_get_str(dict, "volname", &volname);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume name");

    ret = glusterd_volinfo_find(volname, &volinfo);
        snprintf(msg, sizeof(msg), "Volume %s already exists", volname);

    ret = dict_get_int32(dict, "count", &brick_count);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get brick count "

    ret = dict_get_str(dict, "volume-id", &volume_uuid_str);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume id of "

    ret = gf_uuid_parse(volume_uuid_str, volume_uuid);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UUID_PARSE_FAIL,
               "Unable to parse volume id of"

    ret = dict_get_str(dict, "bricks", &bricks);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get bricks for "

    is_force = dict_get_str_boolean(dict, "force", _gf_false);

    brick_list = gf_strdup(bricks);
    free_ptr = brick_list;

    if (is_origin_glusterd(dict)) {
        ret = dict_get_int32(dict, "type", &type);
            snprintf(msg, sizeof(msg),
                     "Unable to get type of "
            gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED, "%s",

        if (type == GF_CLUSTER_TYPE_REPLICATE) {
            ret = dict_get_int32(dict, "replica-count", &replica_count);
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                       "Bricks check : Could"
                       " not retrieve replica count");
            gf_msg_debug(this->name, 0,
                         "Replicate cluster type "
                         "found. Checking brick order.");
            ret = glusterd_check_brick_order(dict, msg, type, &volname,
                                             &bricks, &brick_count,
        } else if (type == GF_CLUSTER_TYPE_DISPERSE) {
            ret = dict_get_int32(dict, "disperse-count", &disperse_count);
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                       "Bricks check : Could"
                       " not retrieve disperse count");
            gf_msg_debug(this->name, 0,
                         "Disperse cluster type"
                         " found. Checking brick order.");
            ret = glusterd_check_brick_order(dict, msg, type, &volname,
                                             &bricks, &brick_count,

            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BAD_BRKORDER,
                   "Not creating the volume because of "
                   "bad brick order. %s",
            *op_errstr = gf_strdup(msg);

    while (i < brick_count) {
        brick = strtok_r(brick_list, " \n", &tmpptr);
        brick_list = tmpptr;

        if (!glusterd_store_is_valid_brickpath(volname, brick)) {
            snprintf(msg, sizeof(msg),
                     "brick path %s is too "

        if (!glusterd_is_valid_volfpath(volname, brick)) {
            snprintf(msg, sizeof(msg),
                     "Volume file path for "
                     "volume %s and brick path %s is too long.",

        ret = glusterd_brickinfo_new_from_brick(brick, &brick_info, _gf_true,

        ret = glusterd_new_brick_validate(brick, brick_info, msg, sizeof(msg),

        ret = glusterd_resolve_brick(brick_info);
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESOLVE_BRICK_FAIL,
                   FMTSTR_RESOLVE_BRICK, brick_info->hostname,

        if (!gf_uuid_compare(brick_info->uuid, MY_UUID)) {
            ret = glusterd_validate_and_create_brickpath(
                brick_info, volume_uuid, volname, op_errstr, is_force,

            ret = glusterd_get_brick_mount_dir(
                brick_info->path, brick_info->hostname, brick_info->mount_dir);
                gf_msg(this->name, GF_LOG_ERROR, 0,
                       GD_MSG_BRICK_MOUNTDIR_GET_FAIL,
                       "Failed to get brick mount_dir");

            snprintf(key, sizeof(key), "brick%d.mount_dir", i);
            ret = dict_set_dynstr_with_alloc(rsp_dict, key,
                                             brick_info->mount_dir);
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                       "Failed to set %s", key);
            local_brick_count = i;

        brick_list = tmpptr;
        glusterd_brickinfo_delete(brick_info);

    ret = dict_set_int32_sizen(rsp_dict, "brick_count", local_brick_count);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set local_brick_count");

    glusterd_brickinfo_delete(brick_info);
    if (msg[0] != '\0') {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_STAGE_CREATE_VOL_FAIL,
        *op_errstr = gf_strdup(msg);
    gf_msg_debug(this->name, 0, "Returning %d", ret);

glusterd_op_stop_volume_args_get(dict_t *dict, char **volname, int *flags)
        gf_smsg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ARGUMENT, NULL);
        gf_smsg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ARGUMENT, NULL);
        gf_smsg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ARGUMENT, NULL);

    ret = dict_get_str(dict, "volname", volname);
        gf_smsg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                "Key=volname", NULL);

    ret = dict_get_int32(dict, "flags", flags);
        gf_smsg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,

glusterd_op_statedump_volume_args_get(dict_t *dict, char **volname,
                                      char **options, int *option_cnt)
    if (!dict || !volname || !options || !option_cnt) {
        gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);

    ret = dict_get_str(dict, "volname", volname);
        gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                "Key=volname", NULL);

    ret = dict_get_str(dict, "options", options);
        gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                "Key=options", NULL);

    ret = dict_get_int32(dict, "option_cnt", option_cnt);
        gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                "Key=option_cnt", NULL);
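/* Staging phase of volume start: checks server quorum and volume-id,
 * verifies each local brick directory exists and carries a matching
 * trusted.glusterfs.volume-id xattr (setting it on first start), and
 * records local brick mount_dirs in rsp_dict. */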
glusterd_op_stage_start_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
    char *volname = NULL;
    int32_t brick_count = 0;
    int32_t local_brick_count = 0;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    xlator_t *this = THIS;
    uuid_t volume_id = {0};
    char xattr_volid[50] = {0};

    GF_ASSERT(rsp_dict);

    ret = glusterd_op_start_volume_args_get(dict, &volname, &flags);

    ret = glusterd_volinfo_find(volname, &volinfo);
        snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
               FMTSTR_CHECK_VOL_EXISTS, volname);

    glusterd_volinfo_ref(volinfo);

    ret = glusterd_validate_quorum(this, GD_OP_START_VOLUME, dict, op_errstr);
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
               "Server quorum not met. Rejecting operation.");

    ret = glusterd_validate_volume_id(dict, volinfo);

    if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
        if (glusterd_is_volume_started(volinfo)) {
            snprintf(msg, sizeof(msg),
                     "Volume %s already "

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
        ret = glusterd_resolve_brick(brickinfo);
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESOLVE_BRICK_FAIL,
                   FMTSTR_RESOLVE_BRICK, brickinfo->hostname, brickinfo->path);

        if ((gf_uuid_compare(brickinfo->uuid, MY_UUID)) ||
            (brickinfo->snap_status == -1))

        ret = gf_lstat_dir(brickinfo->path, NULL);
        if (ret && (flags & GF_CLI_FLAG_OP_FORCE)) {
            len = snprintf(msg, sizeof(msg),
                           "brick directory %s for volume %s. "
                           brickinfo->path, volname, strerror(errno));
                strcpy(msg, "<error>");

        ret = sys_lgetxattr(brickinfo->path, GF_XATTR_VOL_ID_KEY, volume_id,
        if (ret < 0 && (!(flags & GF_CLI_FLAG_OP_FORCE))) {
            len = snprintf(msg, sizeof(msg),
                           "extended attribute %s for brick dir "
                           GF_XATTR_VOL_ID_KEY, brickinfo->path,
                strcpy(msg, "<error>");
        } else if (ret < 0) {
            ret = sys_lsetxattr(brickinfo->path, GF_XATTR_VOL_ID_KEY,
                                volinfo->volume_id, 16, XATTR_CREATE);
                len = snprintf(msg, sizeof(msg),
                               "set extended attribute %s on "
                               GF_XATTR_VOL_ID_KEY, brickinfo->path,
                    strcpy(msg, "<error>");

        if (gf_uuid_compare(volinfo->volume_id, volume_id)) {
            len = snprintf(msg, sizeof(msg),
                           "mismatch for brick %s:%s. Expected "
                           "volume id %s, volume id %s found",
                           brickinfo->hostname, brickinfo->path,
                           uuid_utoa_r(volinfo->volume_id, volid),
                           uuid_utoa_r(volume_id, xattr_volid));
                strcpy(msg, "<error>");

        if (strlen(brickinfo->mount_dir) < 1) {
            ret = glusterd_get_brick_mount_dir(
                brickinfo->path, brickinfo->hostname, brickinfo->mount_dir);
                gf_msg(this->name, GF_LOG_ERROR, 0,
                       GD_MSG_BRICK_MOUNTDIR_GET_FAIL,
                       "Failed to get brick mount_dir");

            snprintf(key, sizeof(key), "brick%d.mount_dir", brick_count);
            ret = dict_set_dynstr_with_alloc(rsp_dict, key,
                                             brickinfo->mount_dir);
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                       "Failed to set %s", key);
            local_brick_count = brick_count;

    ret = dict_set_int32_sizen(rsp_dict, "brick_count", local_brick_count);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set local_brick_count");

    glusterd_volinfo_unref(volinfo);
    if (ret && (msg[0] != '\0')) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_STAGE_START_VOL_FAIL,
        *op_errstr = gf_strdup(msg);
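/* Staging phase of volume stop: the volume must be started, must not have
 * an active geo-replication session or rebalance in progress, and is
 * unexported from NFS-Ganesha when applicable. A force flag skips these
 * checks. */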
glusterd_op_stage_stop_volume(dict_t *dict, char **op_errstr)
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    char msg[2048] = {0};
    xlator_t *this = THIS;
    gsync_status_param_t param = {0};

    ret = glusterd_op_stop_volume_args_get(dict, &volname, &flags);
        snprintf(msg, sizeof(msg), "Failed to get details of volume %s",
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_STOP_ARGS_GET_FAILED,
                "Volume name=%s", volname, NULL);

    ret = glusterd_volinfo_find(volname, &volinfo);
        snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, "%s", msg);

    ret = glusterd_validate_volume_id(dict, volinfo);

    if (flags & GF_CLI_FLAG_OP_FORCE)

    if (_gf_false == glusterd_is_volume_started(volinfo)) {
        snprintf(msg, sizeof(msg),
                 "is not in the started state",
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_STARTED, "%s", msg);

    param.volinfo = volinfo;
    ret = glusterd_check_geo_rep_running(&param, op_errstr);
    if (ret || param.is_active) {

    ret = glusterd_check_ganesha_export(volinfo);
        ret = ganesha_manage_export(dict, "off", _gf_false, op_errstr);
            gf_msg(this->name, GF_LOG_WARNING, 0,
                   GD_MSG_NFS_GNS_UNEXPRT_VOL_FAIL,
                   "unexport volume via NFS-Ganesha");

    if (glusterd_is_defrag_on(volinfo)) {
        snprintf(msg, sizeof(msg),
                 "rebalance session is "
                 "in progress for the volume '%s'",
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_OIP, "%s", msg);

    *op_errstr = gf_strdup(msg);
    gf_msg_debug(this->name, 0, "Returning %d", ret);
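/* Staging phase of volume delete: refuses to delete a started volume or
 * one with snapshots, requires all peers to be up, and marks stage_deleted
 * so a concurrent create of the same name fails cleanly. */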
glusterd_op_stage_delete_volume(dict_t *dict, char **op_errstr)
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    char msg[2048] = {0};
    xlator_t *this = THIS;

    ret = dict_get_str(dict, "volname", &volname);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume name");

    ret = glusterd_volinfo_find(volname, &volinfo);
        snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);

    ret = glusterd_validate_volume_id(dict, volinfo);

    if (glusterd_is_volume_started(volinfo)) {
        snprintf(msg, sizeof(msg),
                 "Volume %s has been started."
                 "Volume needs to be stopped before deletion.",

    if (volinfo->snap_count > 0 || !cds_list_empty(&volinfo->snap_volumes)) {
        snprintf(msg, sizeof(msg),
                 "Cannot delete Volume %s ,"
                 "as it has %" PRIu64
                 "To delete the volume, "
                 "first delete all the snapshots under it.",
                 volname, volinfo->snap_count);

    if (!glusterd_are_all_peers_up()) {
        snprintf(msg, sizeof(msg), "Some of the peers are down");

    volinfo->stage_deleted = _gf_true;
    gf_log(this->name, GF_LOG_INFO,
           "Setting stage deleted flag to true for "

    if (msg[0] != '\0') {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_STAGE_DELETE_VOL_FAIL,
        *op_errstr = gf_strdup(msg);
    gf_msg_debug(this->name, 0, "Returning %d", ret);
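/* Validates a heal sub-command against the volume type and decides whether
 * the self-heal daemon must be running for it; enable/disable ops are
 * rejected here because they were already translated to volume-set. */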
glusterd_handle_heal_cmd(xlator_t *this, glusterd_volinfo_t *volinfo,
                         dict_t *dict, char **op_errstr)
    glusterd_svc_t *svc = NULL;
    gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
    char *offline_msg = "Self-heal daemon is not running. "
                        "Check self-heal daemon log file.";

    ret = dict_get_int32(dict, "heal-op", (int32_t *)&heal_op);
        *op_errstr = gf_strdup("Heal operation not specified");

    svc = &(volinfo->shd.svc);
    switch (heal_op) {
        case GF_SHD_OP_INVALID:
        case GF_SHD_OP_HEAL_ENABLE:
        case GF_SHD_OP_HEAL_DISABLE:
        case GF_SHD_OP_GRANULAR_ENTRY_HEAL_ENABLE:
        case GF_SHD_OP_GRANULAR_ENTRY_HEAL_DISABLE:
        case GF_SHD_OP_HEAL_SUMMARY:
        case GF_SHD_OP_SBRAIN_HEAL_FROM_BIGGER_FILE:
        case GF_SHD_OP_SBRAIN_HEAL_FROM_LATEST_MTIME:
        case GF_SHD_OP_SBRAIN_HEAL_FROM_BRICK:
            *op_errstr = gf_strdup("Invalid heal-op");

        case GF_SHD_OP_HEAL_INDEX:
        case GF_SHD_OP_HEAL_FULL:
            if (!glusterd_is_shd_compatible_volume(volinfo)) {
                snprintf(msg, sizeof(msg),
                         "Volume %s is not of type "
                         "replicate or disperse",
                *op_errstr = gf_strdup(msg);

                *op_errstr = gf_strdup(offline_msg);

        case GF_SHD_OP_INDEX_SUMMARY:
        case GF_SHD_OP_SPLIT_BRAIN_FILES:
        case GF_SHD_OP_STATISTICS:
        case GF_SHD_OP_STATISTICS_HEAL_COUNT:
        case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
            if (!glusterd_is_volume_replicate(volinfo)) {
                snprintf(msg, sizeof(msg),
                         "This command is supported "
                         "for only volume of replicated "
                         "type. Volume %s is not of type "
                *op_errstr = gf_strdup(msg);

                *op_errstr = gf_strdup(offline_msg);

        case GF_SHD_OP_HEALED_FILES:
        case GF_SHD_OP_HEAL_FAILED_FILES:
            snprintf(msg, sizeof(msg),
                     "Command not supported. "
                     "Please use \"gluster volume heal %s info\" "
                     "and logs to find the heal information.",
            *op_errstr = gf_strdup(msg);

    gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_HANDLE_HEAL_CMD_FAIL, "%s",
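/* Staging phase of volume heal: volume must exist, be started, and have
 * self-heal enabled before the sub-command is vetted by
 * glusterd_handle_heal_cmd(). */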
glusterd_op_stage_heal_volume(dict_t *dict, char **op_errstr)
    char *volname = NULL;
    gf_boolean_t enabled = _gf_false;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_conf_t *priv = NULL;
    dict_t *opt_dict = NULL;
    xlator_t *this = THIS;

    priv = this->private;
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRIV_NULL, "priv is NULL");

    ret = dict_get_str(dict, "volname", &volname);
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume name");

    ret = glusterd_volinfo_find(volname, &volinfo);
        snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
        *op_errstr = gf_strdup(msg);

    ret = glusterd_validate_volume_id(dict, volinfo);

    if (!glusterd_is_volume_started(volinfo)) {
        snprintf(msg, sizeof(msg), "Volume %s is not started.", volname);
        gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_STARTED,
                "Volume=%s", volname, NULL);
        *op_errstr = gf_strdup(msg);

    opt_dict = volinfo->dict;
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, NULL);

    enabled = gd_is_self_heal_enabled(volinfo, opt_dict);
        snprintf(msg, sizeof(msg),
                 "Self-heal-daemon is "
                 "disabled. Heal will not be triggered on volume %s",
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_SELF_HEALD_DISABLED, "%s",
        *op_errstr = gf_strdup(msg);

    ret = glusterd_handle_heal_cmd(this, volinfo, dict, op_errstr);

    gf_msg_debug("glusterd", 0, "Returning %d", ret);
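/* Staging phase of volume statedump: volume must exist and be started,
 * and quotad statedumps require quota to be enabled (and op-version > 1). */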
glusterd_op_stage_statedump_volume(dict_t *dict, char **op_errstr)
    char *volname = NULL;
    char *options = NULL;
    gf_boolean_t is_running = _gf_false;
    glusterd_volinfo_t *volinfo = NULL;
    xlator_t *this = THIS;
    glusterd_conf_t *priv = NULL;

    priv = this->private;

    ret = glusterd_op_statedump_volume_args_get(dict, &volname, &options,

    ret = glusterd_volinfo_find(volname, &volinfo);
        snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL,
                "Volume=%s", volname, NULL);

    ret = glusterd_validate_volume_id(dict, volinfo);

    is_running = glusterd_is_volume_started(volinfo);
        snprintf(msg, sizeof(msg),
                 "Volume %s is not in the started"

    if (priv->op_version == GD_OP_VERSION_MIN && strstr(options, "quotad")) {
        snprintf(msg, sizeof(msg),
                 "The cluster is operating "
                 "at op-version 1. Taking quotad's statedump is "
                 "disallowed in this state");

    if ((strstr(options, "quotad")) &&
        (!glusterd_is_volume_quota_enabled(volinfo))) {
        snprintf(msg, sizeof(msg),
                 "Quota is not enabled on "

    if (ret && msg[0] != '\0')
        *op_errstr = gf_strdup(msg);
    gf_msg_debug(this->name, 0, "Returning %d", ret);
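/* Staging phase of clear-locks: needs volname, path, kind and type in the
 * dict, and a started volume. */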
glusterd_op_stage_clearlocks_volume(dict_t *dict, char **op_errstr)
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;

    ret = dict_get_str(dict, "volname", &volname);
        snprintf(msg, sizeof(msg), "Failed to get volume name");
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s", msg);
        *op_errstr = gf_strdup(msg);

    ret = dict_get_str(dict, "path", &path);
        snprintf(msg, sizeof(msg), "Failed to get path");
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s", msg);
        *op_errstr = gf_strdup(msg);

    ret = dict_get_str(dict, "kind", &kind);
        snprintf(msg, sizeof(msg), "Failed to get kind");
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s", msg);
        *op_errstr = gf_strdup(msg);

    ret = dict_get_str(dict, "type", &type);
        snprintf(msg, sizeof(msg), "Failed to get type");
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s", msg);
        *op_errstr = gf_strdup(msg);

    ret = glusterd_volinfo_find(volname, &volinfo);
        snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
        *op_errstr = gf_strdup(msg);

    ret = glusterd_validate_volume_id(dict, volinfo);

    if (!glusterd_is_volume_started(volinfo)) {
        snprintf(msg, sizeof(msg), "Volume %s is not started", volname);
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_STARTED, "%s", msg);
        *op_errstr = gf_strdup(msg);

    gf_msg_debug("glusterd", 0, "Returning %d", ret);
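/* Commit phase of volume create: builds the volinfo (type, counts,
 * transport, auth credentials), constructs brickinfos for regular and
 * thin-arbiter bricks, persists the volinfo, and generates volfiles. On
 * failure the partially-built volinfo is unref'd. */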
glusterd_op_create_volume(dict_t *dict, char **op_errstr)
    char *volname = NULL;
    glusterd_conf_t *priv = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    gf_boolean_t vol_added = _gf_false;
    glusterd_brickinfo_t *brickinfo = NULL;
    glusterd_brickinfo_t *ta_brickinfo = NULL;
    xlator_t *this = THIS;
    char *ta_brick = NULL;
    char *bricks = NULL;
    char *ta_bricks = NULL;
    char *brick_list = NULL;
    char *ta_brick_list = NULL;
    char *free_ptr = NULL;
    char *ta_free_ptr = NULL;
    char *saveptr = NULL;
    char *ta_saveptr = NULL;
    char *trans_type = NULL;
    char *username = NULL;
    char *password = NULL;
    char msg[1024] __attribute__((unused)) = {0};
    char *brick_mount_dir = NULL;
    char *address_family_str = NULL;
    struct statvfs brickstat = {0};

    priv = this->private;

    ret = glusterd_volinfo_new(&volinfo);
        gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
               "Unable to allocate memory for volinfo");

    ret = dict_get_str(dict, "volname", &volname);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume name");

    if (snprintf(volinfo->volname, sizeof(volinfo->volname), "%s", volname) >=
        sizeof(volinfo->volname)) {

    GF_ASSERT(volinfo->volname);

    ret = dict_get_int32(dict, "type", &volinfo->type);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get type of volume"

    ret = dict_get_int32(dict, "count", &volinfo->brick_count);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get brick count of"

    ret = dict_get_int32(dict, "port", &volinfo->port);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get port");

    ret = dict_get_str(dict, "bricks", &bricks);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get bricks for "

    volinfo->replica_count = 1;

    if (GF_CLUSTER_TYPE_REPLICATE == volinfo->type) {
        if (priv->op_version >= GD_OP_VERSION_3_12_2) {
            ret = dict_set_sizen_str_sizen(
                volinfo->dict, "performance.client-io-threads", "off");
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                       "performance.client-io-threads to off");

        ret = dict_get_int32(dict, "replica-count", &volinfo->replica_count);
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "replica count for volume %s",

        ret = dict_get_int32(dict, "arbiter-count", &volinfo->arbiter_count);
        ret = dict_get_int32(dict, "thin-arbiter-count",
                             &volinfo->thin_arbiter_count);
        if (volinfo->thin_arbiter_count) {
            ret = dict_get_str(dict, "ta-brick", &ta_bricks);
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                       "Unable to get thin arbiter brick for "

    } else if (GF_CLUSTER_TYPE_DISPERSE == volinfo->type) {
        ret = dict_get_int32(dict, "disperse-count", &volinfo->disperse_count);
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "disperse count for volume %s",
        ret = dict_get_int32(dict, "redundancy-count",
                             &volinfo->redundancy_count);
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "redundancy count for volume %s",

    volinfo->dist_leaf_count = glusterd_get_dist_leaf_count(volinfo);
    volinfo->subvol_count = (volinfo->brick_count / volinfo->dist_leaf_count);

    if (volinfo->dist_leaf_count > 1)
        volinfo->sub_count = volinfo->dist_leaf_count;

    ret = dict_get_str(dict, "transport", &trans_type);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get transport type of volume %s", volname);

    ret = dict_get_str(dict, "volume-id", &str);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume-id of volume %s", volname);

    ret = gf_uuid_parse(str, volinfo->volume_id);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UUID_PARSE_FAIL,
               "unable to parse uuid %s of volume %s", str, volname);

    ret = dict_get_str(dict, "internal-username", &username);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "unable to get internal username of volume %s", volname);
    glusterd_auth_set_username(volinfo, username);

    ret = dict_get_str(dict, "internal-password", &password);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "unable to get internal password of volume %s", volname);
    glusterd_auth_set_password(volinfo, password);

    if (strcasecmp(trans_type, "rdma") == 0) {
        volinfo->transport_type = GF_TRANSPORT_RDMA;
    } else if (strcasecmp(trans_type, "tcp") == 0) {
        volinfo->transport_type = GF_TRANSPORT_TCP;
        volinfo->transport_type = GF_TRANSPORT_BOTH_TCP_RDMA;

    ta_brick_list = gf_strdup(ta_bricks);
    ta_free_ptr = ta_brick_list;

    if (volinfo->thin_arbiter_count) {
        ta_brick = strtok_r(ta_brick_list + 1, " \n", &ta_saveptr);
        brickid = volinfo->replica_count;
        while (count <= volinfo->subvol_count) {
            ret = glusterd_brickinfo_new_from_brick(ta_brick, &ta_brickinfo,
                                                    _gf_false, op_errstr);
            GLUSTERD_ASSIGN_BRICKID_TO_TA_BRICKINFO(ta_brickinfo, volinfo,
            cds_list_add_tail(&ta_brickinfo->brick_list, &volinfo->ta_bricks);
            brickid += volinfo->replica_count + 1;

    brick_list = gf_strdup(bricks);
    free_ptr = brick_list;
    count = volinfo->brick_count;

    brick = strtok_r(brick_list + 1, " \n", &saveptr);
    brickid = glusterd_get_next_available_brickid(volinfo);

    while (i <= count) {
        ret = glusterd_brickinfo_new_from_brick(brick, &brickinfo, _gf_true,

        if (volinfo->thin_arbiter_count == 1 &&
            (brickid + 1) % (volinfo->replica_count + 1) == 0) {
            brickid = brickid + 1;
        GLUSTERD_ASSIGN_BRICKID_TO_BRICKINFO(brickinfo, volinfo, brickid++);

        ret = glusterd_resolve_brick(brickinfo);
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RESOLVE_BRICK_FAIL,
                   FMTSTR_RESOLVE_BRICK, brickinfo->hostname, brickinfo->path);

        brick_mount_dir = NULL;
        ret = snprintf(key, sizeof(key), "brick%d.mount_dir", i);
        ret = dict_get_strn(dict, key, ret, &brick_mount_dir);
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "%s not present", key);
        snprintf(brickinfo->mount_dir, sizeof(brickinfo->mount_dir), "%s",

        if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
            ret = sys_statvfs(brickinfo->path, &brickstat);
                gf_log("brick-op", GF_LOG_ERROR,
                       "Failed to fetch disk"
                       " utilization from the brick (%s:%s). Please "
                       "check health of the brick. Error code was %s",
                       brickinfo->hostname, brickinfo->path, strerror(errno));
            brickinfo->statfs_fsid = brickstat.f_fsid;

        cds_list_add_tail(&brickinfo->brick_list, &volinfo->bricks);
        brick = strtok_r(NULL, " \n", &saveptr);

    ret = glusterd_enable_default_options(volinfo, NULL);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FAIL_DEFAULT_OPT_SET,
               "Failed to set default "
               "options on create for volume %s",

    ret = dict_get_str(dict, "transport.address-family", &address_family_str);
        ret = dict_set_dynstr_with_alloc(
            volinfo->dict, "transport.address-family", address_family_str);
            gf_log(this->name, GF_LOG_ERROR,
                   "Failed to set transport.address-family for %s",

    gd_update_volume_op_versions(volinfo);

    ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
        glusterd_store_delete_volume(volinfo);
        *op_errstr = gf_strdup(
            "Failed to store the "
            "Volume information");

    ret = glusterd_create_volfiles_and_notify_services(volinfo);
        *op_errstr = gf_strdup("Failed to create volume files");

    volinfo->rebal.defrag_status = 0;
    glusterd_list_add_order(&volinfo->vol_list, &priv->volumes,
                            glusterd_compare_volume_name);
    vol_added = _gf_true;

    GF_FREE(ta_free_ptr);
    if (!vol_added && volinfo)
        glusterd_volinfo_unref(volinfo);
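/* Starts all bricks of a volume (honouring the force flag), then persists
 * the new GLUSTERD_STATUS_STARTED state under the volinfo lock. */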
glusterd_start_volume(glusterd_volinfo_t *volinfo, int flags, gf_boolean_t wait)
    glusterd_brickinfo_t *brickinfo = NULL;
    xlator_t *this = THIS;
    glusterd_volinfo_ver_ac_t verincrement = 0;

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
        if (flags & GF_CLI_FLAG_OP_FORCE) {
            brickinfo->start_triggered = _gf_false;
        ret = glusterd_brick_start(volinfo, brickinfo, wait, _gf_false);
        if (!(flags & GF_CLI_FLAG_OP_FORCE) && ret)

    if (GLUSTERD_STATUS_STARTED != volinfo->status) {
        verincrement = GLUSTERD_VOLINFO_VER_AC_INCREMENT;
        verincrement = GLUSTERD_VOLINFO_VER_AC_NONE;

    glusterd_set_volume_status(volinfo, GLUSTERD_STATUS_STARTED);

    LOCK(&volinfo->lock);
    ret = glusterd_store_volinfo(volinfo, verincrement);
    UNLOCK(&volinfo->lock);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_SET_FAIL,
               "Failed to store volinfo of "

    gf_msg_trace(this->name, 0, "returning %d ", ret);
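/* Commit phase of volume start: fills in any missing brick mount_dirs from
 * the staging rsp_dict, sets nfs.disable when NFS-Ganesha is enabled
 * cluster-wide, starts the bricks, and (re)starts the per-volume daemons
 * (snapd, gfproxyd) plus the shared services. */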
glusterd_op_start_volume(dict_t *dict, char **op_errstr)
    int32_t brick_count = 0;
    char *brick_mount_dir = NULL;
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    xlator_t *this = THIS;
    glusterd_conf_t *conf = NULL;
    glusterd_svc_t *svc = NULL;
    gf_boolean_t option = _gf_false;

    conf = this->private;

    ret = glusterd_op_start_volume_args_get(dict, &volname, &flags);

    ret = glusterd_volinfo_find(volname, &volinfo);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
               FMTSTR_CHECK_VOL_EXISTS, volname);

    glusterd_volinfo_ref(volinfo);

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
        if (gf_uuid_compare(brickinfo->uuid, MY_UUID))

        if (strlen(brickinfo->mount_dir) < 1) {
            brick_mount_dir = NULL;
            ret = snprintf(key, sizeof(key), "brick%d.mount_dir", brick_count);
            ret = dict_get_strn(dict, key, ret, &brick_mount_dir);
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                       "%s not present", key);
            if (snprintf(brickinfo->mount_dir, sizeof(brickinfo->mount_dir),
                         brick_mount_dir) >= sizeof(brickinfo->mount_dir)) {

    ret = dict_get_str(conf->opts, GLUSTERD_STORE_KEY_GANESHA_GLOBAL, &str);
        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
               "Global dict not present.");
        ret = gf_string2boolean(str, &option);
            gf_msg_debug(this->name, 0, "NFS-Ganesha is enabled");
            ret = dict_set_str(volinfo->dict, NFS_DISABLE_MAP_KEY, "on");
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                       "Failed to set nfs.disable for"

    ret = glusterd_start_volume(volinfo, flags, _gf_true);

    if (!volinfo->is_snap_volume) {
        svc = &(volinfo->snapd.svc);
        ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);

    svc = &(volinfo->gfproxyd.svc);
    ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
    ret = glusterd_svcs_manager(volinfo);

    glusterd_volinfo_unref(volinfo);
    gf_msg_trace(this->name, 0, "returning %d ", ret);
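/* Stops all bricks, persists GLUSTERD_STATUS_STOPPED, and reconfigures the
 * dependent daemons/graphs. */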
glusterd_stop_volume(glusterd_volinfo_t *volinfo)
    glusterd_brickinfo_t *brickinfo = NULL;
    xlator_t *this = THIS;
    glusterd_svc_t *svc = NULL;

    GF_VALIDATE_OR_GOTO(this->name, volinfo, out);

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
        ret = glusterd_brick_stop(volinfo, brickinfo, _gf_false);
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_STOP_FAIL,

    glusterd_set_volume_status(volinfo, GLUSTERD_STATUS_STOPPED);

    ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_SET_FAIL,
               "Failed to store volinfo of "

    if (!volinfo->is_snap_volume) {
        svc = &(volinfo->snapd.svc);
        ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);

    ret = glusterd_svcs_manager(volinfo);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_GRAPH_CHANGE_NOTIFY_FAIL,
               "Failed to notify graph "
               "change for %s volume",
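/* Commit phases of volume stop and delete. Delete additionally tears down
 * the NFS-Ganesha export config on the originator before removing the
 * volume. */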
glusterd_op_stop_volume(dict_t *dict)
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    xlator_t *this = THIS;

    ret = glusterd_op_stop_volume_args_get(dict, &volname, &flags);

    ret = glusterd_volinfo_find(volname, &volinfo);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
               FMTSTR_CHECK_VOL_EXISTS, volname);

    ret = glusterd_stop_volume(volinfo);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_STOP_FAILED,
               "Failed to stop %s volume", volname);

    gf_msg_trace(this->name, 0, "returning %d ", ret);

glusterd_op_delete_volume(dict_t *dict)
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    xlator_t *this = THIS;

    ret = dict_get_str(dict, "volname", &volname);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume name");

    ret = glusterd_volinfo_find(volname, &volinfo);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
               FMTSTR_CHECK_VOL_EXISTS, volname);

    if (glusterd_check_ganesha_export(volinfo) && is_origin_glusterd(dict)) {
        ret = manage_export_config(volname, "off", NULL);
            gf_msg(this->name, GF_LOG_WARNING, 0, 0,
                   "Could not delete ganesha export conf file "

    ret = glusterd_delete_volume(volinfo);
    gf_msg_debug(this->name, 0, "returning %d", ret);

glusterd_op_heal_volume(dict_t *dict, char **op_errstr)
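/* Client statedump: expects options of the form "client <ip> <pid>" and
 * forwards the request to the matching client via
 * glusterd_client_statedump_submit_req(). */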
glusterd_client_statedump(char *volname, char *options, int option_cnt,
                          char **op_errstr)
    char *dup_options = NULL;
    char *option = NULL;
    char *tmpptr = NULL;
    char *target_ip = NULL;

    dup_options = gf_strdup(options);
        gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
                "options=%s", options, NULL);

    option = strtok_r(dup_options, " ", &tmpptr);
    if (strcmp(option, "client")) {
        snprintf(msg, sizeof(msg),
                 "for gluster client statedump, options "
                 "should be after the key 'client'");
        gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY,
                "Options misplaced", NULL);
        *op_errstr = gf_strdup(msg);

    target_ip = strtok_r(NULL, " ", &tmpptr);
    if (target_ip == NULL) {
        snprintf(msg, sizeof(msg), "ip address not specified");
        gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY, msg,
        *op_errstr = gf_strdup(msg);

    pid = strtok_r(NULL, " ", &tmpptr);
        snprintf(msg, sizeof(msg), "pid not specified");
        gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ENTRY, msg,
        *op_errstr = gf_strdup(msg);

    ret = glusterd_client_statedump_submit_req(volname, target_ip, pid);

    GF_FREE(dup_options);
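/* Commit phase of volume statedump: dispatches to quotad/nfs/client
 * specific statedump helpers, or walks the bricks and triggers a brick
 * statedump on each, continuing past per-brick failures. */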
glusterd_op_statedump_volume(dict_t *dict, char **op_errstr)
    char *volname = NULL;
    char *options = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;

    ret = glusterd_op_statedump_volume_args_get(dict, &volname, &options,

    ret = glusterd_volinfo_find(volname, &volinfo);

    gf_msg_debug("glusterd", 0, "Performing statedump on volume %s", volname);
    if (strstr(options, "quotad")) {
        ret = glusterd_quotad_statedump(options, option_cnt, op_errstr);
    } else if (strstr(options, "nfs") != NULL) {
        ret = glusterd_nfs_statedump(options, option_cnt, op_errstr);
    } else if (strstr(options, "client")) {
        ret = glusterd_client_statedump(volname, options, option_cnt,

        cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
            ret = glusterd_brick_statedump(volinfo, brickinfo, options,
                                           option_cnt, op_errstr);
                gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_BRK_STATEDUMP_FAIL,
                       "take the statedump of the brick %s:%s."
                       " Proceeding to other bricks",
                       brickinfo->hostname, brickinfo->path);
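/* clear-locks works by spinning up a throwaway "maintenance" glusterfs
 * client on a private mountpoint and issuing the clear-locks virtual
 * getxattr against it. The helpers below create, mount and unmount that
 * client; the big lock is dropped around runner_run() since mounting can
 * block. */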
glusterd_clearlocks_send_cmd(glusterd_volinfo_t *volinfo, char *cmd, char *path,
                             char *result, char *errstr, int err_len,
    char abspath[PATH_MAX] = {0};

    snprintf(abspath, sizeof(abspath), "%s/%s", mntpt, path);
    ret = sys_lgetxattr(abspath, cmd, result, PATH_MAX);
        snprintf(errstr, err_len,
                 "clear-locks getxattr command "
                 "failed. Reason: %s",
        gf_msg_debug(THIS->name, 0, "%s", errstr);

glusterd_clearlocks_rmdir_mount(glusterd_volinfo_t *volinfo, char *mntpt)
    ret = sys_rmdir(mntpt);
        gf_msg_debug(THIS->name, 0, "rmdir failed");

glusterd_clearlocks_unmount(glusterd_volinfo_t *volinfo, char *mntpt)
    glusterd_conf_t *priv = NULL;

    priv = THIS->private;

    runner_add_args(&runner, _PATH_UMOUNT, "-f", NULL);
    runner_argprintf(&runner, "%s", mntpt);

    synclock_unlock(&priv->big_lock);
    ret = runner_run(&runner);
    synclock_lock(&priv->big_lock);
        gf_msg_debug("glusterd", 0, "umount failed on maintenance client");

glusterd_clearlocks_create_mount(glusterd_volinfo_t *volinfo, char **mntpt)
    char template[PATH_MAX] = {0};

    snprintf(template, sizeof(template), "/tmp/%s.XXXXXX", volinfo->volname);
    tmpl = mkdtemp(template);
        gf_msg_debug(THIS->name, errno,
                     "Couldn't create temporary mount directory.");

    *mntpt = gf_strdup(tmpl);

glusterd_clearlocks_mount(glusterd_volinfo_t *volinfo, char **xl_opts,
    glusterd_conf_t *priv = NULL;
    char client_volfpath[PATH_MAX] = {0};
    char self_heal_opts[3][1024] = {"*replicate*.data-self-heal=off",
                                    "*replicate*.metadata-self-heal=off",
                                    "*replicate*.entry-self-heal=off"};

    priv = THIS->private;

    glusterd_get_trusted_client_filepath(client_volfpath, volinfo,
                                         volinfo->transport_type);
    runner_add_args(&runner, SBIN_DIR "/glusterfs", "-f", NULL);
    runner_argprintf(&runner, "%s", client_volfpath);
    runner_add_arg(&runner, "-l");
    runner_argprintf(&runner, "%s/%s-clearlocks-mnt.log", priv->logdir,

    if (volinfo->memory_accounting)
        runner_add_arg(&runner, "--mem-accounting");

    for (i = 0; i < volinfo->brick_count && xl_opts[i]; i++) {
        runner_add_arg(&runner, "--xlator-option");
        runner_argprintf(&runner, "%s", xl_opts[i]);

    for (i = 0; i < 3; i++) {
        runner_add_args(&runner, "--xlator-option", self_heal_opts[i], NULL);

    runner_argprintf(&runner, "%s", mntpt);
    synclock_unlock(&priv->big_lock);
    ret = runner_run(&runner);
    synclock_lock(&priv->big_lock);
        gf_msg_debug(THIS->name, 0, "Could not start glusterfs");
    gf_msg_debug(THIS->name, 0, "Started glusterfs successfully");

glusterd_clearlocks_get_local_client_ports(glusterd_volinfo_t *volinfo,
    glusterd_brickinfo_t *brickinfo = NULL;
    char brickname[PATH_MAX] = {0};

        gf_msg_debug(THIS->name, 0,
                     "Should pass non-NULL "

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
        if (gf_uuid_compare(brickinfo->uuid, MY_UUID))

        if (volinfo->transport_type == GF_TRANSPORT_RDMA) {
            len = snprintf(brickname, sizeof(brickname), "%s.rdma",
            len = snprintf(brickname, sizeof(brickname), "%s", brickinfo->path);
        if ((len < 0) || (len >= sizeof(brickname))) {

        port = pmap_registry_search(THIS, brickname, _gf_false);
            gf_msg_debug(THIS->name, 0,
                         "Couldn't get port "
                         brickinfo->hostname, brickinfo->path);

        ret = gf_asprintf(&xl_opts[i], "%s-client-%d.remote-port=%d",
                          volinfo->volname, index, port);
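/* Commit phase of clear-locks: builds the virtual getxattr command string
 * from kind/type/opts, discovers local brick ports, mounts the maintenance
 * client, sends the command, and returns the lock summary via rsp_dict
 * ("lk-summary") before unmounting and cleaning up. */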
glusterd_op_clearlocks_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
    char *volname = NULL;
    char *cmd_str = NULL;
    char *free_ptr = NULL;
    char msg[PATH_MAX] = {0};
    char result[PATH_MAX] = {0};
    char **xl_opts = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    xlator_t *this = THIS;

    ret = dict_get_str(dict, "volname", &volname);
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                "Key=volname", NULL);
    gf_msg_debug("glusterd", 0, "Performing clearlocks on volume %s", volname);

    ret = dict_get_str(dict, "path", &path);
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=path",

    ret = dict_get_str(dict, "kind", &kind);
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=kind",

    ret = dict_get_str(dict, "type", &type);
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "Key=type",

    ret = dict_get_str(dict, "opts", &opts);

    gf_smsg(this->name, GF_LOG_INFO, 0, GD_MSG_CLRCLK_VOL_REQ_RCVD,
            "Volume=%s, Kind=%s, Type=%s, Options=%s", volname, kind, type,

        ret = gf_asprintf(&cmd_str, GF_XATTR_CLRLK_CMD ".t%s.k%s.%s", type,
        ret = gf_asprintf(&cmd_str, GF_XATTR_CLRLK_CMD ".t%s.k%s", type, kind);

    ret = glusterd_volinfo_find(volname, &volinfo);
        snprintf(msg, sizeof(msg), "Volume %s doesn't exist.", volname);
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "Volume=%s",

    xl_opts = GF_CALLOC(volinfo->brick_count + 1, sizeof(char *),
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);

    ret = glusterd_clearlocks_get_local_client_ports(volinfo, xl_opts);
        snprintf(msg, sizeof(msg),
                 "Couldn't get port numbers of "
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRK_PORT_NUM_GET_FAIL,

    ret = glusterd_clearlocks_create_mount(volinfo, &mntpt);
        snprintf(msg, sizeof(msg),
                 "Creating mount directory "
                 "for clear-locks failed.");
        gf_smsg(this->name, GF_LOG_ERROR, 0,
                GD_MSG_CLRLOCKS_MOUNTDIR_CREATE_FAIL, NULL);

    ret = glusterd_clearlocks_mount(volinfo, xl_opts, mntpt);
        snprintf(msg, sizeof(msg),
                 "Failed to mount clear-locks "
                 "maintenance client.");
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLRLOCKS_CLNT_MOUNT_FAIL,

    ret = glusterd_clearlocks_send_cmd(volinfo, cmd_str, path, result, msg,
                                       sizeof(msg), mntpt);
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLRCLK_SND_CMD_FAIL, NULL);

    free_ptr = gf_strdup(result);
    if (dict_set_dynstr_sizen(rsp_dict, "lk-summary", free_ptr)) {
        snprintf(msg, sizeof(msg),
                 "Failed to set clear-locks "
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                "Key=lk-summary", NULL);

    glusterd_clearlocks_unmount(volinfo, mntpt);

    if (glusterd_clearlocks_rmdir_mount(volinfo, mntpt))
        gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_CLRLOCKS_CLNT_UMOUNT_FAIL,

    *op_errstr = gf_strdup(msg);

    for (i = 0; i < volinfo->brick_count && xl_opts[i]; i++)
        GF_FREE(xl_opts[i]);