#include <glusterfs/glusterfs.h>
#include <glusterfs/compat.h>
#include <glusterfs/dict.h>
#include <glusterfs/logging.h>
#include <glusterfs/syscall.h>
#include <glusterfs/timer.h>
#include <glusterfs/compat-errno.h>
#include <glusterfs/run.h>
#include "glusterd-mem-types.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-mgmt.h"
#include "glusterd-server-quorum.h"
#include "glusterd-store.h"
#include "glusterd-locks.h"
#include "glusterd-snapshot-utils.h"
#include "glusterd-geo-rep.h"
#include "glusterd-mountbroker.h"
#include "glusterd-messages.h"
#include "glusterd-errno.h"
#include "glusterd-syncop.h"
#include "protocol-utils.h"
#define STATUS_STRLEN 128

GF_DEPROBE_NOT_FRIEND,
GF_DEPROBE_BRICK_EXIST,
GF_DEPROBE_FRIEND_DOWN,
GF_DEPROBE_QUORUM_NOT_MET,
GF_DEPROBE_FRIEND_DETACHING,
GF_DEPROBE_SNAP_BRICK_EXIST,
glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data);
glusterd_handle_tier(rpcsvc_request_t *req);
glusterd_get_volumes(rpcsvc_request_t *req, dict_t *dict, int32_t flags);
glusterd_list_friends(rpcsvc_request_t *req, dict_t *dict, int32_t flags);
glusterd_deprobe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
uuid_t uuid, dict_t *dict, int *op_errno);
glusterd_friend_add(const char *hoststr, int port,
glusterd_friend_sm_state_t state, uuid_t *uuid,
glusterd_peerinfo_t **friend, gf_boolean_t restore,
glusterd_peerctx_args_t *args);
glusterd_probe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
dict_t *dict, int *op_errno);
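/*
 * All RPC notifications and request handlers in glusterd are serialized under
 * a single process-wide synclock ("big lock"). This wrapper grabs
 * priv->big_lock, invokes the real notify function, and releases the lock.
 */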
glusterd_big_locked_notify(struct rpc_clnt *rpc, void *mydata,
rpc_clnt_event_t event, void *data,
rpc_clnt_notify_t notify_fn)
glusterd_conf_t *priv = THIS->private;
synclock_lock(&priv->big_lock);
ret = notify_fn(rpc, mydata, event, data);
synclock_unlock(&priv->big_lock);
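/*
 * Same big-lock pattern for rpcsvc actors: every handler registered with the
 * RPC layer is routed through this wrapper so the actual actor runs with
 * priv->big_lock held.
 */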
glusterd_big_locked_handler(rpcsvc_request_t *req, rpcsvc_actor actor_fn)
glusterd_conf_t *priv = THIS->private;
synclock_lock(&priv->big_lock);
synclock_unlock(&priv->big_lock);
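/*
 * Only dictionary keys ending in one of these suffixes (volume checksums and
 * versions; ".ckusm" is the historical key spelling glusterd uses on the
 * wire) need to be unserialized from an incoming friend request; see
 * dict_unserialize_specific_keys() below.
 */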
static char *specific_key_suffix[] = {".quota-cksum", ".ckusm", ".version",
".quota-version", ".name"};
glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
int port, gd1_mgmt_friend_req *friend_req)
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_friend_sm_event_t *event = NULL;
glusterd_friend_req_ctx_t *ctx = NULL;
char rhost[UNIX_PATH_MAX + 1] = {0};
dict_t *peer_ver = NULL;
int totcount = sizeof(specific_key_suffix) / sizeof(specific_key_suffix[0]);
port = GF_DEFAULT_BASE_PORT;
ret = glusterd_remote_hostname_get(req, rhost, sizeof(rhost));
ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
peer_ver = dict_new();
if (!ctx || !dict || !peer_ver) {
gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
"Unable to allocate memory");
peerinfo = glusterd_peerinfo_find(uuid, rhost);
if (peerinfo == NULL) {
gf_event(EVENT_PEER_REJECT, "peer=%s", hostname);
ret = glusterd_xfer_friend_add_resp(req, hostname, rhost, port, -1,
GF_PROBE_UNKNOWN_PEER);
if (friend_req->vols.vols_val) {
free(friend_req->vols.vols_val);
friend_req->vols.vols_val = NULL;
ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_RCVD_FRIEND_REQ, &event);
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
"event generation failed: %d", ret);
event->peername = gf_strdup(peerinfo->hostname);
gf_uuid_copy(event->peerid, peerinfo->uuid);
gf_uuid_copy(ctx->uuid, uuid);
ctx->hostname = gf_strdup(hostname);
ret = dict_unserialize_specific_keys(
friend_req->vols.vols_val, friend_req->vols.vols_len, &dict,
specific_key_suffix, &peer_ver, totcount);
gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
dict->extra_stdfree = friend_req->vols.vols_val;
ctx->peer_ver = peer_ver;
ret = glusterd_friend_sm_inject_event(event);
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
"Unable to inject event %d, "
if (peerinfo && (0 == peerinfo->connected))
ret = GLUSTERD_CONNECTION_AWAITED;
if (ret && (ret != GLUSTERD_CONNECTION_AWAITED)) {
if (ctx && ctx->hostname)
GF_FREE(ctx->hostname);
if ((!dict->extra_stdfree) && friend_req->vols.vols_val)
free(friend_req->vols.vols_val);
free(friend_req->vols.vols_val);
dict_unref(peer_ver);
GF_FREE(event->peername);
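/*
 * Handles a detach (remove-friend) request from a peer: looks up the sender,
 * responds directly if it is unknown, and otherwise injects a
 * GD_FRIEND_EVENT_RCVD_REMOVE_FRIEND event into the friend state machine.
 */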
glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_friend_sm_event_t *event = NULL;
glusterd_friend_req_ctx_t *ctx = NULL;
port = GF_DEFAULT_BASE_PORT;
ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
peerinfo = glusterd_peerinfo_find(uuid, hostname);
if (peerinfo == NULL) {
gf_msg("glusterd", GF_LOG_CRITICAL, 0, GD_MSG_REQ_FROM_UNKNOWN_PEER,
"Received remove-friend from unknown peer %s", hostname);
ret = glusterd_xfer_friend_remove_resp(req, hostname, port);
ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_RCVD_REMOVE_FRIEND,
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
"event generation failed: %d", ret);
event->peername = gf_strdup(hostname);
gf_uuid_copy(event->peerid, uuid);
gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
"Unable to allocate memory");
gf_uuid_copy(ctx->uuid, uuid);
ctx->hostname = gf_strdup(hostname);
ret = glusterd_friend_sm_inject_event(event);
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
"Unable to inject event %d, "
if (ctx && ctx->hostname)
GF_FREE(ctx->hostname);
GF_FREE(event->peername);
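/*
 * dict_foreach() callback that copies one reconfigured option into the
 * response dictionary as "volume<N>.option.<key>", skipping the global
 * option-version counter and a few quota/snapshot keys that are gated on the
 * cluster's op-version.
 */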
_build_option_key(dict_t *d, char *k, data_t *v, void *tmp)
char reconfig_key[256] = {
struct args_pack *pack = NULL;
glusterd_conf_t *priv = NULL;
priv = THIS->private;
if (strcmp(k, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
if (priv->op_version > GD_OP_VERSION_MIN) {
if ((strcmp(k, "features.limit-usage") == 0) ||
(strcmp(k, "features.soft-limit") == 0))
if ((strcmp(k, "snap-max-hard-limit") == 0) ||
(strcmp(k, "snap-max-soft-limit") == 0))
keylen = snprintf(reconfig_key, sizeof(reconfig_key), "volume%d.option.%s",
ret = dict_set_strn(pack->dict, reconfig_key, keylen, v->data);
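/*
 * For an arbiter volume (replica_count > 1, arbiter_count == 1), marks the
 * last brick of each replica set with "volume<N>.brick<M>.isArbiter" = 1.
 */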
glusterd_add_arbiter_info_to_bricks(glusterd_volinfo_t *volinfo,
dict_t *volumes, int count)
if (volinfo->replica_count == 1 || volinfo->arbiter_count != 1)
for (i = 1; i <= volinfo->brick_count; i++) {
if (i % volinfo->replica_count != 0)
keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter", count,
ret = dict_set_int32n(volumes, key, keylen, 1);
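/*
 * Flattens one volinfo into the "volume<N>.*" keys consumed by the CLI's
 * "volume info" output: name, type, status, brick/replica/disperse counts,
 * transport, volume id, bricks (with uuids), thin-arbiter brick if present,
 * arbiter flags, and finally the reconfigured options via _build_option_key.
 */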
glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
glusterd_brickinfo_t *brickinfo = NULL;
glusterd_brickinfo_t *ta_brickinfo = NULL;
glusterd_conf_t *priv = NULL;
char *volume_id_str = NULL;
struct args_pack pack = {
xlator_t *this = THIS;
char ta_brick[4096] = {
priv = this->private;
keylen = snprintf(key, sizeof(key), "volume%d.name", count);
ret = dict_set_strn(volumes, key, keylen, volinfo->volname);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.type", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->type);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.status", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->status);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.brick_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->brick_count);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.dist_count", count);
ret = dict_set_int32n(volumes, key, keylen,
volinfo->brick_count / volinfo->dist_leaf_count);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.replica_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->replica_count);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.disperse_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->disperse_count);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.redundancy_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->redundancy_count);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.arbiter_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->arbiter_count);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.transport", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->transport_type);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->thin_arbiter_count);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
volume_id_str = gf_strdup(uuid_utoa(volinfo->volume_id));
if (!volume_id_str) {
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.volume_id", count);
ret = dict_set_dynstrn(volumes, key, keylen, volume_id_str);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.rebalance", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->rebal.defrag_cmd);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.snap_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->snap_count);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
char brick_uuid[64] = {
len = snprintf(brick, sizeof(brick), "%s:%s", brickinfo->hostname,
if ((len < 0) || (len >= sizeof(brick))) {
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
buf = gf_strdup(brick);
keylen = snprintf(key, sizeof(key), "volume%d.brick%d", count, i);
ret = dict_set_dynstrn(volumes, key, keylen, buf);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
keylen = snprintf(key, sizeof(key), "volume%d.brick%d.uuid", count, i);
snprintf(brick_uuid, sizeof(brick_uuid), "%s",
uuid_utoa(brickinfo->uuid));
buf = gf_strdup(brick_uuid);
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
"brick_uuid=%s", brick_uuid, NULL);
ret = dict_set_dynstrn(volumes, key, keylen, buf);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
if (volinfo->thin_arbiter_count == 1) {
ta_brickinfo = list_first_entry(&volinfo->ta_bricks,
glusterd_brickinfo_t, brick_list);
len = snprintf(ta_brick, sizeof(ta_brick), "%s:%s",
ta_brickinfo->hostname, ta_brickinfo->path);
if ((len < 0) || (len >= sizeof(ta_brick))) {
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
buf = gf_strdup(ta_brick);
keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_brick",
ret = dict_set_dynstrn(volumes, key, keylen, buf);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
ret = glusterd_add_arbiter_info_to_bricks(volinfo, volumes, count);
gf_smsg(this->name, GF_LOG_ERROR, errno,
GD_MSG_ARBITER_BRICK_SET_INFO_FAIL, NULL);
dict = volinfo->dict;
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
pack.vol_count = count;
dict_foreach(dict, _build_option_key, (void *)&pack);
dict_foreach(priv->opts, _build_option_key, &pack);
keylen = snprintf(key, sizeof(key), "volume%d.opt_count", pack.vol_count);
ret = dict_set_int32n(volumes, key, keylen, pack.opt_count);
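/*
 * Entry point for the older op-sm transaction framework: generates a
 * transaction id, stamps the originator uuid into the dict, takes the local
 * mgmt_v3 lock on the volume (skipped when no volume name is present),
 * records the txn opinfo, and kicks the op state machine with a start event.
 * On failure the local lock is released before returning.
 */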
glusterd_op_txn_begin(rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
char *err_str, size_t err_len)
xlator_t *this = THIS;
glusterd_conf_t *priv = NULL;
char *volname = NULL;
uuid_t *txn_id = NULL;
glusterd_op_info_t txn_op_info = {
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
uint32_t op_errno = 0;
GF_ASSERT((op > GD_OP_NONE) && (op < GD_OP_MAX));
GF_ASSERT(NULL != ctx);
priv = this->private;
ret = glusterd_generate_txn_id(dict, &txn_id);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_IDGEN_FAIL,
"Failed to generate transaction id");
ret = glusterd_set_originator_uuid(dict);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UUID_SET_FAIL,
"Failed to set originator_uuid.");
ret = dict_get_str(dict, "volname", &tmp);
gf_msg(this->name, GF_LOG_INFO, -ret, GD_MSG_DICT_GET_FAILED,
"No Volume name present. "
"Locks not being held.");
goto local_locking_done;
volname = gf_strdup(tmp);
ret = dict_get_time(dict, "timeout", &timeout);
priv->mgmt_v3_lock_timeout = timeout + 120;
ret = glusterd_mgmt_v3_lock(volname, MY_UUID, &op_errno, "vol");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
"Unable to acquire lock for %s", volname);
snprintf(err_str, err_len,
"Another transaction is in progress for %s. "
"Please try again after some time.",
volname = gf_strdup(tmp);
gf_msg_debug(this->name, 0, "Acquired lock on localhost");
event_type = GD_OP_EVENT_START_LOCK;
txn_op_info.state = GD_OP_STATE_LOCK_SENT;
event_type = GD_OP_EVENT_ALL_ACC;
glusterd_txn_opinfo_init(&txn_op_info, 0, (int *)&op, ctx, req);
ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
"Unable to set transaction's opinfo");
ret = glusterd_op_sm_inject_event(event_type, txn_id, ctx);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
"Failed to acquire cluster"
ret = glusterd_mgmt_v3_unlock(volname, MY_UUID, "vol");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
"Unable to release lock for %s", volname);
gf_msg_debug(this->name, 0, "Returning %d", ret);
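/*
 * Peer-facing handler for the legacy cluster-wide lock: decodes the lock
 * request, ignores it if the sender is not a cluster member, and injects a
 * GD_OP_EVENT_LOCK event (using the global transaction id) into the op state
 * machine.
 */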
__glusterd_handle_cluster_lock(rpcsvc_request_t *req)
dict_t *op_ctx = NULL;
gd1_mgmt_cluster_lock_req lock_req = {
glusterd_op_lock_ctx_t *ctx = NULL;
glusterd_op_sm_event_type_t op = GD_OP_EVENT_LOCK;
glusterd_op_info_t txn_op_info = {
glusterd_conf_t *priv = NULL;
uuid_t *txn_id = NULL;
xlator_t *this = THIS;
priv = this->private;
txn_id = &priv->global_txn_id;
ret = xdr_to_generic(req->msg[0], &lock_req,
(xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"Failed to decode lock "
"request received from peer");
req->rpc_err = GARBAGE_ARGS;
gf_msg_debug(this->name, 0, "Received LOCK from uuid: %s",
uuid_utoa(lock_req.uuid));
ret = (glusterd_peerinfo_find_by_uuid(lock_req.uuid) == NULL);
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"belong to the cluster. Ignoring request.",
uuid_utoa(lock_req.uuid));
ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t);
gf_uuid_copy(ctx->uuid, lock_req.uuid);
gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
"Unable to set new dict");
glusterd_txn_opinfo_init(&txn_op_info, 0, (int *)&op, op_ctx, req);
ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
"Unable to set transaction's opinfo");
dict_unref(txn_op_info.op_ctx);
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_LOCK, txn_id, ctx);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
"Failed to inject event GD_OP_EVENT_LOCK");
gf_msg_debug(this->name, 0, "Returning %d", ret);
glusterd_friend_sm();

glusterd_handle_cluster_lock(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_cluster_lock);
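/*
 * Common helper for the stage/commit handlers: allocates a glusterd_req_ctx_t
 * of the given mem-type, copies the sender uuid, and unserializes the request
 * buffer into req_ctx->dict.
 */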
glusterd_req_ctx_create(rpcsvc_request_t *rpc_req, int op, uuid_t uuid,
char *buf_val, size_t buf_len,
gf_gld_mem_types_t mem_type,
glusterd_req_ctx_t **req_ctx_out)
glusterd_req_ctx_t *req_ctx = NULL;
xlator_t *this = THIS;
gf_uuid_unparse(uuid, str);
gf_msg_debug(this->name, 0, "Received op from uuid %s", str);
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
req_ctx = GF_CALLOC(1, sizeof(*req_ctx), mem_type);
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
gf_uuid_copy(req_ctx->uuid, uuid);
ret = dict_unserialize(buf_val, buf_len, &dict);
gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
req_ctx->dict = dict;
req_ctx->req = rpc_req;
*req_ctx_out = req_ctx;
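/*
 * Peer-facing stage handler: builds a req_ctx from the decoded request,
 * validates the sender, and injects GD_OP_EVENT_STAGE_OP. If no opinfo exists
 * yet for the transaction id, a fresh one is initialized in
 * GD_OP_STATE_LOCKED with locking skipped (except for geo-rep's
 * GD_OP_GSYNC_SET).
 */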
__glusterd_handle_stage_op(rpcsvc_request_t *req)
glusterd_req_ctx_t *req_ctx = NULL;
gd1_mgmt_stage_op_req op_req = {
xlator_t *this = THIS;
uuid_t *txn_id = NULL;
glusterd_op_info_t txn_op_info = {
glusterd_op_sm_state_t state = GD_OP_STATE_DEFAULT;
glusterd_conf_t *priv = NULL;
priv = this->private;
txn_id = &priv->global_txn_id;
ret = xdr_to_generic(req->msg[0], &op_req,
(xdrproc_t)xdr_gd1_mgmt_stage_op_req);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"Failed to decode stage "
"request received from peer");
req->rpc_err = GARBAGE_ARGS;
ret = glusterd_req_ctx_create(req, op_req.op, op_req.uuid,
op_req.buf.buf_val, op_req.buf.buf_len,
gf_gld_mt_op_stage_ctx_t, &req_ctx);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_CTX_CREATE_FAIL,
"Failed to create req_ctx");
ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
ret = (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL);
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"belong to the cluster. Ignoring request.",
uuid_utoa(op_req.uuid));
ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info);
gf_msg_debug(this->name, 0, "No transaction's opinfo set");
state = GD_OP_STATE_LOCKED;
glusterd_txn_opinfo_init(&txn_op_info, state, &op_req.op, req_ctx->dict,
if (req_ctx->op != GD_OP_GSYNC_SET)
txn_op_info.skip_locking = _gf_true;
ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
"Unable to set transaction's opinfo");
dict_unref(req_ctx->dict);
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_STAGE_OP, txn_id, req_ctx);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
"Failed to inject event GD_OP_EVENT_STAGE_OP");
free(op_req.buf.buf_val);
glusterd_friend_sm();

glusterd_handle_stage_op(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_stage_op);
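/*
 * Peer-facing commit handler: mirrors the stage handler but injects
 * GD_OP_EVENT_COMMIT_OP. The GF_ASSERT below documents that the commit and
 * stage XDR request types must stay layout-compatible, since
 * glusterd_req_ctx_create() is shared between them.
 */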
__glusterd_handle_commit_op(rpcsvc_request_t *req)
glusterd_req_ctx_t *req_ctx = NULL;
gd1_mgmt_commit_op_req op_req = {
xlator_t *this = THIS;
uuid_t *txn_id = NULL;
glusterd_conf_t *priv = NULL;
priv = this->private;
txn_id = &priv->global_txn_id;
ret = xdr_to_generic(req->msg[0], &op_req,
(xdrproc_t)xdr_gd1_mgmt_commit_op_req);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"Failed to decode commit "
"request received from peer");
req->rpc_err = GARBAGE_ARGS;
ret = (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL);
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"belong to the cluster. Ignoring request.",
uuid_utoa(op_req.uuid));
GF_ASSERT(sizeof(gd1_mgmt_commit_op_req) == sizeof(gd1_mgmt_stage_op_req));
ret = glusterd_req_ctx_create(req, op_req.op, op_req.uuid,
op_req.buf.buf_val, op_req.buf.buf_len,
gf_gld_mt_op_commit_ctx_t, &req_ctx);
ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_COMMIT_OP, txn_id, req_ctx);
free(op_req.buf.buf_val);
glusterd_friend_sm();

glusterd_handle_commit_op(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_commit_op);
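/*
 * CLI "peer probe" handler: decodes the request, extracts hostname and port,
 * rejects the probe if server quorum is not met, short-circuits probes of
 * localhost (or of the bind-address when one is configured) and of hosts that
 * are already friends, and otherwise starts the probe via
 * glusterd_probe_begin().
 */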
__glusterd_handle_cli_probe(rpcsvc_request_t *req)
gf_cli_req cli_req = {
glusterd_peerinfo_t *peerinfo = NULL;
gf_boolean_t run_fsm = _gf_true;
xlator_t *this = THIS;
char *bind_name = NULL;
dict_t *dict = NULL;
char *hostname = NULL;
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"xdr decoding error");
req->rpc_err = GARBAGE_ARGS;
if (cli_req.dict.dict_len) {
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
"unserialize req-buffer to dictionary");
ret = dict_get_str(dict, "hostname", &hostname);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
"Failed to get hostname");
ret = dict_get_int32(dict, "port", &port);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PORT_NOTFOUND_IN_DICT,
"Failed to get port");
if (glusterd_is_any_volume_in_server_quorum(this) &&
!does_gd_meet_server_quorum(this)) {
glusterd_xfer_cli_probe_resp(req, -1, GF_PROBE_QUORUM_NOT_MET, NULL,
hostname, port, dict);
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
"Server quorum not met. Rejecting operation.");
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_CLI_REQ_RECVD,
"Received CLI probe req %s %d", hostname, port);
if (dict_get_str(this->options, "transport.socket.bind-address",
gf_msg_debug("glusterd", 0,
"only checking probe address vs. bind address");
ret = gf_is_same_address(bind_name, hostname);
ret = glusterd_gf_is_local_addr(hostname);
glusterd_xfer_cli_probe_resp(req, 0, GF_PROBE_LOCALHOST, NULL, hostname,
peerinfo = glusterd_peerinfo_find_by_hostname(hostname);
ret = (peerinfo && gd_peer_has_address(peerinfo, hostname));
gf_msg_debug("glusterd", 0,
"Probe host %s port %d "
glusterd_xfer_cli_probe_resp(req, 0, GF_PROBE_FRIEND, NULL, hostname,
ret = glusterd_probe_begin(req, hostname, port, dict, &op_errno);
if (ret == GLUSTERD_CONNECTION_AWAITED) {
run_fsm = _gf_false;
} else if (ret == -1) {
glusterd_xfer_cli_probe_resp(req, -1, op_errno, NULL, hostname, port,
free(cli_req.dict.dict_val);
glusterd_friend_sm();

glusterd_handle_cli_probe(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_cli_probe);

glusterd_friend_contains_snap_bricks(glusterd_snap_t *snapinfo,
glusterd_volinfo_t *volinfo = NULL;
glusterd_brickinfo_t *brickinfo = NULL;
GF_VALIDATE_OR_GOTO("glusterd", snapinfo, out);
cds_list_for_each_entry(volinfo, &snapinfo->volumes, vol_list)
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
if (!gf_uuid_compare(brickinfo->uuid, friend_uuid)) {
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
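/*
 * CLI "peer detach" handler: resolves the peer's uuid, refuses to detach
 * localhost, and (unless force is given) refuses when the peer is down, when
 * it still hosts bricks of a volume or snapshot, or when detaching would
 * break server quorum; op_errno carries the specific GF_DEPROBE_* reason back
 * to the CLI.
 */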
__glusterd_handle_cli_deprobe(rpcsvc_request_t *req)
gf_cli_req cli_req = {
xlator_t *this = THIS;
glusterd_conf_t *priv = NULL;
dict_t *dict = NULL;
char *hostname = NULL;
glusterd_volinfo_t *volinfo = NULL;
glusterd_volinfo_t *tmp = NULL;
glusterd_snap_t *snapinfo = NULL;
glusterd_snap_t *tmpsnap = NULL;
gf_boolean_t need_free = _gf_false;
priv = this->private;
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"request received from cli");
req->rpc_err = GARBAGE_ARGS;
if (cli_req.dict.dict_len) {
need_free = _gf_true;
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
"unserialize req-buffer to dictionary");
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_CLI_REQ_RECVD,
"Received CLI deprobe req");
ret = dict_get_str(dict, "hostname", &hostname);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
"Failed to get hostname");
ret = dict_get_int32(dict, "port", &port);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PORT_NOTFOUND_IN_DICT,
"Failed to get port");
ret = dict_get_int32(dict, "flags", &flags);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FLAGS_NOTFOUND_IN_DICT,
"Failed to get flags");
ret = glusterd_hostname_to_uuid(hostname, uuid);
op_errno = GF_DEPROBE_NOT_FRIEND;
if (!gf_uuid_compare(uuid, MY_UUID)) {
op_errno = GF_DEPROBE_LOCALHOST;
if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
if (!glusterd_chk_peers_connected_befriended(uuid)) {
op_errno = GF_DEPROBE_FRIEND_DOWN;
cds_list_for_each_entry_safe(volinfo, tmp, &priv->volumes, vol_list)
ret = glusterd_friend_contains_vol_bricks(volinfo, uuid);
op_errno = GF_DEPROBE_BRICK_EXIST;
cds_list_for_each_entry_safe(snapinfo, tmpsnap, &priv->snapshots, snap_list)
ret = glusterd_friend_contains_snap_bricks(snapinfo, uuid);
op_errno = GF_DEPROBE_SNAP_BRICK_EXIST;
if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
if (glusterd_is_any_volume_in_server_quorum(this) &&
!does_gd_meet_server_quorum(this)) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
"Server quorum not met. Rejecting operation.");
op_errno = GF_DEPROBE_QUORUM_NOT_MET;
if (!gf_uuid_is_null(uuid)) {
ret = glusterd_deprobe_begin(req, hostname, port, uuid, dict,
ret = glusterd_deprobe_begin(req, hostname, port, NULL, dict,
need_free = _gf_false;
free(cli_req.dict.dict_val);
ret = glusterd_xfer_cli_deprobe_resp(req, ret, op_errno, NULL, hostname,
glusterd_friend_sm();

glusterd_handle_cli_deprobe(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_cli_deprobe);
__glusterd_handle_cli_list_friends(rpcsvc_request_t *req)
gf1_cli_peer_list_req cli_req = {
dict_t *dict = NULL;
ret = xdr_to_generic(req->msg[0], &cli_req,
(xdrproc_t)xdr_gf1_cli_peer_list_req);
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"request received from cli");
req->rpc_err = GARBAGE_ARGS;
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_CLI_REQ_RECVD,
"Received cli list req");
if (cli_req.dict.dict_len) {
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
"unserialize req-buffer to dictionary");
dict->extra_stdfree = cli_req.dict.dict_val;
ret = glusterd_list_friends(req, dict, cli_req.flags);
glusterd_friend_sm();

glusterd_handle_cli_list_friends(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_cli_list_friends);

__glusterd_handle_cli_get_volume(rpcsvc_request_t *req)
gf_cli_req cli_req = {{
dict_t *dict = NULL;
xlator_t *this = THIS;
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"request received from cli");
req->rpc_err = GARBAGE_ARGS;
gf_msg(this->name, GF_LOG_DEBUG, 0, GD_MSG_GET_VOL_REQ_RCVD,
"Received get vol req");
if (cli_req.dict.dict_len) {
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
"unserialize req-buffer to dictionary");
dict->extra_stdfree = cli_req.dict.dict_val;
ret = dict_get_int32(dict, "flags", &flags);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FLAGS_NOTFOUND_IN_DICT,
"failed to get flags");
ret = glusterd_get_volumes(req, dict, flags);
glusterd_friend_sm();

glusterd_handle_cli_get_volume(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_cli_get_volume);
__glusterd_handle_cli_uuid_reset(rpcsvc_request_t *req)
dict_t *dict = NULL;
xlator_t *this = THIS;
glusterd_conf_t *priv = NULL;
gf_cli_req cli_req = {{
char msg_str[128] = {
priv = this->private;
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"request received from cli");
req->rpc_err = GARBAGE_ARGS;
gf_msg_debug("glusterd", 0, "Received uuid reset req");
if (cli_req.dict.dict_len) {
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
"unserialize req-buffer to dictionary");
snprintf(msg_str, sizeof(msg_str),
dict->extra_stdfree = cli_req.dict.dict_val;
if (!cds_list_empty(&priv->volumes)) {
snprintf(msg_str, sizeof(msg_str),
"volumes are already "
"present in the cluster. Resetting uuid is not "
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLS_ALREADY_PRESENT, "%s",
if (!cds_list_empty(&priv->peers)) {
snprintf(msg_str, sizeof(msg_str),
"trusted storage pool "
"has been already formed. Please detach this peer "
"from the pool and reset its uuid.");
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_TSP_ALREADY_FORMED, "%s",
gf_uuid_copy(uuid, priv->uuid);
ret = glusterd_uuid_generate_save();
if (!gf_uuid_compare(uuid, MY_UUID)) {
snprintf(msg_str, sizeof(msg_str),
"old uuid and the new uuid"
" are same. Try gluster peer reset again");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UUIDS_SAME_RETRY, "%s",
if (msg_str[0] == '\0')
snprintf(msg_str, sizeof(msg_str),
rsp.op_errstr = msg_str;
glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);

glusterd_handle_cli_uuid_reset(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_cli_uuid_reset);
__glusterd_handle_cli_uuid_get(rpcsvc_request_t *req)
dict_t *dict = NULL;
dict_t *rsp_dict = NULL;
xlator_t *this = THIS;
gf_cli_req cli_req = {{
char err_str[64] = {
char uuid_str[64] = {
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"request received from cli");
req->rpc_err = GARBAGE_ARGS;
gf_msg_debug("glusterd", 0, "Received uuid get req");
if (cli_req.dict.dict_len) {
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
"unserialize req-buffer to dictionary");
snprintf(err_str, sizeof(err_str),
dict->extra_stdfree = cli_req.dict.dict_val;
rsp_dict = dict_new();
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
uuid_utoa_r(MY_UUID, uuid_str);
ret = dict_set_str_sizen(rsp_dict, "uuid", uuid_str);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
"Failed to set uuid in "
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
gf_smsg(this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
if (err_str[0] == '\0')
snprintf(err_str, sizeof(err_str),
rsp.op_errstr = err_str;
glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);
dict_unref(rsp_dict);
GF_FREE(rsp.dict.dict_val);

glusterd_handle_cli_uuid_get(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_cli_uuid_get);
__glusterd_handle_cli_list_volume(rpcsvc_request_t *req)
dict_t *dict = NULL;
glusterd_conf_t *priv = NULL;
glusterd_volinfo_t *volinfo = NULL;
priv = THIS->private;
gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
keylen = snprintf(key, sizeof(key), "volume%d", count);
ret = dict_set_strn(dict, key, keylen, volinfo->volname);
ret = dict_set_int32_sizen(dict, "count", count);
gf_smsg("glusterd", GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
rsp.op_errstr = "Error listing volumes";
glusterd_submit_reply(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp);
GF_FREE(rsp.dict.dict_val);
glusterd_friend_sm();

glusterd_handle_cli_list_volume(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_cli_list_volume);
glusterd_op_begin(rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
char *err_str, size_t err_len)
ret = glusterd_op_txn_begin(req, op, ctx, err_str, err_len);
__glusterd_handle_ganesha_cmd(rpcsvc_request_t *req)
gf_cli_req cli_req = {{
dict_t *dict = NULL;
glusterd_op_t cli_op = GD_OP_GANESHA;
char *op_errstr = NULL;
char err_str[2048] = {
xlator_t *this = THIS;
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
snprintf(err_str, sizeof(err_str),
"request received from cli");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
req->rpc_err = GARBAGE_ARGS;
if (cli_req.dict.dict_len) {
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
"unserialize req-buffer to dictionary");
snprintf(err_str, sizeof(err_str),
dict->extra_stdfree = cli_req.dict.dict_val;
gf_msg_trace(this->name, 0, "Received global option request");
ret = glusterd_op_begin_synctask(req, GD_OP_GANESHA, dict);
if (err_str[0] == '\0')
snprintf(err_str, sizeof(err_str), "Operation failed");
ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);

glusterd_handle_ganesha_cmd(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_ganesha_cmd);
__glusterd_handle_reset_volume(rpcsvc_request_t *req)
gf_cli_req cli_req = {{
dict_t *dict = NULL;
glusterd_op_t cli_op = GD_OP_RESET_VOLUME;
char *volname = NULL;
char err_str[64] = {
xlator_t *this = THIS;
gf_msg(this->name, GF_LOG_INFO, 0, 0, "Received reset vol req");
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
snprintf(err_str, sizeof(err_str),
"Failed to decode request "
"received from cli");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
req->rpc_err = GARBAGE_ARGS;
if (cli_req.dict.dict_len) {
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
"unserialize req-buffer to dictionary");
snprintf(err_str, sizeof(err_str),
dict->extra_stdfree = cli_req.dict.dict_val;
ret = dict_get_str(dict, "volname", &volname);
snprintf(err_str, sizeof(err_str),
"Failed to get volume "
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
gf_msg_debug(this->name, 0,
"Received volume reset request for "
ret = glusterd_op_begin_synctask(req, GD_OP_RESET_VOLUME, dict);
if (err_str[0] == '\0')
snprintf(err_str, sizeof(err_str), "Operation failed");
ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);

glusterd_handle_reset_volume(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_reset_volume);
__glusterd_handle_set_volume(rpcsvc_request_t *req)
gf_cli_req cli_req = {{
dict_t *dict = NULL;
glusterd_op_t cli_op = GD_OP_SET_VOLUME;
char *volname = NULL;
char *op_errstr = NULL;
gf_boolean_t help = _gf_false;
char err_str[2048] = {
xlator_t *this = THIS;
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
snprintf(err_str, sizeof(err_str),
"request received from cli");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
req->rpc_err = GARBAGE_ARGS;
if (cli_req.dict.dict_len) {
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
gf_msg(this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_UNSERIALIZE_FAIL,
"unserialize req-buffer to dictionary");
snprintf(err_str, sizeof(err_str),
dict->extra_stdfree = cli_req.dict.dict_val;
ret = dict_get_str(dict, "volname", &volname);
snprintf(err_str, sizeof(err_str),
"Failed to get volume "
"name while handling volume set command");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
if (strcmp(volname, "help") == 0 || strcmp(volname, "help-xml") == 0) {
ret = glusterd_volset_help(dict, &op_errstr);
ret = dict_get_str(dict, "key1", &key);
snprintf(err_str, sizeof(err_str),
"Failed to get key while"
" handling volume set for %s",
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
ret = dict_get_str(dict, "value1", &value);
snprintf(err_str, sizeof(err_str),
"Failed to get value while"
" handling volume set for %s",
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
gf_msg_debug(this->name, 0,
"Received volume set request for "
ret = glusterd_op_begin_synctask(req, GD_OP_SET_VOLUME, dict);
ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict,
(op_errstr) ? op_errstr : "");
if (err_str[0] == '\0')
snprintf(err_str, sizeof(err_str), "Operation failed");
ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);

glusterd_handle_set_volume(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_set_volume);
__glusterd_handle_sync_volume(rpcsvc_request_t *req)
gf_cli_req cli_req = {{
dict_t *dict = NULL;
gf_cli_rsp cli_rsp = {0,};
char *volname = NULL;
gf1_cli_sync_volume flags = 0;
char *hostname = NULL;
xlator_t *this = THIS;
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
"request received from cli");
req->rpc_err = GARBAGE_ARGS;
if (cli_req.dict.dict_len) {
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
"unserialize req-buffer to dictionary");
snprintf(msg, sizeof(msg),
"Unable to decode the "
dict->extra_stdfree = cli_req.dict.dict_val;
ret = dict_get_str(dict, "hostname", &hostname);
snprintf(msg, sizeof(msg), "Failed to get hostname");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
ret = dict_get_str(dict, "volname", &volname);
ret = dict_get_int32(dict, "flags", (int32_t *)&flags);
snprintf(msg, sizeof(msg), "Failed to get volume name or flags");
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FLAGS_NOTFOUND_IN_DICT,
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_VOL_SYNC_REQ_RCVD,
"Received volume sync req "
(flags & GF_CLI_SYNC_ALL) ? "all" : volname);
if (glusterd_gf_is_local_addr(hostname)) {
snprintf(msg, sizeof(msg),
"sync from localhost"
gf_msg(this->name, GF_LOG_ERROR, 0,
GD_MSG_SYNC_FROM_LOCALHOST_UNALLOWED, "%s", msg);
ret = glusterd_op_begin_synctask(req, GD_OP_SYNC_VOLUME, dict);
cli_rsp.op_ret = -1;
cli_rsp.op_errstr = msg;
snprintf(msg, sizeof(msg), "Operation failed");
glusterd_to_cli(req, &cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp,

glusterd_handle_sync_volume(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_sync_volume);
glusterd_fsm_log_send_resp(rpcsvc_request_t *req, int op_ret, char *op_errstr,
gf1_cli_fsm_log_rsp rsp = {0};
GF_ASSERT(op_errstr);
rsp.op_ret = op_ret;
rsp.op_errstr = op_errstr;
if (rsp.op_ret == 0) {
ret = dict_allocate_and_serialize(dict, &rsp.fsm_log.fsm_log_val,
&rsp.fsm_log.fsm_log_len);
gf_smsg("glusterd", GF_LOG_ERROR, errno,
GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gf1_cli_fsm_log_rsp);
GF_FREE(rsp.fsm_log.fsm_log_val);
gf_msg_debug("glusterd", 0, "Responded, ret: %d", ret);
glusterd_sm_tr_log_transition_add_to_dict(dict_t *dict,
glusterd_sm_tr_log_t *log, int i,
char timestr[GF_TIMESTR_SIZE] = "";
keylen = snprintf(key, sizeof(key), "log%d-old-state", count);
str = log->state_name_get(log->transitions[i].old_state);
ret = dict_set_strn(dict, key, keylen, str);
keylen = snprintf(key, sizeof(key), "log%d-event", count);
str = log->event_name_get(log->transitions[i].event);
ret = dict_set_strn(dict, key, keylen, str);
keylen = snprintf(key, sizeof(key), "log%d-new-state", count);
str = log->state_name_get(log->transitions[i].new_state);
ret = dict_set_strn(dict, key, keylen, str);
snprintf(key, sizeof(key), "log%d-time", count);
gf_time_fmt_FT(timestr, sizeof timestr, log->transitions[i].time);
ret = dict_set_dynstr_with_alloc(dict, key, timestr);
if (key[0] != '\0' && ret != 0)
gf_smsg("glusterd", GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
"Key=%s", key, NULL);
gf_msg_debug("glusterd", 0, "returning %d", ret);
glusterd_sm_tr_log_add_to_dict(dict_t *dict, glusterd_sm_tr_log_t *circular_log)
glusterd_sm_tr_log_t *log = NULL;
GF_ASSERT(circular_log);
if (log->count == log->size)
start = log->current + 1;
end = start + log->count;
for (i = start; i < end; i++, count++) {
index = i % log->count;
ret = glusterd_sm_tr_log_transition_add_to_dict(dict, log, index,
ret = snprintf(key, sizeof(key), "count");
ret = dict_set_int32n(dict, key, ret, log->count);
gf_msg_debug("glusterd", 0, "returning %d", ret);
__glusterd_handle_fsm_log(rpcsvc_request_t *req)
gf1_cli_fsm_log_req cli_req = {
dict_t *dict = NULL;
xlator_t *this = THIS;
glusterd_conf_t *conf = NULL;
char msg[2048] = {0};
glusterd_peerinfo_t *peerinfo = NULL;
ret = xdr_to_generic(req->msg[0], &cli_req,
(xdrproc_t)xdr_gf1_cli_fsm_log_req);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"request received from client.");
req->rpc_err = GARBAGE_ARGS;
snprintf(msg, sizeof(msg), "Garbage request");
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
if (strcmp("", cli_req.name) == 0) {
conf = this->private;
ret = glusterd_sm_tr_log_add_to_dict(dict, &conf->op_sm_log);
peerinfo = glusterd_peerinfo_find_by_hostname(cli_req.name);
snprintf(msg, sizeof(msg), "%s is not a peer", cli_req.name);
ret = glusterd_sm_tr_log_add_to_dict(dict, &peerinfo->sm_log);
(void)glusterd_fsm_log_send_resp(req, ret, msg, dict);
glusterd_friend_sm();

glusterd_handle_fsm_log(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_fsm_log);
glusterd_op_lock_send_resp(rpcsvc_request_t *req, int32_t status)
gd1_mgmt_cluster_lock_rsp rsp = {
glusterd_get_uuid(&rsp.uuid);
rsp.op_ret = status;
ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
gf_msg_debug(THIS->name, 0, "Responded to lock, ret: %d", ret);

glusterd_op_unlock_send_resp(rpcsvc_request_t *req, int32_t status)
gd1_mgmt_cluster_unlock_rsp rsp = {
rsp.op_ret = status;
glusterd_get_uuid(&rsp.uuid);
ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
gf_msg_debug(THIS->name, 0, "Responded to unlock, ret: %d", ret);

glusterd_op_mgmt_v3_lock_send_resp(rpcsvc_request_t *req, uuid_t *txn_id,
gd1_mgmt_v3_lock_rsp rsp = {
glusterd_get_uuid(&rsp.uuid);
rsp.op_ret = status;
rsp.op_errno = errno;
gf_uuid_copy(rsp.txn_id, *txn_id);
ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
gf_msg_debug(THIS->name, 0, "Responded to mgmt_v3 lock, ret: %d", ret);

glusterd_op_mgmt_v3_unlock_send_resp(rpcsvc_request_t *req, uuid_t *txn_id,
gd1_mgmt_v3_unlock_rsp rsp = {
rsp.op_ret = status;
rsp.op_errno = errno;
glusterd_get_uuid(&rsp.uuid);
gf_uuid_copy(rsp.txn_id, *txn_id);
ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
gf_msg_debug(THIS->name, 0, "Responded to mgmt_v3 unlock, ret: %d", ret);
__glusterd_handle_cluster_unlock(rpcsvc_request_t *req)
gd1_mgmt_cluster_unlock_req unlock_req = {
glusterd_op_lock_ctx_t *ctx = NULL;
xlator_t *this = THIS;
uuid_t *txn_id = NULL;
glusterd_conf_t *priv = NULL;
priv = this->private;
txn_id = &priv->global_txn_id;
ret = xdr_to_generic(req->msg[0], &unlock_req,
(xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"Failed to decode unlock "
"request received from peer");
req->rpc_err = GARBAGE_ARGS;
gf_msg_debug(this->name, 0, "Received UNLOCK from uuid: %s",
uuid_utoa(unlock_req.uuid));
ret = (glusterd_peerinfo_find_by_uuid(unlock_req.uuid) == NULL);
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"belong to the cluster. Ignoring request.",
uuid_utoa(unlock_req.uuid));
ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t);
gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
gf_uuid_copy(ctx->uuid, unlock_req.uuid);
ret = glusterd_op_sm_inject_event(GD_OP_EVENT_UNLOCK, txn_id, ctx);
glusterd_friend_sm();

glusterd_handle_cluster_unlock(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_cluster_unlock);
glusterd_op_stage_send_resp(rpcsvc_request_t *req, int32_t op, int32_t status,
char *op_errstr, dict_t *rsp_dict)
gd1_mgmt_stage_op_rsp rsp = {
xlator_t *this = THIS;
rsp.op_ret = status;
glusterd_get_uuid(&rsp.uuid);
rsp.op_errstr = op_errstr;
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
gf_smsg(this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
gf_msg_debug(this->name, 0, "Responded to stage, ret: %d", ret);
GF_FREE(rsp.dict.dict_val);

glusterd_op_commit_send_resp(rpcsvc_request_t *req, int32_t op, int32_t status,
char *op_errstr, dict_t *rsp_dict)
gd1_mgmt_commit_op_rsp rsp = {
xlator_t *this = THIS;
rsp.op_ret = status;
glusterd_get_uuid(&rsp.uuid);
rsp.op_errstr = op_errstr;
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
gf_smsg(this->name, GF_LOG_ERROR, errno,
GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
gf_msg_debug(this->name, 0, "Responded to commit, ret: %d", ret);
GF_FREE(rsp.dict.dict_val);
__glusterd_handle_incoming_friend_req(rpcsvc_request_t *req)
gd1_mgmt_friend_req friend_req = {
gf_boolean_t run_fsm = _gf_true;
ret = xdr_to_generic(req->msg[0], &friend_req,
(xdrproc_t)xdr_gd1_mgmt_friend_req);
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"request received from friend");
req->rpc_err = GARBAGE_ARGS;
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PROBE_RCVD,
"Received probe from uuid: %s", uuid_utoa(friend_req.uuid));
ret = glusterd_handle_friend_req(req, friend_req.uuid, friend_req.hostname,
friend_req.port, &friend_req);
if (ret == GLUSTERD_CONNECTION_AWAITED) {
run_fsm = _gf_false;
free(friend_req.hostname);
glusterd_friend_sm();

glusterd_handle_incoming_friend_req(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req,
__glusterd_handle_incoming_friend_req);

__glusterd_handle_incoming_unfriend_req(rpcsvc_request_t *req)
gd1_mgmt_friend_req friend_req = {
char remote_hostname[UNIX_PATH_MAX + 1] = {
ret = xdr_to_generic(req->msg[0], &friend_req,
(xdrproc_t)xdr_gd1_mgmt_friend_req);
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"request received.");
req->rpc_err = GARBAGE_ARGS;
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_UNFRIEND_REQ_RCVD,
"Received unfriend from uuid: %s", uuid_utoa(friend_req.uuid));
ret = glusterd_remote_hostname_get(req, remote_hostname,
sizeof(remote_hostname));
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL,
"Unable to get the remote hostname");
ret = glusterd_handle_unfriend_req(req, friend_req.uuid, remote_hostname,
free(friend_req.hostname);
free(friend_req.vols.vols_val);
glusterd_friend_sm();

glusterd_handle_incoming_unfriend_req(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req,
__glusterd_handle_incoming_unfriend_req);
glusterd_handle_friend_update_delete(dict_t *dict)
char *hostname = NULL;
ret = dict_get_str(dict, "hostname", &hostname);
ret = glusterd_friend_remove(NULL, hostname);
gf_msg_debug("glusterd", 0, "Returning %d", ret);

glusterd_peer_hostname_update(glusterd_peerinfo_t *peerinfo,
const char *hostname, gf_boolean_t store_update)
GF_ASSERT(peerinfo);
GF_ASSERT(hostname);
ret = gd_add_address_to_peer(peerinfo, hostname, _gf_true);
gf_msg(THIS->name, GF_LOG_ERROR, 0,
GD_MSG_HOSTNAME_ADD_TO_PEERLIST_FAIL,
"Couldn't add address to the peer info");
ret = glusterd_store_peerinfo(peerinfo);
if (peerinfo->hostname != NULL) {
GF_FREE(peerinfo->hostname);
peerinfo->hostname = gf_strdup(hostname);
if (peerinfo->hostname == NULL) {
if (peerinfo->rpc == NULL)
char *remote_hostname = NULL;
remote_hostname = gf_strdup(hostname);
if (remote_hostname == NULL) {
ret = dict_set_dynstr_sizen(peerinfo->rpc->conn.trans->options,
"remote-host", remote_hostname);
gf_msg_debug(THIS->name, 0, "failed to set remote-host with %s",
GF_FREE(remote_hostname);
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
__glusterd_handle_friend_update(rpcsvc_request_t *req)
gd1_mgmt_friend_update friend_req = {
glusterd_peerinfo_t *peerinfo = NULL;
xlator_t *this = THIS;
gd1_mgmt_friend_update_rsp rsp = {
dict_t *dict = NULL;
char *uuid_buf = NULL;
glusterd_peerctx_args_t args = {0};
ret = xdr_to_generic(req->msg[0], &friend_req,
(xdrproc_t)xdr_gd1_mgmt_friend_update);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"request received");
req->rpc_err = GARBAGE_ARGS;
if (glusterd_peerinfo_find(friend_req.uuid, NULL) == NULL) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_REQ_FROM_UNKNOWN_PEER,
"Received friend update request "
"from unknown peer %s",
uuid_utoa(friend_req.uuid));
gf_event(EVENT_UNKNOWN_PEER, "peer=%s", uuid_utoa(friend_req.uuid));
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_FRIEND_UPDATE_RCVD,
"Received friend update from uuid: %s", uuid_utoa(friend_req.uuid));
if (friend_req.friends.friends_len) {
ret = dict_unserialize(friend_req.friends.friends_val,
friend_req.friends.friends_len, &dict);
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
"unserialize req-buffer to dictionary");
dict->extra_stdfree = friend_req.friends.friends_val;
ret = dict_get_int32(dict, "count", &count);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
ret = dict_get_int32(dict, "op", &op);
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
if (GD_FRIEND_UPDATE_DEL == op) {
(void)glusterd_handle_friend_update_delete(dict);
args.mode = GD_MODE_ON;
while (i <= count) {
keylen = snprintf(key, sizeof(key), "friend%d.uuid", i);
ret = dict_get_strn(dict, key, keylen, &uuid_buf);
gf_uuid_parse(uuid_buf, uuid);
if (!gf_uuid_compare(uuid, MY_UUID)) {
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_UUID_RECEIVED,
"Received my uuid as Friend");
snprintf(key, sizeof(key), "friend%d", i);
peerinfo = glusterd_peerinfo_find(uuid, NULL);
if (peerinfo == NULL) {
peerinfo = gd_peerinfo_from_dict(dict, key);
if (peerinfo == NULL) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
"Could not create peerinfo from dict "
peerinfo->state = GD_FRIEND_STATE_BEFRIENDED;
ret = glusterd_friend_add_from_peerinfo(peerinfo, 0, &args);
ret = gd_update_peerinfo_from_dict(peerinfo, dict, key);
gf_msg(this->name, GF_LOG_ERROR, 0,
GD_MSG_PEER_INFO_UPDATE_FAIL,
peerinfo->hostname);
ret = glusterd_store_peerinfo(peerinfo);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
"Failed to store peerinfo");
gf_event(EVENT_PEER_STORE_FAILURE, "peer=%s",
peerinfo->hostname);
gf_uuid_copy(rsp.uuid, MY_UUID);
ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);
if (!dict->extra_stdfree && friend_req.friends.friends_val)
free(friend_req.friends.friends_val);
free(friend_req.friends.friends_val);
glusterd_peerinfo_cleanup(peerinfo);
glusterd_friend_sm();

glusterd_handle_friend_update(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_friend_update);
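/*
 * Handler for the initial probe query from a would-be peer: rejects a probe
 * carrying our own uuid (GF_PROBE_SAME_UUID) or one from a node that already
 * belongs to a different cluster (GF_PROBE_ANOTHER_CLUSTER); for a genuinely
 * new peer it creates a peerinfo in GD_FRIEND_STATE_PROBE_RCVD before
 * replying.
 */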
__glusterd_handle_probe_query(rpcsvc_request_t *req)
xlator_t *this = THIS;
glusterd_conf_t *conf = NULL;
gd1_mgmt_probe_req probe_req = {
gd1_mgmt_probe_rsp rsp = {
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_peerctx_args_t args = {0};
char remote_hostname[UNIX_PATH_MAX + 1] = {
ret = xdr_to_generic(req->msg[0], &probe_req,
(xdrproc_t)xdr_gd1_mgmt_probe_req);
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
"Failed to decode probe "
req->rpc_err = GARBAGE_ARGS;
conf = this->private;
port = probe_req.port;
port = GF_DEFAULT_BASE_PORT;
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PROBE_RCVD,
"Received probe from uuid: %s", uuid_utoa(probe_req.uuid));
if (!gf_uuid_compare(probe_req.uuid, MY_UUID)) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_UUIDS_SAME_RETRY,
"Peer uuid %s is same as "
"local uuid. Please check the uuid of both the peers "
uuid_utoa(probe_req.uuid), GLUSTERD_DEFAULT_WORKDIR,
GLUSTERD_INFO_FILE);
rsp.op_errno = GF_PROBE_SAME_UUID;
ret = glusterd_remote_hostname_get(req, remote_hostname,
sizeof(remote_hostname));
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL,
"Unable to get the remote hostname");
peerinfo = glusterd_peerinfo_find(probe_req.uuid, remote_hostname);
if ((peerinfo == NULL) && (!cds_list_empty(&conf->peers))) {
rsp.op_errno = GF_PROBE_ANOTHER_CLUSTER;
} else if (peerinfo == NULL) {
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PEER_NOT_FOUND,
"Unable to find peerinfo"
" for host: %s (%d)",
remote_hostname, port);
args.mode = GD_MODE_ON;
ret = glusterd_friend_add(remote_hostname, port,
GD_FRIEND_STATE_PROBE_RCVD, NULL, &peerinfo,
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_PEER_ADD_FAIL,
"Failed to add peer %s", remote_hostname);
rsp.op_errno = GF_PROBE_ADD_FAILED;
gf_uuid_copy(rsp.uuid, MY_UUID);
rsp.hostname = probe_req.hostname;
glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
(xdrproc_t)xdr_gd1_mgmt_probe_rsp);
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
"Responded to %s, op_ret: %d, "
"op_errno: %d, ret: %d",
remote_hostname, rsp.op_ret, rsp.op_errno, ret);
free(probe_req.hostname);
glusterd_friend_sm();

glusterd_handle_probe_query(rpcsvc_request_t *req)
return glusterd_big_locked_handler(req, __glusterd_handle_probe_query);
3135
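/*
 * Handler for "gluster volume profile": clusters at op-version >= 6 run the
 * operation through the mgmt_v3 phases (including a brick-op phase), while
 * older clusters fall back to the op-sm framework, as logged below.
 */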
int
__glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
{
    /* ... */
    gf_cli_req cli_req = {{
        0,
    }};
    dict_t *dict = NULL;
    glusterd_op_t cli_op = GD_OP_PROFILE_VOLUME;
    char *volname = NULL;
    /* ... */
    char err_str[64] = {
        0,
    };
    /* ... */
    xlator_t *this = THIS;
    glusterd_conf_t *conf = NULL;

    /* ... */
    conf = this->private;
    GF_VALIDATE_OR_GOTO(this->name, conf, out);

    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
               /* ... */
               "request received from cli");
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    if (cli_req.dict.dict_len > 0) {
        /* ... */
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
                /* ... */);
        /* ... */
        dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, &dict);
        /* ... */
    }

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        snprintf(err_str, sizeof(err_str),
                 "Unable to get volume "
                 /* ... */);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
               /* ... */);
        goto out;
    }

    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_VOL_PROFILE_REQ_RCVD,
           "Received volume profile req "
           /* ... */);
    ret = dict_get_int32(dict, "op", &op);
    if (ret) {
        snprintf(err_str, sizeof(err_str), "Unable to get operation");
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
               err_str);
        goto out;
    }

    if (conf->op_version < GD_OP_VERSION_6_0) {
        gf_msg_debug(this->name, 0,
                     "The cluster is operating at "
                     "version less than %d. Falling back "
                     "to op-sm framework.",
                     /* ... */);
        ret = glusterd_op_begin(req, cli_op, dict, err_str, sizeof(err_str));
        glusterd_friend_sm();
        /* ... */
    } else {
        ret = glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(
            /* ... */);
    }

out:
    free(cli_req.dict.dict_val);
    /* ... */
    if (err_str[0] == '\0')
        snprintf(err_str, sizeof(err_str), "Operation failed");
    ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
    /* ... */
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    /* ... */
}

int
glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
{
    return glusterd_big_locked_handler(req,
                                       __glusterd_handle_cli_profile_volume);
}
int
__glusterd_handle_getwd(rpcsvc_request_t *req)
{
    /* ... */
    gf1_cli_getwd_rsp rsp = {
        0,
    };
    /* ... */
    glusterd_conf_t *priv = NULL;

    /* ... */
    priv = THIS->private;
    /* ... */
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_GETWD_REQ_RCVD,
           "Received getwd req");

    rsp.wd = priv->workdir;
    /* ... */
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
                          (xdrproc_t)xdr_gf1_cli_getwd_rsp);
    /* ... */
    glusterd_friend_sm();
    /* ... */
}

int
glusterd_handle_getwd(rpcsvc_request_t *req)
{
    return glusterd_big_locked_handler(req, __glusterd_handle_getwd);
}
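/*
 * The mount/umount handlers below service mountbroker requests. The actual
 * mount runs outside the big lock since glusterd_do_mount() may block; the
 * umount side validates that the target lives under mountbroker-root (and
 * the MB_HIVE directory) before unmounting, either lazily through
 * gf_umount_lazy() or by running umount(8).
 */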
int
__glusterd_handle_mount(rpcsvc_request_t *req)
{
    /* ... */
    gf1_cli_mount_req mnt_req = {
        0,
    };
    gf1_cli_mount_rsp rsp = {
        0,
    };
    dict_t *dict = NULL;
    /* ... */
    glusterd_conf_t *priv = NULL;

    /* ... */
    priv = THIS->private;
    /* ... */
    ret = xdr_to_generic(req->msg[0], &mnt_req,
                         (xdrproc_t)xdr_gf1_cli_mount_req);
    if (ret < 0) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
               "Failed to decode mount "
               "request received");
        req->rpc_err = GARBAGE_ARGS;
        /* ... */
        rsp.op_errno = EINVAL;
        goto out;
    }

    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_MOUNT_REQ_RCVD,
           "Received mount req");

    if (mnt_req.dict.dict_len) {
        /* ... */
        ret = dict_unserialize(mnt_req.dict.dict_val, mnt_req.dict.dict_len,
                               &dict);
        if (ret < 0) {
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   /* ... */
                   "unserialize req-buffer to dictionary");
            /* ... */
            rsp.op_errno = -EINVAL;
            goto out;
        } else {
            dict->extra_stdfree = mnt_req.dict.dict_val;
        }
    }

    /* ... */
    synclock_unlock(&priv->big_lock);
    rsp.op_ret = glusterd_do_mount(mnt_req.label, dict, &rsp.path,
                                   /* ... */);
    synclock_lock(&priv->big_lock);

out:
    /* ... */
        rsp.path = gf_strdup("");
    /* ... */
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
                          (xdrproc_t)xdr_gf1_cli_mount_rsp);
    /* ... */
    glusterd_friend_sm();
    /* ... */
}

int
glusterd_handle_mount(rpcsvc_request_t *req)
{
    return glusterd_big_locked_handler(req, __glusterd_handle_mount);
}
int
__glusterd_handle_umount(rpcsvc_request_t *req)
{
    /* ... */
    gf1_cli_umount_req umnt_req = {
        0,
    };
    gf1_cli_umount_rsp rsp = {
        0,
    };
    /* ... */
    char *mountbroker_root = NULL;
    char mntp[PATH_MAX] = {
        0,
    };
    /* ... */
    xlator_t *this = THIS;
    gf_boolean_t dir_ok = _gf_false;
    /* ... */
    glusterd_conf_t *priv = NULL;

    /* ... */
    priv = this->private;
    /* ... */
    ret = xdr_to_generic(req->msg[0], &umnt_req,
                         (xdrproc_t)xdr_gf1_cli_umount_req);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
               "Failed to decode umount"
               /* ... */);
        req->rpc_err = GARBAGE_ARGS;
        /* ... */
        goto out;
    }

    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_UMOUNT_REQ_RCVD,
           "Received umount req");

    if (dict_get_str(this->options, "mountbroker-root", &mountbroker_root) !=
        0) {
        rsp.op_errno = ENOENT;
        goto out;
    }

    /* ... */
    path = gf_strdup(umnt_req.path);
    if (!path) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, NULL);
        rsp.op_errno = ENOMEM;
        goto out;
    }

    /* ... */
    pdir = dirname(path);
    t = strtail(pdir, mountbroker_root);
    if (t && *t == '/') {
        t = strtail(++t, MB_HIVE);
        /* ... */
    }
    /* ... */
        rsp.op_errno = EACCES;
    /* ... */

    synclock_unlock(&priv->big_lock);
    /* ... */
    if (umnt_req.lazy) {
        rsp.op_ret = gf_umount_lazy(this->name, umnt_req.path, 0);
    } else {
        /* ... */
        runner_add_args(&runner, _PATH_UMOUNT, umnt_req.path, NULL);
        rsp.op_ret = runner_run(&runner);
    }
    /* ... */
    synclock_lock(&priv->big_lock);
    if (rsp.op_ret == 0) {
        if (realpath(umnt_req.path, mntp))
            /* ... */
        /* ... */
            rsp.op_errno = errno;
        /* ... */
        if (sys_unlink(umnt_req.path) != 0) {
            /* ... */
            rsp.op_errno = errno;
        }
    }

out:
    /* ... */
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
                          (xdrproc_t)xdr_gf1_cli_umount_rsp);
    /* ... */
    glusterd_friend_sm();
    /* ... */
}

int
glusterd_handle_umount(rpcsvc_request_t *req)
{
    return glusterd_big_locked_handler(req, __glusterd_handle_umount);
}
int
glusterd_friend_remove(uuid_t uuid, char *hostname)
{
    /* ... */
    glusterd_peerinfo_t *peerinfo = NULL;

    /* ... */
    peerinfo = glusterd_peerinfo_find(uuid, hostname);
    if (peerinfo == NULL) {
        /* ... */
        goto out;
    }

    ret = glusterd_friend_remove_cleanup_vols(peerinfo->uuid);
    if (ret)
        gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_CLEANUP_FAIL,
               "Volumes cleanup failed");
    /* ... */
    ret = glusterd_peerinfo_cleanup(peerinfo);
out:
    gf_msg_debug(THIS->name, 0, "returning %d", ret);
    /* ... */
}
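/*
 * glusterd_rpc_create() builds an rpc_clnt from the given transport options,
 * registers notify_fn and starts the connection; when "force" is set an
 * existing client is dropped first. On failure the half-built client is
 * unref'd so the caller never sees a partially initialized *rpc.
 */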
int
glusterd_rpc_create(struct rpc_clnt **rpc, dict_t *options,
                    rpc_clnt_notify_t notify_fn, void *notify_data,
                    gf_boolean_t force)
{
    struct rpc_clnt *new_rpc = NULL;
    /* ... */
    xlator_t *this = THIS;

    /* ... */
    GF_VALIDATE_OR_GOTO(this->name, rpc, out);

    if (force && rpc && *rpc) {
        (void)rpc_clnt_unref(*rpc);
        /* ... */
    }

    /* ... */
    new_rpc = rpc_clnt_new(options, this, this->name, 16);
    /* ... */
    ret = rpc_clnt_register_notify(new_rpc, notify_fn, notify_data);
    /* ... */
    ret = rpc_clnt_start(new_rpc);
out:
    /* ... */
        (void)rpc_clnt_unref(new_rpc);
    /* ... */
    gf_msg_debug(this->name, 0, "returning %d", ret);
    /* ... */
}
int
glusterd_transport_inet_options_build(dict_t *dict, const char *hostname,
                                      int port, char *af)
{
    /* ... */
    xlator_t *this = THIS;
    int32_t interval = -1;
    int32_t time = -1;
    int32_t timeout = -1;
    /* ... */

    GF_ASSERT(hostname);
    /* ... */
    if (!port)
        port = GLUSTERD_DEFAULT_PORT;

    /* ... */
    ret = rpc_transport_inet_options_build(dict, hostname, port, af);
    /* ... */

    ret = dict_set_int32_sizen(dict, "frame-timeout", 600);
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set frame-timeout");
        goto out;
    }

    /* ... */
    ret = dict_get_int32(this->options, "transport.socket.keepalive-interval",
                         &interval);
    if (ret)
        gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
               "Failed to get socket keepalive-interval");
    ret = dict_get_int32(this->options, "transport.socket.keepalive-time",
                         &time);
    if (ret)
        gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
               "Failed to get socket keepalive-time");
    ret = dict_get_int32(this->options, "transport.tcp-user-timeout", &timeout);
    if (ret)
        gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
               "Failed to get tcp-user-timeout");

    if ((interval > 0) || (time > 0))
        ret = rpc_transport_keepalive_options_set(dict, interval, time,
                                                  timeout);
out:
    gf_msg_debug("glusterd", 0, "Returning %d", ret);
    /* ... */
}
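/*
 * glusterd_friend_rpc_create() allocates a peer context (peer uuid, name and
 * generation number) and wires it up as the notify data for the peer's rpc
 * connection, propagating bind-address, ping-timeout and management-SSL
 * settings from glusterd's own options into the new transport.
 */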
int
glusterd_friend_rpc_create(xlator_t *this, glusterd_peerinfo_t *peerinfo,
                           glusterd_peerctx_args_t *args)
{
    dict_t *options = NULL;
    /* ... */
    glusterd_peerctx_t *peerctx = NULL;
    data_t *data = NULL;
    char *af = NULL;

    peerctx = GF_CALLOC(1, sizeof(*peerctx), gf_gld_mt_peerctx_t);
    if (!peerctx) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
        goto out;
    }

    options = dict_new();
    if (!options) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
        goto out;
    }

    if (args)
        peerctx->args = *args;
    /* ... */
    gf_uuid_copy(peerctx->peerid, peerinfo->uuid);
    peerctx->peername = gf_strdup(peerinfo->hostname);
    peerctx->peerinfo_gen = peerinfo->generation;
    /* ... */

    ret = dict_get_str(this->options, "transport.address-family", &af);
    if (ret)
        gf_log(this->name, GF_LOG_TRACE,
               "option transport.address-family is not set in xlator options");
    ret = glusterd_transport_inet_options_build(options, peerinfo->hostname,
                                                peerinfo->port, af);
    /* ... */

    if (this->options) {
        data = dict_get_sizen(this->options, "transport.socket.bind-address");
        if (data) {
            ret = dict_set_sizen(options, "transport.socket.source-addr", data);
        }
        data = dict_get_sizen(this->options, "ping-timeout");
        if (data) {
            ret = dict_set_sizen(options, "ping-timeout", data);
        }
    }
    /* ... */

    if (this->ctx->secure_mgmt) {
        ret = dict_set_sizen_str_sizen(options, "transport.socket.ssl-enabled",
                                       /* ... */);
        if (ret) {
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                   "failed to set ssl-enabled in dict");
            goto out;
        }
        /* ... */
        this->ctx->ssl_cert_depth = glusterfs_read_secure_access_file();
    }

    ret = glusterd_rpc_create(&peerinfo->rpc, options, glusterd_peer_rpc_notify,
                              peerctx, _gf_false);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_CREATE_FAIL,
               "failed to create rpc for"
               /* ... */
               peerinfo->hostname);
        gf_event(EVENT_PEER_RPC_CREATE_FAILED, "peer=%s", peerinfo->hostname);
        goto out;
    }
    /* ... */
out:
    if (options)
        dict_unref(options);
    /* ... */
}
int
glusterd_friend_add(const char *hoststr, int port,
                    glusterd_friend_sm_state_t state, uuid_t *uuid,
                    glusterd_peerinfo_t **friend, gf_boolean_t restore,
                    glusterd_peerctx_args_t *args)
{
    /* ... */
    xlator_t *this = THIS;
    glusterd_conf_t *conf = NULL;

    conf = this->private;
    /* ... */

    *friend = glusterd_peerinfo_new(state, uuid, hoststr, port);
    if (*friend == NULL) {
        /* ... */
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_ADD_FAIL, NULL);
        goto out;
    }

    /* ... */
    cds_list_add_tail_rcu(&(*friend)->uuid_list, &conf->peers);
    /* ... */

    if (!restore) {
        ret = glusterd_store_peerinfo(*friend);
        if (ret == 0) {
            ret = glusterd_friend_rpc_create(this, *friend, args);
        } else {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
                   "Failed to store peerinfo");
            gf_event(EVENT_PEER_STORE_FAILURE, "peer=%s", (*friend)->hostname);
        }
    }
    /* ... */

    if (ret) {
        (void)glusterd_peerinfo_cleanup(*friend);
        /* ... */
    }

out:
    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONNECT_RETURNED,
           "connect returned %d", ret);
    /* ... */
}

int
glusterd_friend_add_from_peerinfo(glusterd_peerinfo_t *friend,
                                  gf_boolean_t restore,
                                  glusterd_peerctx_args_t *args)
{
    /* ... */
    xlator_t *this = THIS;
    glusterd_conf_t *conf = NULL;

    conf = this->private;
    /* ... */
    GF_VALIDATE_OR_GOTO(this->name, (friend != NULL), out);
    /* ... */

    cds_list_add_tail_rcu(&friend->uuid_list, &conf->peers);
    /* ... */

    if (!restore) {
        ret = glusterd_store_peerinfo(friend);
        if (ret == 0) {
            ret = glusterd_friend_rpc_create(this, friend, args);
        } else {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
                   "Failed to store peerinfo");
            gf_event(EVENT_PEER_STORE_FAILURE, "peer=%s", friend->hostname);
        }
    }

out:
    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONNECT_RETURNED,
           "connect returned %d", ret);
    /* ... */
}
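/*
 * glusterd_probe_begin()/glusterd_deprobe_begin() drive the CLI-initiated
 * attach and detach of a peer. Probing an unknown host creates the peerinfo
 * and awaits the connection; probing an already befriended, connected peer
 * only injects a GD_FRIEND_EVENT_NEW_NAME event to record the additional
 * hostname. Deprobe refuses peers that are already detaching and marks the
 * peer as detaching once the remove-friend event has been injected.
 */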
int
glusterd_probe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
                     dict_t *dict, int *op_errno)
{
    /* ... */
    glusterd_peerinfo_t *peerinfo = NULL;
    glusterd_peerctx_args_t args = {0};
    glusterd_friend_sm_event_t *event = NULL;
    /* ... */

    peerinfo = glusterd_peerinfo_find(NULL, hoststr);

    if (peerinfo == NULL) {
        gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PEER_NOT_FOUND,
               "Unable to find peerinfo"
               " for host: %s (%d)",
               /* ... */);
        args.mode = GD_MODE_ON;
        /* ... */
        ret = glusterd_friend_add(hoststr, port, GD_FRIEND_STATE_DEFAULT, NULL,
                                  &peerinfo, 0, &args);
        if ((!ret) && (!peerinfo->connected)) {
            ret = GLUSTERD_CONNECTION_AWAITED;
        }
        /* ... */
    } else if (peerinfo->connected &&
               (GD_FRIEND_STATE_BEFRIENDED == peerinfo->state)) {
        if (peerinfo->detaching) {
            /* ... */
            *op_errno = GF_PROBE_FRIEND_DETACHING;
            goto out;
        }
        ret = glusterd_peer_hostname_update(peerinfo, hoststr, _gf_false);
        /* ... */
        ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_NEW_NAME, &event);
        /* ... */
        event->peername = gf_strdup(peerinfo->hostname);
        gf_uuid_copy(event->peerid, peerinfo->uuid);
        /* ... */
        ret = glusterd_friend_sm_inject_event(event);
        glusterd_xfer_cli_probe_resp(req, 0, GF_PROBE_SUCCESS, NULL,
                                     (char *)hoststr, port, dict);
    } else {
        glusterd_xfer_cli_probe_resp(req, 0, GF_PROBE_FRIEND, NULL,
                                     (char *)hoststr, port, dict);
        /* ... */
    }

out:
    /* ... */
    gf_msg_debug("glusterd", 0, "returning %d", ret);
    /* ... */
}
int
glusterd_deprobe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
                       uuid_t uuid, dict_t *dict, int *op_errno)
{
    /* ... */
    glusterd_peerinfo_t *peerinfo = NULL;
    glusterd_friend_sm_event_t *event = NULL;
    glusterd_probe_ctx_t *ctx = NULL;
    /* ... */

    peerinfo = glusterd_peerinfo_find(uuid, hoststr);
    if (peerinfo == NULL) {
        /* ... */
        gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PEER_NOT_FOUND,
               "Unable to find peerinfo"
               /* ... */);
        goto out;
    }

    if (!peerinfo->rpc) {
        /* ... */
        goto out;
    }

    if (peerinfo->detaching) {
        /* ... */
        *op_errno = GF_DEPROBE_FRIEND_DETACHING;
        goto out;
    }

    ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_INIT_REMOVE_FRIEND,
                                       &event);
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
               "Unable to get new event");
        goto out;
    }

    ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_probe_ctx_t);
    /* ... */
    ctx->hostname = gf_strdup(hoststr);
    /* ... */

    event->peername = gf_strdup(hoststr);
    gf_uuid_copy(event->peerid, uuid);
    /* ... */
    ret = glusterd_friend_sm_inject_event(event);
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
               "Unable to inject event %d, "
               /* ... */);
        goto out;
    }
    peerinfo->detaching = _gf_true;
    /* ... */

out:
    /* ... */
}
int
glusterd_xfer_friend_remove_resp(rpcsvc_request_t *req, char *hostname,
                                 int port)
{
    gd1_mgmt_friend_rsp rsp = {
        {0},
    };
    /* ... */

    GF_ASSERT(hostname);
    /* ... */
    gf_uuid_copy(rsp.uuid, MY_UUID);
    rsp.hostname = hostname;
    /* ... */
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
                                (xdrproc_t)xdr_gd1_mgmt_friend_rsp);

    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
           "Responded to %s (%d), ret: %d", hostname, port, ret);
    /* ... */
}

int
glusterd_xfer_friend_add_resp(rpcsvc_request_t *req, char *myhostname,
                              char *remote_hostname, int port, int32_t op_ret,
                              int32_t op_errno)
{
    gd1_mgmt_friend_rsp rsp = {
        {0},
    };
    /* ... */

    GF_ASSERT(myhostname);
    /* ... */
    gf_uuid_copy(rsp.uuid, MY_UUID);
    rsp.op_ret = op_ret;
    rsp.op_errno = op_errno;
    rsp.hostname = gf_strdup(myhostname);
    /* ... */
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
                                (xdrproc_t)xdr_gd1_mgmt_friend_rsp);

    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
           "Responded to %s (%d), ret: %d, op_ret: %d", remote_hostname, port,
           /* ... */);
    GF_FREE(rsp.hostname);
    /* ... */
}
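/*
 * set_probe_error_str()/set_deprobe_error_str() translate GF_PROBE_* and
 * GF_DEPROBE_* error codes into the human-readable strings returned to the
 * CLI; an explicit op_errstr from the operation always takes precedence.
 */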
static void
set_probe_error_str(int op_ret, int op_errno, char *op_errstr, char *errstr,
                    size_t len, char *hostname, int port)
{
    if ((op_errstr) && (strcmp(op_errstr, ""))) {
        snprintf(errstr, len, "%s", op_errstr);
        /* ... */
    }

    /* ... */
    switch (op_errno) {
        case GF_PROBE_LOCALHOST:
            snprintf(errstr, len,
                     "Probe on localhost not "
                     /* ... */);
            break;

        case GF_PROBE_FRIEND:
            snprintf(errstr, len,
                     "Host %s port %d already"
                     /* ... */);
            break;

        case GF_PROBE_FRIEND_DETACHING:
            snprintf(errstr, len,
                     "Peer is already being "
                     "detached from cluster.\n"
                     "Check peer status by running "
                     "gluster peer status");
            break;

        /* ... */
            snprintf(errstr, len,
                     /* ... */
                     strerror(op_errno));
            break;

        case GF_PROBE_ANOTHER_CLUSTER:
            snprintf(errstr, len,
                     "%s is either already "
                     "part of another cluster or having "
                     "volumes configured",
                     /* ... */);
            break;

        case GF_PROBE_VOLUME_CONFLICT:
            snprintf(errstr, len,
                     "At least one volume on "
                     "%s conflicts with existing volumes "
                     /* ... */);
            break;

        case GF_PROBE_UNKNOWN_PEER:
            snprintf(errstr, len,
                     "%s responded with "
                     "'unknown peer' error, this could "
                     "happen if %s doesn't have localhost "
                     "in its peer database",
                     hostname, hostname);
            break;

        case GF_PROBE_ADD_FAILED:
            snprintf(errstr, len,
                     "Failed to add peer "
                     "information on %s",
                     /* ... */);
            break;

        case GF_PROBE_SAME_UUID:
            snprintf(errstr, len,
                     "Peer uuid (host %s) is "
                     "same as local uuid",
                     /* ... */);
            break;

        case GF_PROBE_QUORUM_NOT_MET:
            snprintf(errstr, len,
                     "Cluster quorum is not "
                     "met. Changing peers is not allowed "
                     /* ... */);
            break;

        case GF_PROBE_MISSED_SNAP_CONFLICT:
            snprintf(errstr, len,
                     /* ... */
                     "list of missed snapshots from "
                     /* ... */);
            break;

        case GF_PROBE_SNAP_CONFLICT:
            snprintf(errstr, len,
                     "Conflict in comparing "
                     "list of snapshots from "
                     /* ... */);
            break;

        default:
            snprintf(errstr, len,
                     "Probe returned with "
                     /* ... */
                     strerror(op_errno));
            break;
    }
}
int
glusterd_xfer_cli_probe_resp(rpcsvc_request_t *req, int32_t op_ret,
                             int32_t op_errno, char *op_errstr, char *hostname,
                             int port, dict_t *dict)
{
    /* ... */
    gf_cli_rsp rsp = {
        0,
    };
    char errstr[2048] = {
        0,
    };
    char *cmd_str = NULL;
    xlator_t *this = THIS;
    /* ... */

    (void)set_probe_error_str(op_ret, op_errno, op_errstr, errstr,
                              sizeof(errstr), hostname, port);
    /* ... */
    ret = dict_get_str(dict, "cmd-str", &cmd_str);
    if (ret)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CMDSTR_NOTFOUND_IN_DICT,
               /* ... */);

    rsp.op_ret = op_ret;
    rsp.op_errno = op_errno;
    rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";

    gf_cmd_log("", "%s : %s %s %s", cmd_str, (op_ret) ? "FAILED" : "SUCCESS",
               (errstr[0] != '\0') ? ":" : " ",
               (errstr[0] != '\0') ? errstr : " ");

    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
                                (xdrproc_t)xdr_gf_cli_rsp);
    /* ... */
    gf_msg_debug(this->name, 0, "Responded to CLI, ret: %d", ret);
    /* ... */
}
static void
set_deprobe_error_str(int op_ret, int op_errno, char *op_errstr, char *errstr,
                      size_t len, char *hostname)
{
    if ((op_errstr) && (strcmp(op_errstr, ""))) {
        snprintf(errstr, len, "%s", op_errstr);
        /* ... */
    }

    /* ... */
    switch (op_errno) {
        case GF_DEPROBE_LOCALHOST:
            snprintf(errstr, len, "%s is localhost", hostname);
            break;

        case GF_DEPROBE_NOT_FRIEND:
            snprintf(errstr, len,
                     "%s is not part of "
                     /* ... */);
            break;

        case GF_DEPROBE_BRICK_EXIST:
            snprintf(errstr, len,
                     "Peer %s hosts one or more bricks. If the peer is in "
                     "not recoverable state then use either replace-brick "
                     "or remove-brick command with force to remove all "
                     "bricks from the peer and attempt the peer detach "
                     /* ... */);
            break;

        case GF_DEPROBE_SNAP_BRICK_EXIST:
            snprintf(errstr, len,
                     "%s is part of existing "
                     "snapshot. Remove those snapshots "
                     "before proceeding ",
                     /* ... */);
            break;

        case GF_DEPROBE_FRIEND_DOWN:
            snprintf(errstr, len,
                     "One of the peers is "
                     "probably down. Check with "
                     /* ... */);
            break;

        case GF_DEPROBE_QUORUM_NOT_MET:
            snprintf(errstr, len,
                     "Cluster quorum is not "
                     "met. Changing peers is not allowed "
                     /* ... */);
            break;

        case GF_DEPROBE_FRIEND_DETACHING:
            snprintf(errstr, len,
                     "Peer is already being "
                     "detached from cluster.\n"
                     "Check peer status by running "
                     "gluster peer status");
            break;

        default:
            snprintf(errstr, len,
                     "Detach returned with "
                     /* ... */
                     strerror(op_errno));
            break;
    }
}
int
glusterd_xfer_cli_deprobe_resp(rpcsvc_request_t *req, int32_t op_ret,
                               int32_t op_errno, char *op_errstr,
                               char *hostname, dict_t *dict)
{
    /* ... */
    gf_cli_rsp rsp = {
        0,
    };
    char *cmd_str = NULL;
    char errstr[2048] = {
        0,
    };
    /* ... */

    (void)set_deprobe_error_str(op_ret, op_errno, op_errstr, errstr,
                                sizeof(errstr), hostname);
    /* ... */
    ret = dict_get_str(dict, "cmd-str", &cmd_str);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_CMDSTR_NOTFOUND_IN_DICT,
               /* ... */);

    rsp.op_ret = op_ret;
    rsp.op_errno = op_errno;
    rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";

    gf_cmd_log("", "%s : %s %s %s", cmd_str, (op_ret) ? "FAILED" : "SUCCESS",
               (errstr[0] != '\0') ? ":" : " ",
               (errstr[0] != '\0') ? errstr : " ");

    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
                                (xdrproc_t)xdr_gf_cli_rsp);
    /* ... */
    gf_msg_debug(THIS->name, 0, "Responded to CLI, ret: %d", ret);
    /* ... */
}
static int
glusterd_list_friends(rpcsvc_request_t *req, dict_t *dict, int32_t flags)
{
    /* ... */
    glusterd_conf_t *priv = NULL;
    glusterd_peerinfo_t *entry = NULL;
    /* ... */
    dict_t *friends = NULL;
    gf1_cli_peer_list_rsp rsp = {
        0,
    };
    /* ... */
    char my_uuid_str[64] = {
        0,
    };
    /* ... */
    xlator_t *this = THIS;

    priv = this->private;
    /* ... */
    friends = dict_new();
    if (!friends) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
        goto out;
    }

    /* ... */
    if (!cds_list_empty(&priv->peers)) {
        cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
        {
            /* ... */
            ret = gd_add_peer_detail_to_dict(entry, friends, count);
            /* ... */
        }
    }
    /* ... */

    if (flags == GF_CLI_LIST_POOL_NODES) {
        /* ... */
        keylen = snprintf(key, sizeof(key), "friend%d.uuid", count);
        uuid_utoa_r(MY_UUID, my_uuid_str);
        ret = dict_set_strn(friends, key, keylen, my_uuid_str);
        if (ret) {
            gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                    "Key=%s", key, NULL);
            goto out;
        }

        keylen = snprintf(key, sizeof(key), "friend%d.hostname", count);
        ret = dict_set_nstrn(friends, key, keylen, "localhost",
                             /* ... */);
        if (ret) {
            gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                    "Key=%s", key, NULL);
            goto out;
        }

        keylen = snprintf(key, sizeof(key), "friend%d.connected", count);
        ret = dict_set_int32n(friends, key, keylen, 1);
        if (ret) {
            gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                    "Key=%s", key, NULL);
            goto out;
        }
    }

    ret = dict_set_int32_sizen(friends, "count", count);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                /* ... */);
        goto out;
    }

    ret = dict_allocate_and_serialize(friends, &rsp.friends.friends_val,
                                      &rsp.friends.friends_len);
    /* ... */

out:
    if (friends)
        dict_unref(friends);
    /* ... */
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
                          (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
    /* ... */
    GF_FREE(rsp.friends.friends_val);
    /* ... */
}
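/*
 * glusterd_get_volumes() answers "volume info" requests:
 * GF_CLI_GET_NEXT_VOLUME walks the volume list one entry at a time (resuming
 * after "volname" when it is given), while GF_CLI_GET_VOLUME returns just
 * the named volume; a missing volume is reported back as EG_NOVOL.
 */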
static int
glusterd_get_volumes(rpcsvc_request_t *req, dict_t *dict, int32_t flags)
{
    /* ... */
    int32_t ret_bkp = 0;
    glusterd_conf_t *priv = NULL;
    glusterd_volinfo_t *entry = NULL;
    /* ... */
    dict_t *volumes = NULL;
    /* ... */
    char *volname = NULL;

    priv = THIS->private;
    /* ... */
    volumes = dict_new();
    if (!volumes) {
        gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
               /* ... */);
        goto out;
    }

    /* ... */
    if (cds_list_empty(&priv->volumes)) {
        if (flags == GF_CLI_GET_VOLUME)
            /* ... */;
        goto respond;
    }
    /* ... */
    if (flags == GF_CLI_GET_NEXT_VOLUME) {
        ret = dict_get_str(dict, "volname", &volname);
        if (ret) {
            if (priv->volumes.next) {
                entry = cds_list_entry(priv->volumes.next, typeof(*entry),
                                       vol_list);
            }
        } else {
            ret = glusterd_volinfo_find(volname, &entry);
            /* ... */
            entry = cds_list_entry(entry->vol_list.next, typeof(*entry),
                                   vol_list);
            /* ... */
            if (&entry->vol_list == &priv->volumes) {
                /* ... */
                goto respond;
            }
        }
        ret = glusterd_add_volume_detail_to_dict(entry, volumes, count);
        /* ... */
    } else if (flags == GF_CLI_GET_VOLUME) {
        ret = dict_get_str(dict, "volname", &volname);
        /* ... */
        ret = glusterd_volinfo_find(volname, &entry);
        /* ... */
        ret = glusterd_add_volume_detail_to_dict(entry, volumes, count);
        /* ... */
    }

respond:
    ret = dict_set_int32_sizen(volumes, "count", count);
    /* ... */
    ret = dict_allocate_and_serialize(volumes, &rsp.dict.dict_val,
                                      &rsp.dict.dict_len);
    /* ... */
    if (ret_bkp == -1) {
        rsp.op_ret = ret_bkp;
        rsp.op_errstr = "Volume does not exist";
        rsp.op_errno = EG_NOVOL;
    }
    /* ... */
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp);

out:
    /* ... */
    dict_unref(volumes);
    /* ... */
    GF_FREE(rsp.dict.dict_val);
    /* ... */
}
int
__glusterd_handle_status_volume(rpcsvc_request_t *req)
{
    /* ... */
    dict_t *dict = NULL;
    /* ... */
    gf_cli_req cli_req = {{
        0,
    }};
    glusterd_op_t cli_op = GD_OP_STATUS_VOLUME;
    char err_str[256] = {
        0,
    };
    /* ... */
    xlator_t *this = THIS;
    glusterd_conf_t *conf = NULL;

    /* ... */
    conf = this->private;
    /* ... */
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
               /* ... */
               "request received from cli");
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    if (cli_req.dict.dict_len > 0) {
        /* ... */
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
                /* ... */);
        /* ... */
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
                               &dict);
        if (ret < 0) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   /* ... */
                   "unserialize buffer");
            snprintf(err_str, sizeof(err_str),
                     /* ... */);
            goto out;
        }
    }

    ret = dict_get_uint32(dict, "cmd", &cmd);
    /* ... */
    if (!(cmd & GF_CLI_STATUS_ALL)) {
        ret = dict_get_str(dict, "volname", &volname);
        if (ret) {
            snprintf(err_str, sizeof(err_str),
                     /* ... */);
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s",
                   err_str);
            goto out;
        }
        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_STATUS_VOL_REQ_RCVD,
               "Received status volume req for volume %s", volname);
    }

    if ((cmd & GF_CLI_STATUS_QUOTAD) &&
        (conf->op_version == GD_OP_VERSION_MIN)) {
        snprintf(err_str, sizeof(err_str),
                 "The cluster is operating "
                 "at version 1. Getting the status of quotad is not "
                 "allowed in this state.");
        /* ... */
        goto out;
    }
    /* ... */
    ret = glusterd_op_begin_synctask(req, GD_OP_STATUS_VOLUME, dict);

out:
    /* ... */
    if (err_str[0] == '\0')
        snprintf(err_str, sizeof(err_str), "Operation failed");
    ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
    /* ... */
    free(cli_req.dict.dict_val);
    /* ... */
}

int
glusterd_handle_status_volume(rpcsvc_request_t *req)
{
    return glusterd_big_locked_handler(req, __glusterd_handle_status_volume);
}
int
__glusterd_handle_cli_clearlocks_volume(rpcsvc_request_t *req)
{
    /* ... */
    gf_cli_req cli_req = {{
        0,
    }};
    glusterd_op_t cli_op = GD_OP_CLEARLOCKS_VOLUME;
    char *volname = NULL;
    dict_t *dict = NULL;
    char err_str[64] = {
        0,
    };
    /* ... */
    xlator_t *this = THIS;

    /* ... */
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
               /* ... */
               "request received from cli");
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    if (cli_req.dict.dict_len) {
        /* ... */
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
                               &dict);
        if (ret < 0) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "failed to unserialize req-buffer to"
                   /* ... */);
            snprintf(err_str, sizeof(err_str),
                     /* ... */);
            goto out;
        }
    } else {
        /* ... */
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLI_REQ_EMPTY,
               "Empty cli request.");
        goto out;
    }

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        snprintf(err_str, sizeof(err_str),
                 "Unable to get volume "
                 /* ... */);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
               /* ... */);
        goto out;
    }

    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CLRCLK_VOL_REQ_RCVD,
           "Received clear-locks volume req "
           /* ... */);
    /* ... */
    ret = glusterd_op_begin_synctask(req, GD_OP_CLEARLOCKS_VOLUME, dict);

out:
    /* ... */
    if (err_str[0] == '\0')
        snprintf(err_str, sizeof(err_str), "Operation failed");
    ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
    /* ... */
    free(cli_req.dict.dict_val);
    /* ... */
}

int
glusterd_handle_cli_clearlocks_volume(rpcsvc_request_t *req)
{
    return glusterd_big_locked_handler(req,
                                       __glusterd_handle_cli_clearlocks_volume);
}
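/*
 * A brickid has the form "<volume-uuid>:<brick-path>"; the helpers below
 * split it at the ':' and resolve the uuid against regular volumes first,
 * then against snapshot volumes.
 */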
int
glusterd_volinfo_find_by_volume_id(uuid_t volume_id,
                                   glusterd_volinfo_t **volinfo)
{
    /* ... */
    xlator_t *this = THIS;
    glusterd_volinfo_t *voliter = NULL;
    glusterd_conf_t *priv = NULL;

    if (!volume_id) {
        gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
        /* ... */
    }

    priv = this->private;
    /* ... */
    cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
    {
        if (gf_uuid_compare(volume_id, voliter->volume_id))
            continue;
        /* ... */
        gf_msg_debug(this->name, 0, "Volume %s found", voliter->volname);
        /* ... */
    }
    /* ... */
}

int
get_volinfo_from_brickid(char *brickid, glusterd_volinfo_t **volinfo)
{
    /* ... */
    char *volid_str = NULL;
    /* ... */
    char *brickid_dup = NULL;
    /* ... */
    xlator_t *this = THIS;

    /* ... */
    brickid_dup = gf_strdup(brickid);
    /* ... */
    volid_str = brickid_dup;
    brick = strchr(brickid_dup, ':');
    if (!brick) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
               /* ... */);
        goto out;
    }
    /* ... */
    gf_uuid_parse(volid_str, volid);
    ret = glusterd_volinfo_find_by_volume_id(volid, volinfo);
    if (ret) {
        /* check if it is a snapshot volume */
        ret = glusterd_snap_volinfo_find_by_volume_id(volid, volinfo);
        if (ret)
            gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_GET_FAIL,
                   "Failed to find volinfo");
    }
    /* ... */
out:
    GF_FREE(brickid_dup);
    /* ... */
}
int
__glusterd_handle_barrier(rpcsvc_request_t *req)
{
    /* ... */
    xlator_t *this = THIS;
    gf_cli_req cli_req = {{
        0,
    }};
    dict_t *dict = NULL;
    char *volname = NULL;

    /* ... */
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
               /* ... */
               "request received from cli");
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    if (!cli_req.dict.dict_len) {
        /* ... */
        goto out;
    }

    dict = dict_new();
    if (!dict) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
        goto out;
    }
    ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, &dict);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
               "Failed to unserialize "
               "request dictionary.");
        goto out;
    }

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
               "Volname not present in "
               /* ... */);
        goto out;
    }
    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BARRIER_VOL_REQ_RCVD,
           "Received barrier volume request for "
           /* ... */);

    ret = glusterd_op_begin_synctask(req, GD_OP_BARRIER, dict);

out:
    /* ... */
    ret = glusterd_op_send_cli_response(GD_OP_BARRIER, ret, 0, req, dict,
                                        "Operation failed");
    /* ... */
    free(cli_req.dict.dict_val);
    /* ... */
}

int
glusterd_handle_barrier(rpcsvc_request_t *req)
{
    return glusterd_big_locked_handler(req, __glusterd_handle_barrier);
}
gf_boolean_t
gd_is_global_option(char *opt_key)
{
    GF_VALIDATE_OR_GOTO(THIS->name, opt_key, out);

    return (strcmp(opt_key, GLUSTERD_SHARED_STORAGE_KEY) == 0 ||
            strcmp(opt_key, GLUSTERD_QUORUM_RATIO_KEY) == 0 ||
            strcmp(opt_key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0 ||
            strcmp(opt_key, GLUSTERD_BRICK_MULTIPLEX_KEY) == 0 ||
            strcmp(opt_key, GLUSTERD_LOCALTIME_LOGGING_KEY) == 0 ||
            strcmp(opt_key, GLUSTERD_DAEMON_LOG_LEVEL_KEY) == 0 ||
            strcmp(opt_key, GLUSTERD_MAX_OP_VERSION_KEY) == 0 ||
            strcmp(opt_key, GLUSTER_BRICK_GRACEFUL_CLEANUP) == 0);

out:
    return _gf_false;
}
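/*
 * glusterd_get_volume_opts() serves "volume get <volname> <key>". Global
 * options, hook-specific keys, the config.* pseudo-options and plain volume
 * options each take a different path; "all" expands to every option via
 * glusterd_get_default_val_for_volopt().
 */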
static int
glusterd_get_volume_opts(rpcsvc_request_t *req, dict_t *dict)
{
    /* ... */
    char *orig_key = NULL;
    char *key_fixed = NULL;
    char *volname = NULL;
    /* ... */
    char err_str[2048] = {
        0,
    };
    /* ... */
    char dict_key[50] = {
        0,
    };
    /* ... */
    xlator_t *this = THIS;
    glusterd_conf_t *priv = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    /* ... */
    char op_version_buff[10] = {
        0,
    };

    /* ... */
    priv = this->private;
    /* ... */

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        snprintf(err_str, sizeof(err_str),
                 "Failed to get volume "
                 "name while handling get volume option command");
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
               /* ... */);
        goto out;
    }

    if (strcasecmp(volname, "all") == 0) {
        ret = glusterd_get_global_options_for_all_vols(req, dict,
                                                       /* ... */);
        goto out;
    }

    ret = dict_get_str(dict, "key", &key);
    if (ret) {
        snprintf(err_str, sizeof(err_str),
                 "Failed to get key "
                 "while handling get volume option for %s",
                 volname);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
               err_str);
        goto out;
    }
    gf_msg_debug(this->name, 0,
                 "Received get volume opt request for "
                 /* ... */);

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        snprintf(err_str, sizeof(err_str), FMTSTR_CHECK_VOL_EXISTS, volname);
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
               FMTSTR_CHECK_VOL_EXISTS, volname);
        goto out;
    }

    if (strcmp(key, "all")) {
        if (fnmatch(GD_HOOKS_SPECIFIC_KEY, key, FNM_NOESCAPE) == 0) {
            keylen = sprintf(dict_key, "key%d", count);
            ret = dict_set_strn(dict, dict_key, keylen, key);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                       /* ... */
                       "set %s in dictionary",
                       /* ... */);
                goto out;
            }
            ret = dict_get_str(volinfo->dict, key, &value);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                       /* ... */
                       "get %s in dictionary",
                       /* ... */);
                goto out;
            }
            keylen = sprintf(dict_key, "value%d", count);
            ret = dict_set_strn(dict, dict_key, keylen, value);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                       /* ... */
                       "set %s in dictionary",
                       /* ... */);
                goto out;
            }
        } else {
            /* ... */
            exists = glusterd_check_option_exists(key, &key_fixed);
            if (!exists) {
                snprintf(err_str, sizeof(err_str),
                         /* ... */
                         "with name: %s does not exist",
                         /* ... */);
                gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_UNKNOWN_KEY,
                       /* ... */);
                if (key_fixed)
                    snprintf(err_str + ret, sizeof(err_str) - ret,
                             "Did you mean %s?", key_fixed);
                /* ... */
                goto out;
            }
            /* ... */
            if (gd_is_global_option(key)) {
                char *warn_str = "Warning: support to get \
global option value using volume get \
<volname>` will be deprecated from \
next release. Consider using `volume \
get all` instead for global options";

                ret = dict_set_str_sizen(dict, "warning", warn_str);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                           "Failed to set warning "
                           "message in dictionary");
                    goto out;
                }
            }

            if (strcmp(key, GLUSTERD_MAX_OP_VERSION_KEY) == 0) {
                ret = glusterd_get_global_max_op_version(req, dict, 1);
                /* ... */
                goto out;
            } else if (strcmp(key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0) {
                keylen = sprintf(dict_key, "key%d", count);
                ret = dict_set_strn(dict, dict_key, keylen, key);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                           /* ... */
                           "to set %s in dictionary",
                           /* ... */);
                    goto out;
                }
                keylen = sprintf(dict_key, "value%d", count);
                sprintf(op_version_buff, "%d", priv->op_version);
                ret = dict_set_strn(dict, dict_key, keylen, op_version_buff);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                           /* ... */
                           " to set value for key %s in "
                           /* ... */);
                    goto out;
                }
            } else if (strcmp(key, "config.memory-accounting") == 0) {
                keylen = sprintf(dict_key, "key%d", count);
                ret = dict_set_strn(dict, dict_key, keylen, key);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                           /* ... */
                           " to set %s in dictionary",
                           /* ... */);
                    goto out;
                }
                keylen = sprintf(dict_key, "value%d", count);

                if (volinfo->memory_accounting)
                    ret = dict_set_nstrn(dict, dict_key, keylen, "Enabled",
                                         /* ... */);
                else
                    ret = dict_set_nstrn(dict, dict_key, keylen, "Disabled",
                                         /* ... */);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                           /* ... */
                           " to set value for key %s in "
                           /* ... */);
                    goto out;
                }
            } else if (strcmp(key, "config.transport") == 0) {
                keylen = sprintf(dict_key, "key%d", count);
                ret = dict_set_strn(dict, dict_key, keylen, key);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                           "Failed to set %s in "
                           /* ... */);
                    goto out;
                }
                keylen = sprintf(dict_key, "value%d", count);

                if (volinfo->transport_type == GF_TRANSPORT_RDMA)
                    ret = dict_set_nstrn(dict, dict_key, keylen, "rdma",
                                         /* ... */);
                else if (volinfo->transport_type == GF_TRANSPORT_TCP)
                    ret = dict_set_nstrn(dict, dict_key, keylen, "tcp",
                                         /* ... */);
                else if (volinfo->transport_type == GF_TRANSPORT_BOTH_TCP_RDMA)
                    ret = dict_set_nstrn(dict, dict_key, keylen, "tcp,rdma",
                                         /* ... */);
                else
                    ret = dict_set_nstrn(dict, dict_key, keylen, "none",
                                         /* ... */);

                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                           "Failed to set value for key "
                           /* ... */);
                    goto out;
                }
            } else {
                keylen = sprintf(dict_key, "key%d", count);
                ret = dict_set_strn(dict, dict_key, keylen, key);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                           "Failed to set %s in "
                           /* ... */);
                    goto out;
                }
                keylen = sprintf(dict_key, "value%d", count);
                ret = dict_get_str(priv->opts, key, &value);
                if (!ret) {
                    ret = dict_set_strn(dict, dict_key, keylen, value);
                    if (ret) {
                        gf_msg(this->name, GF_LOG_ERROR, 0,
                               GD_MSG_DICT_SET_FAILED,
                               "Failed to set %s in "
                               /* ... */);
                        goto out;
                    }
                } else {
                    ret = glusterd_get_default_val_for_volopt(
                        dict, _gf_false, key, orig_key, volinfo,
                        /* ... */);
                    if (ret && !rsp.op_errstr) {
                        snprintf(err_str, sizeof(err_str),
                                 "Failed to fetch the "
                                 "value of %s, check "
                                 /* ... */);
                        goto out;
                    }
                }
            }
        }
        /* ... */
        ret = dict_set_int32_sizen(dict, "count", 1);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                   "Failed to set count "
                   "value in the dictionary");
            goto out;
        }
    } else {
        /* ... */
        ret = glusterd_get_default_val_for_volopt(dict, _gf_true, NULL, NULL,
                                                  volinfo, &rsp.op_errstr);
        if (ret && !rsp.op_errstr) {
            snprintf(err_str, sizeof(err_str),
                     "Failed to fetch the value of all volume "
                     "options, check log file for more details");
        }
        /* ... */
    }

out:
    if (ret) {
        /* ... */
        rsp.op_errstr = err_str;
        /* ... */
    }
    /* ... */
    ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
                                      &rsp.dict.dict_len);
    /* ... */
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp);
    GF_FREE(rsp.dict.dict_val);
    /* ... */
}
int
__glusterd_handle_get_vol_opt(rpcsvc_request_t *req)
{
    /* ... */
    gf_cli_req cli_req = {{
        0,
    }};
    dict_t *dict = NULL;
    char err_str[64] = {
        0,
    };
    /* ... */
    xlator_t *this = THIS;

    /* ... */
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
    if (ret < 0) {
        snprintf(err_str, sizeof(err_str),
                 /* ... */
                 "request received from cli");
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
               err_str);
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    if (cli_req.dict.dict_len) {
        /* ... */
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
                               &dict);
        if (ret < 0) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   /* ... */
                   "unserialize req-buffer to dictionary");
            snprintf(err_str, sizeof(err_str),
                     /* ... */);
            goto out;
        }
        dict->extra_stdfree = cli_req.dict.dict_val;
    }

    ret = glusterd_get_volume_opts(req, dict);
    /* ... */

out:
    /* ... */
}

int
glusterd_handle_get_vol_opt(rpcsvc_request_t *req)
{
    return glusterd_big_locked_handler(req, __glusterd_handle_get_vol_opt);
}
extern struct rpc_clnt_program gd_brick_prog;

static int
glusterd_print_global_options(dict_t *opts, char *key, data_t *val, void *data)
{
    /* ... */
    GF_VALIDATE_OR_GOTO(THIS->name, key, out);
    GF_VALIDATE_OR_GOTO(THIS->name, val, out);
    GF_VALIDATE_OR_GOTO(THIS->name, data, out);

    if (strcmp(key, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
        goto out;
    /* ... */
    fprintf(fp, "%s: %s\n", key, val->data);
out:
    return 0;
}

static int
glusterd_print_volume_options(dict_t *opts, char *key, data_t *val, void *data)
{
    /* ... */
    GF_VALIDATE_OR_GOTO(THIS->name, key, out);
    GF_VALIDATE_OR_GOTO(THIS->name, val, out);
    GF_VALIDATE_OR_GOTO(THIS->name, data, out);
    /* ... */
    fprintf(fp, "Volume%d.options.%s: %s\n", volcount, key, val->data);
out:
    return 0;
}
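/*
 * glusterd_print_gsync_status() dumps one "Volume%d.pair%d.*" block per
 * geo-replication session from the gf_gsync_status_t array stored in the
 * response dict; the numeric indices map to structure fields through
 * get_struct_variable().
 */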
static int
glusterd_print_gsync_status(FILE *fp, dict_t *gsync_dict)
{
    /* ... */
    int gsync_count = 0;
    /* ... */
    gf_gsync_status_t *status_vals = NULL;
    char status_val_name[PATH_MAX] = {
        0,
    };
    /* ... */

    GF_VALIDATE_OR_GOTO(THIS->name, fp, out);
    GF_VALIDATE_OR_GOTO(THIS->name, gsync_dict, out);

    ret = dict_get_int32(gsync_dict, "gsync-count", &gsync_count);
    fprintf(fp, "Volume%d.gsync_count: %d\n", volcount, gsync_count);
    /* ... */
    if (gsync_count == 0) {
        /* ... */
        goto out;
    }

    for (i = 0; i < gsync_count; i++) {
        snprintf(status_val_name, sizeof(status_val_name), "status_value%d", i);
        /* ... */
        ret = dict_get_bin(gsync_dict, status_val_name,
                           (void **)&(status_vals));
        /* ... */

        fprintf(fp, "Volume%d.pair%d.session_secondary: %s\n", volcount, i + 1,
                get_struct_variable(21, status_vals));
        fprintf(fp, "Volume%d.pair%d.primary_node: %s\n", volcount, i + 1,
                get_struct_variable(0, status_vals));
        fprintf(fp, "Volume%d.pair%d.primary_volume: %s\n", volcount, i + 1,
                get_struct_variable(1, status_vals));
        fprintf(fp, "Volume%d.pair%d.primary_brick: %s\n", volcount, i + 1,
                get_struct_variable(2, status_vals));
        fprintf(fp, "Volume%d.pair%d.secondary_user: %s\n", volcount, i + 1,
                get_struct_variable(3, status_vals));
        fprintf(fp, "Volume%d.pair%d.secondary: %s\n", volcount, i + 1,
                get_struct_variable(4, status_vals));
        fprintf(fp, "Volume%d.pair%d.secondary_node: %s\n", volcount, i + 1,
                get_struct_variable(5, status_vals));
        fprintf(fp, "Volume%d.pair%d.status: %s\n", volcount, i + 1,
                get_struct_variable(6, status_vals));
        fprintf(fp, "Volume%d.pair%d.crawl_status: %s\n", volcount, i + 1,
                get_struct_variable(7, status_vals));
        fprintf(fp, "Volume%d.pair%d.last_synced: %s\n", volcount, i + 1,
                get_struct_variable(8, status_vals));
        fprintf(fp, "Volume%d.pair%d.entry: %s\n", volcount, i + 1,
                get_struct_variable(9, status_vals));
        fprintf(fp, "Volume%d.pair%d.data: %s\n", volcount, i + 1,
                get_struct_variable(10, status_vals));
        fprintf(fp, "Volume%d.pair%d.meta: %s\n", volcount, i + 1,
                get_struct_variable(11, status_vals));
        fprintf(fp, "Volume%d.pair%d.failures: %s\n", volcount, i + 1,
                get_struct_variable(12, status_vals));
        fprintf(fp, "Volume%d.pair%d.checkpoint_time: %s\n", volcount, i + 1,
                get_struct_variable(13, status_vals));
        fprintf(fp, "Volume%d.pair%d.checkpoint_completed: %s\n", volcount,
                i + 1, get_struct_variable(14, status_vals));
        fprintf(fp, "Volume%d.pair%d.checkpoint_completion_time: %s\n",
                volcount, i + 1, get_struct_variable(15, status_vals));
    }
    /* ... */
out:
    return ret;
}
static int
glusterd_print_gsync_status_by_vol(FILE *fp, glusterd_volinfo_t *volinfo)
{
    /* ... */
    dict_t *gsync_rsp_dict = NULL;
    xlator_t *this = THIS;

    GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
    GF_VALIDATE_OR_GOTO(this->name, fp, out);

    gsync_rsp_dict = dict_new();
    if (!gsync_rsp_dict) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
        goto out;
    }
    /* ... */
    ret = glusterd_get_gsync_status_mst(volinfo, gsync_rsp_dict,
                                        /* ... */);
    /* ... */
    ret = glusterd_print_gsync_status(fp, gsync_rsp_dict);
out:
    if (gsync_rsp_dict)
        dict_unref(gsync_rsp_dict);
    /* ... */
}
static int
glusterd_print_snapinfo_by_vol(FILE *fp, glusterd_volinfo_t *volinfo,
                               int volcount)
{
    /* ... */
    glusterd_volinfo_t *snap_vol = NULL;
    glusterd_volinfo_t *tmp_vol = NULL;
    glusterd_snap_t *snapinfo = NULL;
    /* ... */
    char timestr[GF_TIMESTR_SIZE] = {
        0,
    };
    /* ... */
    char snap_status_str[STATUS_STRLEN] = {
        0,
    };

    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
    GF_VALIDATE_OR_GOTO(THIS->name, fp, out);

    cds_list_for_each_entry_safe(snap_vol, tmp_vol, &volinfo->snap_volumes,
                                 /* ... */)
    {
        snapinfo = snap_vol->snapshot;
        /* ... */
        ret = glusterd_get_snap_status_str(snapinfo, snap_status_str);
        if (ret) {
            gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
                   "Failed to get status for snapshot: %s", snapinfo->snapname);
            goto out;
        }
        gf_time_fmt_FT(timestr, sizeof timestr, snapinfo->time_stamp);

        fprintf(fp, "Volume%d.snapshot%d.name: %s\n", volcount, snapcount,
                snapinfo->snapname);
        fprintf(fp, "Volume%d.snapshot%d.id: %s\n", volcount, snapcount,
                uuid_utoa(snapinfo->snap_id));
        fprintf(fp, "Volume%d.snapshot%d.time: %s\n", volcount, snapcount,
                /* ... */);

        if (snapinfo->description)
            fprintf(fp, "Volume%d.snapshot%d.description: %s\n", volcount,
                    snapcount, snapinfo->description);
        fprintf(fp, "Volume%d.snapshot%d.status: %s\n", volcount, snapcount,
                /* ... */);
    }
    /* ... */
out:
    return ret;
}
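/*
 * glusterd_print_client_details() queries a local, started brick over its
 * rpc with a GLUSTERD_BRICK_STATUS op (GF_CLI_STATUS_CLIENTS) and prints
 * per-client hostname, bytes read/written and op-version from the reply
 * dict returned through GD_SYNCOP.
 */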
static int
glusterd_print_client_details(FILE *fp, dict_t *dict,
                              glusterd_volinfo_t *volinfo, int volcount,
                              glusterd_brickinfo_t *brickinfo, int brickcount)
{
    /* ... */
    xlator_t *this = THIS;
    int brick_index = -1;
    int client_count = 0;
    /* ... */
    char *clientname = NULL;
    uint64_t bytesread = 0;
    uint64_t byteswrite = 0;
    uint32_t opversion = 0;
    /* ... */
    glusterd_pending_node_t *pending_node = NULL;
    rpc_clnt_t *rpc = NULL;
    struct syncargs args = {
        0,
    };
    gd1_mgmt_brick_op_req *brick_req = NULL;

    GF_VALIDATE_OR_GOTO(this->name, dict, out);

    if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
        !glusterd_is_brick_started(brickinfo)) {
        /* ... */
        goto out;
    }

    /* ... */
    pending_node = GF_CALLOC(1, sizeof(*pending_node),
                             gf_gld_mt_pending_node_t);
    if (!pending_node) {
        /* ... */
        gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
               "Unable to allocate memory");
        goto out;
    }

    pending_node->node = brickinfo;
    pending_node->type = GD_NODE_BRICK;
    pending_node->index = brick_index;
    /* ... */
    rpc = glusterd_pending_node_get_rpc(pending_node);
    if (!rpc) {
        /* ... */
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
               "Failed to retrieve rpc object");
        goto out;
    }

    brick_req = GF_CALLOC(1, sizeof(*brick_req), gf_gld_mt_mop_brick_req_t);
    if (!brick_req) {
        gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
               "Unable to allocate memory");
        goto out;
    }

    brick_req->op = GLUSTERD_BRICK_STATUS;
    brick_req->name = "";
    brick_req->dict.dict_val = NULL;
    brick_req->dict.dict_len = 0;

    ret = dict_set_str_sizen(dict, "brick-name", brickinfo->path);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=brick-name", NULL);
        goto out;
    }

    ret = dict_set_int32_sizen(dict, "cmd", GF_CLI_STATUS_CLIENTS);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                /* ... */);
        goto out;
    }

    ret = dict_set_str_sizen(dict, "volname", volinfo->volname);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=volname", NULL);
        goto out;
    }

    ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
                                      &brick_req->input.input_len);
    /* ... */

    GD_SYNCOP(rpc, (&args), NULL, gd_syncop_brick_op_cbk, brick_req,
              &gd_brick_prog, brick_req->op, xdr_gd1_mgmt_brick_op_req);
    /* ... */

    ret = dict_get_int32(args.dict, "clientcount", &client_count);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Couldn't get client count");
        goto out;
    }

    fprintf(fp, "Volume%d.Brick%d.client_count: %d\n", volcount, brickcount,
            /* ... */);

    if (client_count == 0) {
        /* ... */
        goto out;
    }

    /* ... */
    for (i = 1; i <= client_count; i++) {
        keylen = snprintf(key, sizeof(key), "client%d.hostname", i - 1);
        ret = dict_get_strn(args.dict, key, keylen, &clientname);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "Failed to get client hostname");
            goto out;
        }

        snprintf(key, sizeof(key), "Client%d.hostname", i);
        fprintf(fp, "Volume%d.Brick%d.%s: %s\n", volcount, brickcount, key,
                /* ... */);

        snprintf(key, sizeof(key), "client%d.bytesread", i - 1);
        ret = dict_get_uint64(args.dict, key, &bytesread);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "Failed to get bytesread from client");
            goto out;
        }

        snprintf(key, sizeof(key), "Client%d.bytesread", i);
        fprintf(fp, "Volume%d.Brick%d.%s: %" PRIu64 "\n", volcount, brickcount,
                /* ... */);

        snprintf(key, sizeof(key), "client%d.byteswrite", i - 1);
        ret = dict_get_uint64(args.dict, key, &byteswrite);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "Failed to get byteswrite from client");
            goto out;
        }

        snprintf(key, sizeof(key), "Client%d.byteswrite", i);
        fprintf(fp, "Volume%d.Brick%d.%s: %" PRIu64 "\n", volcount, brickcount,
                /* ... */);

        snprintf(key, sizeof(key), "client%d.opversion", i - 1);
        ret = dict_get_uint32(args.dict, key, &opversion);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "Failed to get client opversion");
            goto out;
        }

        snprintf(key, sizeof(key), "Client%d.opversion", i);
        fprintf(fp, "Volume%d.Brick%d.%s: %" PRIu32 "\n", volcount, brickcount,
                /* ... */);
    }

out:
    /* ... */
    GF_FREE(pending_node);
    /* ... */
    if (brick_req) {
        if (brick_req->input.input_val)
            GF_FREE(brick_req->input.input_val);
        /* ... */
    }
    if (args.dict)
        dict_unref(args.dict);
    if (args.errstr)
        GF_FREE(args.errstr);
    /* ... */
}
static int
glusterd_volume_get_type_str(glusterd_volinfo_t *volinfo, char **voltype_str)
{
    /* ... */
    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);

    type = get_vol_type(volinfo->type, volinfo->dist_leaf_count,
                        volinfo->brick_count);
    *voltype_str = vol_type_str[type];
    /* ... */
out:
    return ret;
}

static int
glusterd_volume_get_status_str(glusterd_volinfo_t *volinfo, char *status_str)
{
    /* ... */
    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
    GF_VALIDATE_OR_GOTO(THIS->name, status_str, out);

    switch (volinfo->status) {
        case GLUSTERD_STATUS_NONE:
            sprintf(status_str, "%s", "Created");
            break;
        case GLUSTERD_STATUS_STARTED:
            sprintf(status_str, "%s", "Started");
            break;
        case GLUSTERD_STATUS_STOPPED:
            sprintf(status_str, "%s", "Stopped");
            break;
        /* ... */
    }
    /* ... */
out:
    return ret;
}

static void
glusterd_brick_get_status_str(glusterd_brickinfo_t *brickinfo, char *status_str)
{
    GF_VALIDATE_OR_GOTO(THIS->name, brickinfo, out);
    GF_VALIDATE_OR_GOTO(THIS->name, status_str, out);

    switch (brickinfo->status) {
        case GF_BRICK_STOPPED:
            sprintf(status_str, "%s", "Stopped");
            break;
        case GF_BRICK_STARTED:
            sprintf(status_str, "%s", "Started");
            break;
        case GF_BRICK_STARTING:
            sprintf(status_str, "%s", "Starting");
            break;
        case GF_BRICK_STOPPING:
            sprintf(status_str, "%s", "Stopping");
            break;
        default:
            sprintf(status_str, "%s", "None");
            break;
    }

out:
    return;
}

static int
glusterd_volume_get_transport_type_str(glusterd_volinfo_t *volinfo,
                                       char *transport_type_str)
{
    /* ... */
    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
    GF_VALIDATE_OR_GOTO(THIS->name, transport_type_str, out);

    switch (volinfo->transport_type) {
        case GF_TRANSPORT_TCP:
            sprintf(transport_type_str, "%s", "tcp");
            break;
        case GF_TRANSPORT_RDMA:
            sprintf(transport_type_str, "%s", "rdma");
            break;
        case GF_TRANSPORT_BOTH_TCP_RDMA:
            sprintf(transport_type_str, "%s", "tcp_rdma_both");
            break;
        /* ... */
    }
    /* ... */
out:
    return ret;
}

static int
glusterd_volume_get_quorum_status_str(glusterd_volinfo_t *volinfo,
                                      char *quorum_status_str)
{
    /* ... */
    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
    GF_VALIDATE_OR_GOTO(THIS->name, quorum_status_str, out);

    switch (volinfo->quorum_status) {
        case NOT_APPLICABLE_QUORUM:
            sprintf(quorum_status_str, "%s", "not_applicable");
            break;
        case MEETS_QUORUM:
            sprintf(quorum_status_str, "%s", "meets");
            break;
        case DOESNT_MEET_QUORUM:
            sprintf(quorum_status_str, "%s", "does_not_meet");
            break;
        /* ... */
    }
    /* ... */
out:
    return ret;
}

static int
glusterd_volume_get_rebalance_status_str(glusterd_volinfo_t *volinfo,
                                         char *rebal_status_str)
{
    /* ... */
    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
    GF_VALIDATE_OR_GOTO(THIS->name, rebal_status_str, out);

    switch (volinfo->rebal.defrag_status) {
        case GF_DEFRAG_STATUS_NOT_STARTED:
            sprintf(rebal_status_str, "%s", "not_started");
            break;
        case GF_DEFRAG_STATUS_STARTED:
            sprintf(rebal_status_str, "%s", "started");
            break;
        case GF_DEFRAG_STATUS_STOPPED:
            sprintf(rebal_status_str, "%s", "stopped");
            break;
        case GF_DEFRAG_STATUS_COMPLETE:
            sprintf(rebal_status_str, "%s", "completed");
            break;
        case GF_DEFRAG_STATUS_FAILED:
            sprintf(rebal_status_str, "%s", "failed");
            break;
        case GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED:
            sprintf(rebal_status_str, "%s", "layout_fix_started");
            break;
        case GF_DEFRAG_STATUS_LAYOUT_FIX_STOPPED:
            sprintf(rebal_status_str, "%s", "layout_fix_stopped");
            break;
        case GF_DEFRAG_STATUS_LAYOUT_FIX_COMPLETE:
            sprintf(rebal_status_str, "%s", "layout_fix_complete");
            break;
        case GF_DEFRAG_STATUS_LAYOUT_FIX_FAILED:
            sprintf(rebal_status_str, "%s", "layout_fix_failed");
            break;
        /* ... */
    }
    /* ... */
out:
    return ret;
}
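/*
 * glusterd_get_state() implements "gluster get-state": it opens the output
 * file (default /var/run/gluster/glusterd_state_<timestamp>) and dumps the
 * local uuid and op-version, peers, volumes with their bricks (plus client
 * details when GF_CLI_GET_STATE_DETAIL is set), snapshots, rebalance info
 * and per-daemon service status.
 */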
glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
5702
char err_str[2048] = {
5705
glusterd_conf_t *priv = NULL;
5706
glusterd_peerinfo_t *peerinfo = NULL;
5707
glusterd_peer_hostname_t *peer_hostname_info = NULL;
5708
glusterd_volinfo_t *volinfo = NULL;
5709
glusterd_brickinfo_t *brickinfo = NULL;
5710
xlator_t *this = THIS;
5711
dict_t *vol_all_opts = NULL;
5712
struct statvfs brickstat = {0};
5714
char *filename = NULL;
5715
char *ofilepath = NULL;
5716
char *tmp_str = NULL;
5721
char timestamp[16] = {
5724
uint32_t get_state_cmd = 0;
5725
uint64_t memtotal = 0;
5726
uint64_t memfree = 0;
5731
char *vol_type_str = NULL;
5733
char transport_type_str[STATUS_STRLEN] = {
5736
char quorum_status_str[STATUS_STRLEN] = {
5739
char rebal_status_str[STATUS_STRLEN] = {
5742
char vol_status_str[STATUS_STRLEN] = {
5745
char brick_status_str[STATUS_STRLEN] = {
5749
priv = THIS->private;
5750
GF_VALIDATE_OR_GOTO(this->name, priv, out);
5752
GF_VALIDATE_OR_GOTO(this->name, dict, out);
5754
ret = dict_get_str(dict, "odir", &tmp_str);
5756
odirlen = gf_asprintf(&odir, "%s", "/var/run/gluster/");
5757
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
5758
"Default output directory: %s", odir);
5760
odirlen = gf_asprintf(&odir, "%s", tmp_str);
5763
dp = sys_opendir(odir);
5767
if (errno == ENOENT) {
5768
snprintf(err_str, sizeof(err_str),
5769
"Output directory %s does not exist.", odir);
5770
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
5772
} else if (errno == ENOTDIR) {
5773
snprintf(err_str, sizeof(err_str),
5775
"does not exist. %s points to a file.",
5777
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
5786
ret = dict_get_str(dict, "filename", &tmp_str);
5789
strftime(timestamp, sizeof(timestamp), "%Y%m%d_%H%M%S",
5791
gf_asprintf(&filename, "%s_%s", "glusterd_state", timestamp);
5793
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
5794
"Default filename: %s", filename);
5796
gf_asprintf(&filename, "%s", tmp_str);
5799
ret = gf_asprintf(&ofilepath, "%s%s%s", odir,
5800
((odir[odirlen - 1] != '/') ? "/" : ""), filename);
5805
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
5806
"Unable to get the output path");
5813
ret = dict_set_dynstr_sizen(dict, "ofilepath", ofilepath);
5815
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
5816
"Unable to set output path");
5820
fp = fopen(ofilepath, "w");
5822
snprintf(err_str, sizeof(err_str), "Failed to open file at %s",
5824
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
5830
ret = dict_get_uint32(dict, "getstate-cmd", &get_state_cmd);
5832
gf_msg_debug(this->name, 0, "get-state command type not set");
5836
if (get_state_cmd == GF_CLI_GET_STATE_VOLOPTS) {
5837
fprintf(fp, "[Volume Options]\n");
5838
cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
5840
fprintf(fp, "Volume%d.name: %s\n", ++count, volinfo->volname);
5843
vol_all_opts = dict_new();
5845
ret = glusterd_get_default_val_for_volopt(
5846
vol_all_opts, _gf_true, NULL, NULL, volinfo, &rsp.op_errstr);
5848
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_OPTS_IMPORT_FAIL,
5850
"fetch the value of all volume options "
5854
dict_unref(vol_all_opts);
5858
dict_foreach(vol_all_opts, glusterd_print_volume_options, fp);
5861
dict_unref(vol_all_opts);
5867
fprintf(fp, "[Global]\n");
5869
uuid_utoa_r(priv->uuid, id_str);
5870
fprintf(fp, "MYUUID: %s\n", id_str);
5872
fprintf(fp, "op-version: %d\n", priv->op_version);
5874
fprintf(fp, "\n[Global options]\n");
5877
dict_foreach(priv->opts, glusterd_print_global_options, fp);
5879
fprintf(fp, "\n[Peers]\n");
5882
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
5884
fprintf(fp, "Peer%d.primary_hostname: %s\n", ++count,
5885
peerinfo->hostname);
5886
fprintf(fp, "Peer%d.uuid: %s\n", count, gd_peer_uuid_str(peerinfo));
5887
fprintf(fp, "Peer%d.state: %s\n", count,
5888
glusterd_friend_sm_state_name_get(peerinfo->state));
5889
fprintf(fp, "Peer%d.connected: %s\n", count,
5890
peerinfo->connected ? "Connected" : "Disconnected");
5892
fprintf(fp, "Peer%d.othernames: ", count);
5894
cds_list_for_each_entry(peer_hostname_info, &peerinfo->hostnames,
5897
if (strcmp(peerinfo->hostname, peer_hostname_info->hostname) == 0)
5903
fprintf(fp, "%s", peer_hostname_info->hostname);
5912
fprintf(fp, "\n[Volumes]\n");
5914
cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
5916
ret = glusterd_volume_get_type_str(volinfo, &vol_type_str);
5918
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
5919
"Failed to get type for volume: %s", volinfo->volname);
5923
ret = glusterd_volume_get_status_str(volinfo, vol_status_str);
5925
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
5926
"Failed to get status for volume: %s", volinfo->volname);
5930
ret = glusterd_volume_get_transport_type_str(volinfo,
5931
transport_type_str);
5933
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
5934
"Failed to get transport type for volume: %s",
5939
ret = glusterd_volume_get_quorum_status_str(volinfo, quorum_status_str);
5941
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
5942
"Failed to get quorum status for volume: %s",
5947
ret = glusterd_volume_get_rebalance_status_str(volinfo,
5950
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
5951
"Failed to get rebalance status for volume: %s",
5956
fprintf(fp, "Volume%d.name: %s\n", ++count, volinfo->volname);
5958
uuid_utoa_r(volinfo->volume_id, id_str);
5959
fprintf(fp, "Volume%d.id: %s\n", count, id_str);
5961
fprintf(fp, "Volume%d.type: %s\n", count, vol_type_str);
5962
fprintf(fp, "Volume%d.transport_type: %s\n", count, transport_type_str);
5963
fprintf(fp, "Volume%d.status: %s\n", count, vol_status_str);
5964
fprintf(fp, "Volume%d.profile_enabled: %d\n", count,
5965
glusterd_is_profile_on(volinfo));
5966
fprintf(fp, "Volume%d.brickcount: %d\n", count, volinfo->brick_count);
5970
        count_bkp = count;
        count = 0;
        cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
        {
            fprintf(fp, "Volume%d.Brick%d.path: %s:%s\n", count_bkp, ++count,
                    brickinfo->hostname, brickinfo->path);
            fprintf(fp, "Volume%d.Brick%d.hostname: %s\n", count_bkp, count,
                    brickinfo->hostname);

            /* In an arbiter volume the arbiter is the last brick (every
             * replica_count-th brick) of each replica set. */
            if (volinfo->arbiter_count == 1) {
                if (count % volinfo->replica_count == 0)
                    fprintf(fp, "Volume%d.Brick%d.is_arbiter: 1\n", count_bkp,
                            count);
            }

            /* The remaining details are available only for bricks local to
             * this node. */
            if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
                continue;

            fprintf(fp, "Volume%d.Brick%d.port: %d\n", count_bkp, count,
                    brickinfo->port);
            fprintf(fp, "Volume%d.Brick%d.rdma_port: %d\n", count_bkp, count,
                    brickinfo->rdma_port);
            fprintf(fp, "Volume%d.Brick%d.port_registered: %d\n", count_bkp,
                    count, brickinfo->port_registered);
            glusterd_brick_get_status_str(brickinfo, brick_status_str);
            fprintf(fp, "Volume%d.Brick%d.status: %s\n", count_bkp, count,
                    brick_status_str);

            ret = sys_statvfs(brickinfo->path, &brickstat);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
                       "statfs error: %s", strerror(errno));
                goto out;
            }

            memfree = brickstat.f_bfree * brickstat.f_bsize;
            memtotal = brickstat.f_blocks * brickstat.f_bsize;

            fprintf(fp, "Volume%d.Brick%d.spacefree: %" PRIu64 "Bytes\n",
                    count_bkp, count, memfree);
            fprintf(fp, "Volume%d.Brick%d.spacetotal: %" PRIu64 "Bytes\n",
                    count_bkp, count, memtotal);

            /* Per-brick client details are emitted only for
             * 'get-state detail'. */
            if (get_state_cmd != GF_CLI_GET_STATE_DETAIL)
                continue;

            ret = glusterd_print_client_details(fp, dict, volinfo, count_bkp,
                                                brickinfo, count);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0,
                       GD_MSG_CLIENTS_GET_STATE_FAILED,
                       "Failed to get client details");
                goto out;
            }
        }
        count = count_bkp;
        ret = glusterd_print_snapinfo_by_vol(fp, volinfo, count);
        if (ret)
            goto out;

        fprintf(fp, "Volume%d.snap_count: %" PRIu64 "\n", count,
                volinfo->snap_count);
        fprintf(fp, "Volume%d.stripe_count: %d\n", count, STRIPE_COUNT);
        fprintf(fp, "Volume%d.replica_count: %d\n", count,
                volinfo->replica_count);
        fprintf(fp, "Volume%d.subvol_count: %d\n", count,
                volinfo->subvol_count);
        fprintf(fp, "Volume%d.arbiter_count: %d\n", count,
                volinfo->arbiter_count);
        fprintf(fp, "Volume%d.disperse_count: %d\n", count,
                volinfo->disperse_count);
        fprintf(fp, "Volume%d.redundancy_count: %d\n", count,
                volinfo->redundancy_count);
        fprintf(fp, "Volume%d.quorum_status: %s\n", count, quorum_status_str);

        fprintf(fp, "Volume%d.snapd_svc.online_status: %s\n", count,
                volinfo->snapd.svc.online ? "Online" : "Offline");
        fprintf(fp, "Volume%d.snapd_svc.inited: %s\n", count,
                volinfo->snapd.svc.inited ? "True" : "False");

        uuid_utoa_r(volinfo->rebal.rebalance_id, id_str);
        char *rebal_data = gf_uint64_2human_readable(
            volinfo->rebal.rebalance_data);

        fprintf(fp, "Volume%d.rebalance.id: %s\n", count, id_str);
        fprintf(fp, "Volume%d.rebalance.status: %s\n", count, rebal_status_str);
        fprintf(fp, "Volume%d.rebalance.failures: %" PRIu64 "\n", count,
                volinfo->rebal.rebalance_failures);
        fprintf(fp, "Volume%d.rebalance.skipped: %" PRIu64 "\n", count,
                volinfo->rebal.skipped_files);
        fprintf(fp, "Volume%d.rebalance.lookedup: %" PRIu64 "\n", count,
                volinfo->rebal.lookedup_files);
        fprintf(fp, "Volume%d.rebalance.files: %" PRIu64 "\n", count,
                volinfo->rebal.rebalance_files);
        fprintf(fp, "Volume%d.rebalance.data: %s\n", count, rebal_data);
        fprintf(fp, "Volume%d.time_left: %ld\n", count,
                volinfo->rebal.time_left);

        GF_FREE(rebal_data);

        fprintf(fp, "Volume%d.shd_svc.online_status: %s\n", count,
                volinfo->shd.svc.online ? "Online" : "Offline");
        fprintf(fp, "Volume%d.shd_svc.inited: %s\n", count,
                volinfo->shd.svc.inited ? "True" : "False");
        if (volinfo->rep_brick.src_brick && volinfo->rep_brick.dst_brick) {
            fprintf(fp, "Volume%d.replace_brick.src: %s:%s\n", count,
                    volinfo->rep_brick.src_brick->hostname,
                    volinfo->rep_brick.src_brick->path);
            fprintf(fp, "Volume%d.replace_brick.dest: %s:%s\n", count,
                    volinfo->rep_brick.dst_brick->hostname,
                    volinfo->rep_brick.dst_brick->path);
        }

        ret = glusterd_print_gsync_status_by_vol(fp, volinfo);
        if (ret)
            goto out;

        dict_foreach(volinfo->dict, glusterd_print_volume_options, fp);

        fprintf(fp, "\n");
    }
fprintf(fp, "\n[Services]\n");
6103
if (priv->nfs_svc.inited) {
6104
fprintf(fp, "svc%d.name: %s\n", ++count, priv->nfs_svc.name);
6105
fprintf(fp, "svc%d.online_status: %s\n\n", count,
6106
priv->nfs_svc.online ? "Online" : "Offline");
6109
if (priv->bitd_svc.inited) {
6110
fprintf(fp, "svc%d.name: %s\n", ++count, priv->bitd_svc.name);
6111
fprintf(fp, "svc%d.online_status: %s\n\n", count,
6112
priv->bitd_svc.online ? "Online" : "Offline");
6115
if (priv->scrub_svc.inited) {
6116
fprintf(fp, "svc%d.name: %s\n", ++count, priv->scrub_svc.name);
6117
fprintf(fp, "svc%d.online_status: %s\n\n", count,
6118
priv->scrub_svc.online ? "Online" : "Offline");
6121
if (priv->quotad_svc.inited) {
6122
fprintf(fp, "svc%d.name: %s\n", ++count, priv->quotad_svc.name);
6123
fprintf(fp, "svc%d.online_status: %s\n\n", count,
6124
priv->quotad_svc.online ? "Online" : "Offline");
6127
fprintf(fp, "\n[Misc]\n");
6129
fprintf(fp, "Base port: %d\n", priv->pmap->base_port);
6137
out:
    if (rsp.op_errstr == NULL)
        rsp.op_errstr = err_str;

    ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
                                      &rsp.dict.dict_len);
    glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);
    GF_FREE(rsp.dict.dict_val);

    return ret;
}
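/* For reference, glusterd_get_state() above emits a flat, INI-like text
 * file built from the fprintf calls. A short, purely illustrative excerpt
 * (hypothetical values):
 *
 *   [Global]
 *   MYUUID: 6479e43f-...-2b5e
 *   op-version: 70000
 *
 *   [Peers]
 *   Peer1.primary_hostname: host2.example.com
 *   Peer1.state: Peer in Cluster
 *   Peer1.connected: Connected
 *
 *   [Volumes]
 *   Volume1.name: gv0
 *   Volume1.type: Replicate
 *   Volume1.Brick1.path: host1.example.com:/bricks/b1
 */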
int
__glusterd_handle_get_state(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gf_cli_req cli_req = {
        {0},
    };
    dict_t *dict = NULL;
    char err_str[64] = {
        0,
    };
    xlator_t *this = THIS;

    GF_VALIDATE_OR_GOTO(this->name, req, out);

    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DAEMON_STATE_REQ_RCVD,
           "Received request to get state for glusterd");

    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
    if (ret < 0) {
        snprintf(err_str, sizeof(err_str),
                 "Failed to decode "
                 "request received from cli");
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
               err_str);
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    if (cli_req.dict.dict_len) {
        /* Unserialize the dictionary */
        dict = dict_new();

        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
                               &dict);
        if (ret < 0) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "failed to "
                   "unserialize req-buffer to dictionary");
            snprintf(err_str, sizeof(err_str), "Unable to decode the command");
            goto out;
        }

        dict->extra_stdfree = cli_req.dict.dict_val;
    }

    ret = glusterd_get_state(req, dict);

out:
    if (dict)
        dict_unref(dict);
    return ret;
}

int
glusterd_handle_get_state(rpcsvc_request_t *req)
{
    return glusterd_big_locked_handler(req, __glusterd_handle_get_state);
}
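/* Usage note: this handler backs the 'gluster get-state' CLI command, e.g.
 *
 *   # gluster get-state glusterd odir /var/tmp file gd-state.txt detail
 *
 * (illustrative invocation; the default output location varies by version).
 * The optional 'detail' flag travels in the request dictionary and is what
 * selects GF_CLI_GET_STATE_DETAIL, enabling the per-brick client details
 * printed by glusterd_get_state().
 */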
static int
get_brickinfo_from_brickid(char *brickid, glusterd_brickinfo_t **brickinfo)
{
    glusterd_volinfo_t *volinfo = NULL;
    char *volid_str = NULL;
    char *brick = NULL;
    char *brickid_dup = NULL;
    uuid_t volid = {0};
    int ret = -1;
    xlator_t *this = THIS;

    brickid_dup = gf_strdup(brickid);
    if (!brickid_dup) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
                "brick_id=%s", brickid, NULL);
        goto out;
    }

    volid_str = brickid_dup;
    brick = strchr(brickid_dup, ':');
    if (!volid_str) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
        goto out;
    }

    if (!brick) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
        goto out;
    }

    *brick = '\0';
    brick++;
    gf_uuid_parse(volid_str, volid);
    ret = glusterd_volinfo_find_by_volume_id(volid, &volinfo);
    if (ret) {
        /* Check if it is a snapshot volume */
        ret = glusterd_snap_volinfo_find_by_volume_id(volid, &volinfo);
        if (ret)
            goto out;
    }

    ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, brickinfo,
                                                 _gf_false);

out:
    GF_FREE(brickid_dup);
    return ret;
}
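/* The brickid parsed above is the volume UUID and the brick's
 * "hostname:path" joined by a colon, e.g. (hypothetical values):
 *
 *   21d4e463-...-9f3a:host1.example.com:/bricks/b1
 *
 * Everything before the first ':' is treated as the (regular or snapshot)
 * volume UUID; the remainder is matched against that volume's bricks via
 * glusterd_volume_brickinfo_get_by_brick().
 */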
static int gd_stale_rpc_disconnect_log;

static int
__glusterd_brick_rpc_notify(struct rpc_clnt *rpc, void *mydata,
                            rpc_clnt_event_t event, void *data)
{
    char *brickid = NULL;
    int ret = 0;
    int pid = -1;
    glusterd_conf_t *conf = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    xlator_t *this = THIS;
    glusterd_brickinfo_t *brickinfo_tmp = NULL;
    glusterd_brick_proc_t *brick_proc = NULL;
    char pidfile[PATH_MAX] = {0};
    char *brickpath = NULL;
    gf_boolean_t is_service_running = _gf_true;

    brickid = mydata;
    if (!brickid)
        return 0;

    ret = get_brickinfo_from_brickid(brickid, &brickinfo);
    if (ret)
        return 0;

    conf = this->private;

    switch (event) {
        case RPC_CLNT_CONNECT:
            ret = get_volinfo_from_brickid(brickid, &volinfo);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
                       "Failed to get volinfo from "
                       "brickid(%s)",
                       brickid);
                goto out;
            }

            /* If a snapshot restore is pending on this brick, do not mark
             * it started; stop it instead. */
            if (brickinfo->snap_status == -1) {
                gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SNAPSHOT_PENDING,
                       "Snapshot is pending on %s:%s. "
                       "Hence not starting the brick",
                       brickinfo->hostname, brickinfo->path);
                ret = glusterd_brick_stop(volinfo, brickinfo, _gf_false);
                if (ret) {
                    gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_STOP_FAIL,
                           "Unable to stop %s:%s", brickinfo->hostname,
                           brickinfo->path);
                    goto out;
                }
                break;
            }
            gf_msg_debug(this->name, 0, "Connected to %s:%s",
                         brickinfo->hostname, brickinfo->path);

            glusterd_set_brick_status(brickinfo, GF_BRICK_STARTED);

            gf_event(EVENT_BRICK_CONNECTED, "peer=%s;volume=%s;brick=%s",
                     brickinfo->hostname, volinfo->volname, brickinfo->path);

            ret = default_notify(this, GF_EVENT_CHILD_UP, NULL);

            break;
        case RPC_CLNT_DISCONNECT:
            if (rpc != brickinfo->rpc) {
                /* A stale rpc can still deliver events after the brick has
                 * been reconnected through a fresh rpc; ignore it. */
                GF_LOG_OCCASIONALLY(gd_stale_rpc_disconnect_log, this->name,
                                    GF_LOG_INFO,
                                    "got disconnect from stale rpc on "
                                    "%s(%s)",
                                    brickinfo->path, brickid);
                break;
            }

            if (glusterd_is_brick_started(brickinfo)) {
                gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BRICK_DISCONNECTED,
                       "Brick %s:%s has disconnected from glusterd.",
                       brickinfo->hostname, brickinfo->path);

                ret = get_volinfo_from_brickid(brickid, &volinfo);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0,
                           GD_MSG_VOLINFO_GET_FAIL,
                           "Failed to get volinfo from "
                           "brickid(%s)",
                           brickid);
                    goto out;
                }
                gf_event(EVENT_BRICK_DISCONNECTED, "peer=%s;volume=%s;brick=%s",
                         brickinfo->hostname, volinfo->volname,
                         brickinfo->path);

                /* On an abrupt brick shutdown glusterd may never receive a
                 * PMAP_SIGNOUT, leaving a stale port entry; clean it up if
                 * the brick process is no longer running. */
                GLUSTERD_GET_BRICK_PIDFILE(pidfile, volinfo, brickinfo, conf);
                is_service_running = gf_is_service_running(pidfile, &pid);
                if (pid > 0)
                    brickpath = search_brick_path_from_proc(pid,
                                                            brickinfo->path);
                if (!is_service_running || !brickpath) {
                    ret = pmap_port_remove(this, brickinfo->port,
                                           brickinfo->path, NULL, _gf_true);
                    if (ret) {
                        gf_msg(this->name, GF_LOG_WARNING,
                               GD_MSG_PMAP_REGISTRY_REMOVE_FAIL, 0,
                               "Failed to remove pmap "
                               "registry for port %d for "
                               "brick %s",
                               brickinfo->port, brickinfo->path);
                        ret = 0;
                    }
                }
                GF_FREE(brickpath);
            }

            if (is_brick_mx_enabled() && glusterd_is_brick_started(brickinfo)) {
                /* With brick multiplexing one process hosts several bricks;
                 * a disconnect marks every brick of that process stopped. */
                brick_proc = brickinfo->brick_proc;
                if (brick_proc) {
                    cds_list_for_each_entry(brickinfo_tmp, &brick_proc->bricks,
                                            mux_bricks)
                    {
                        glusterd_set_brick_status(brickinfo_tmp,
                                                  GF_BRICK_STOPPED);
                        brickinfo_tmp->start_triggered = _gf_false;
                        /* Stopped bricks must also release their ports. */
                        pmap_port_remove(this, brickinfo_tmp->port,
                                         brickinfo_tmp->path, NULL, _gf_true);
                    }
                }
            } else {
                glusterd_set_brick_status(brickinfo, GF_BRICK_STOPPED);
                brickinfo->start_triggered = _gf_false;
            }
            break;

        case RPC_CLNT_DESTROY:
            /* The brickid string was handed to the rpc as mydata; release it
             * when the rpc object itself goes away. */
            GF_FREE(mydata);
            mydata = NULL;
            break;
        default:
            gf_msg_trace(this->name, 0, "got some other RPC event %d", event);
            break;
    }

out:
    return ret;
}

int
glusterd_brick_rpc_notify(struct rpc_clnt *rpc, void *mydata,
                          rpc_clnt_event_t event, void *data)
{
    return glusterd_big_locked_notify(rpc, mydata, event, data,
                                      __glusterd_brick_rpc_notify);
}
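/* For context: glusterd keeps one rpc_clnt per local brick and passes the
 * brickid string as the notify payload when the connection is created. A
 * rough sketch of that wiring (hypothetical variable names; the real call
 * site is glusterd_brick_connect(), and glusterd_rpc_create()'s exact
 * signature differs across releases):
 *
 *   char *brickid = gf_strdup("<volume-uuid>:<hostname>:<brick-path>");
 *   ret = glusterd_rpc_create(&brickinfo->rpc, options,
 *                             glusterd_brick_rpc_notify, brickid);
 *
 * which is why every CONNECT/DISCONNECT/DESTROY event above can recover the
 * brick it belongs to from 'mydata', and why RPC_CLNT_DESTROY frees it.
 */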
static int
glusterd_friend_remove_notify(glusterd_peerctx_t *peerctx, int32_t op_errno)
{
    int ret = -1;
    glusterd_friend_sm_event_t *new_event = NULL;
    glusterd_peerinfo_t *peerinfo = NULL;
    rpcsvc_request_t *req = NULL;
    char *errstr = NULL;
    dict_t *dict = NULL;

    RCU_READ_LOCK;
    peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
    if (!peerinfo) {
        gf_msg_debug(THIS->name, 0,
                     "Could not find peer %s(%s). "
                     "Peer could have been deleted.",
                     peerctx->peername, uuid_utoa(peerctx->peerid));
        ret = 0;
        goto out;
    }

    req = peerctx->args.req;
    dict = peerctx->args.dict;
    errstr = peerctx->errstr;

    ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_REMOVE_FRIEND,
                                       &new_event);
    if (!ret) {
        if (!req) {
            gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_EVENT_NEW_GET_FAIL,
                   "Unable to find the request for responding "
                   "to User (%s)",
                   peerinfo->hostname);
            goto out;
        }

        glusterd_xfer_cli_probe_resp(req, -1, op_errno, errstr,
                                     peerinfo->hostname, peerinfo->port, dict);

        new_event->peername = gf_strdup(peerinfo->hostname);
        gf_uuid_copy(new_event->peerid, peerinfo->uuid);
        ret = glusterd_friend_sm_inject_event(new_event);
    } else {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
               "Unable to create event for removing peer %s",
               peerinfo->hostname);
    }

out:
    RCU_READ_UNLOCK;
    return ret;
}
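/* This path runs when a peer that is still in GD_FRIEND_STATE_DEFAULT
 * (i.e. a probe that never completed) disconnects: any CLI waiting on the
 * probe is answered through glusterd_xfer_cli_probe_resp() with op_errno
 * (ENOTCONN by default, or GF_PROBE_ANOTHER_CLUSTER as set by the caller
 * below), and a GD_FRIEND_EVENT_REMOVE_FRIEND event is injected so the
 * friend state machine removes the half-probed peer.
 */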
static int
__glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
                           rpc_clnt_event_t event, void *data)
{
    xlator_t *this = NULL;
    glusterd_conf_t *conf = NULL;
    int ret = 0;
    int32_t op_errno = ENOTCONN;
    glusterd_peerinfo_t *peerinfo = NULL;
    glusterd_peerctx_t *peerctx = NULL;
    gf_boolean_t quorum_action = _gf_false;
    glusterd_volinfo_t *volinfo = NULL;
    glusterfs_ctx_t *ctx = NULL;

    peerctx = mydata;
    if (!peerctx)
        return 0;

    switch (event) {
        case RPC_CLNT_DESTROY:
            GF_FREE(peerctx->errstr);
            GF_FREE(peerctx->peername);
            GF_FREE(peerctx);
            return 0;
        default:
            break;
    }

    this = THIS;
    conf = this->private;
    ctx = this->ctx;
    GF_VALIDATE_OR_GOTO(this->name, ctx, out);
    if (ctx->cleanup_started) {
        gf_log(this->name, GF_LOG_INFO,
               "glusterd already received a SIGTERM, "
               "dropping the event %d for peer %s",
               event, peerctx->peername);
        return 0;
    }

    RCU_READ_LOCK;
    peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
    if (!peerinfo) {
        gf_msg(this->name,
               (RPC_CLNT_CONNECT == event) ? GF_LOG_CRITICAL : GF_LOG_DEBUG,
               ENOENT, GD_MSG_PEER_NOT_FOUND,
               "Could not find peer "
               "%s(%s)",
               peerctx->peername, uuid_utoa(peerctx->peerid));

        if (RPC_CLNT_CONNECT == event) {
            gf_event(EVENT_PEER_NOT_FOUND, "peer=%s;uuid=%s", peerctx->peername,
                     uuid_utoa(peerctx->peerid));
        }
        ret = -1;
        goto out;
    }

    switch (event) {
        case RPC_CLNT_CONNECT: {
            gf_msg_debug(this->name, 0, "got RPC_CLNT_CONNECT");
            peerinfo->connected = 1;
            peerinfo->quorum_action = _gf_true;
            peerinfo->generation = uatomic_add_return(&conf->generation, 1);
            peerctx->peerinfo_gen = peerinfo->generation;

            if (!gf_uuid_is_null(peerinfo->uuid)) {
                gf_event(EVENT_PEER_CONNECT, "host=%s;uuid=%s",
                         peerinfo->hostname, uuid_utoa(peerinfo->uuid));
            }
            ret = glusterd_peer_dump_version(this, rpc, peerctx);
            if (ret)
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HANDSHAKE_FAILED,
                       "glusterd handshake failed");
            break;
        }
        case RPC_CLNT_DISCONNECT: {
            /* If the disconnect has already been processed, skip any
             * duplicate event. */
            if (rpc_clnt_connection_status(&rpc->conn) ==
                RPC_STATUS_DISCONNECTED)
                break;

            gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_PEER_DISCONNECTED,
                   "Peer <%s> (<%s>), in state <%s>, has disconnected "
                   "from glusterd.",
                   peerinfo->hostname, uuid_utoa(peerinfo->uuid),
                   glusterd_friend_sm_state_name_get(peerinfo->state));
            gf_event(EVENT_PEER_DISCONNECT, "peer=%s;uuid=%s;state=%s",
                     peerinfo->hostname, uuid_utoa(peerinfo->uuid),
                     glusterd_friend_sm_state_name_get(peerinfo->state));

            if (peerinfo->connected) {
                /* Release any stale volume locks held on behalf of the
                 * disconnected peer. */
                cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
                {
                    ret = glusterd_mgmt_v3_unlock(volinfo->volname,
                                                  peerinfo->uuid, "vol");
                    if (ret)
                        gf_msg(this->name, GF_LOG_WARNING, 0,
                               GD_MSG_MGMTV3_UNLOCK_FAIL,
                               "Lock not released "
                               "for %s",
                               volinfo->volname);
                }

                op_errno = GF_PROBE_ANOTHER_CLUSTER;
            }

            if ((peerinfo->quorum_contrib != QUORUM_DOWN) &&
                (peerinfo->state == GD_FRIEND_STATE_BEFRIENDED)) {
                peerinfo->quorum_contrib = QUORUM_DOWN;
                quorum_action = _gf_true;
                peerinfo->quorum_action = _gf_false;
            }

            /* A peer still in the default state was never fully probed;
             * remove it and answer the pending CLI request. */
            if (peerinfo->state == GD_FRIEND_STATE_DEFAULT) {
                glusterd_friend_remove_notify(peerctx, op_errno);
                goto out;
            }

            peerinfo->connected = 0;
            break;
        }

        default:
            gf_msg_trace(this->name, 0, "got some other RPC event %d", event);
            break;
    }

out:
    RCU_READ_UNLOCK;

    glusterd_friend_sm();
    glusterd_op_sm();

    if (quorum_action)
        glusterd_do_quorum_action();

    return ret;
}

static int
glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
                         rpc_clnt_event_t event, void *data)
{
    return glusterd_big_locked_notify(rpc, mydata, event, data,
                                      __glusterd_peer_rpc_notify);
}
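/* Note the ordering at the tail of the notify path: quorum_action is only
 * latched while handling the event, and glusterd_do_quorum_action() (which
 * may start or stop bricks as server quorum is regained or lost) runs only
 * after the RCU read section has been released and the friend/op state
 * machines have been driven.
 */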
int
glusterd_null(rpcsvc_request_t *req)
{
    return 0;
}

static rpcsvc_actor_t gd_svc_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
    [GLUSTERD_MGMT_NULL] = {"NULL", glusterd_null, NULL, GLUSTERD_MGMT_NULL,
                            DRC_NA, 0},
    [GLUSTERD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK",
                                    glusterd_handle_cluster_lock, NULL,
                                    GLUSTERD_MGMT_CLUSTER_LOCK, DRC_NA, 0},
    [GLUSTERD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK",
                                      glusterd_handle_cluster_unlock, NULL,
                                      GLUSTERD_MGMT_CLUSTER_UNLOCK, DRC_NA, 0},
    [GLUSTERD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd_handle_stage_op, NULL,
                                GLUSTERD_MGMT_STAGE_OP, DRC_NA, 0},
    [GLUSTERD_MGMT_COMMIT_OP] =
        {
            "COMMIT_OP",
            glusterd_handle_commit_op,
            NULL,
            GLUSTERD_MGMT_COMMIT_OP,
            DRC_NA,
            0,
        },
};

struct rpcsvc_program gd_svc_mgmt_prog = {
    .progname = "GlusterD svc mgmt",
    .prognum = GD_MGMT_PROGRAM,
    .progver = GD_MGMT_VERSION,
    .numactors = GLUSTERD_MGMT_MAXVALUE,
    .actors = gd_svc_mgmt_actors,
    .synctask = _gf_true,
};
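/* These actor tables are handed to the RPC layer during glusterd init. A
 * minimal sketch of the registration (assuming the generic rpcsvc API;
 * rpcsvc_program_register()'s exact signature varies across glusterfs
 * releases):
 *
 *   if (rpcsvc_program_register(svc, &gd_svc_mgmt_prog, _gf_false))
 *       gf_log("glusterd", GF_LOG_ERROR, "mgmt program registration failed");
 *
 * Once registered, an incoming call with prognum GD_MGMT_PROGRAM and
 * procnum GLUSTERD_MGMT_STAGE_OP is dispatched to glusterd_handle_stage_op
 * through gd_svc_mgmt_actors.
 */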
static rpcsvc_actor_t gd_svc_peer_actors[GLUSTERD_FRIEND_MAXVALUE] = {
    [GLUSTERD_FRIEND_NULL] = {"NULL", glusterd_null, NULL, GLUSTERD_MGMT_NULL,
                              DRC_NA, 0},
    [GLUSTERD_PROBE_QUERY] = {"PROBE_QUERY", glusterd_handle_probe_query, NULL,
                              GLUSTERD_PROBE_QUERY, DRC_NA, 0},
    [GLUSTERD_FRIEND_ADD] = {"FRIEND_ADD", glusterd_handle_incoming_friend_req,
                             NULL, GLUSTERD_FRIEND_ADD, DRC_NA, 0},
    [GLUSTERD_FRIEND_REMOVE] = {"FRIEND_REMOVE",
                                glusterd_handle_incoming_unfriend_req, NULL,
                                GLUSTERD_FRIEND_REMOVE, DRC_NA, 0},
    [GLUSTERD_FRIEND_UPDATE] = {"FRIEND_UPDATE", glusterd_handle_friend_update,
                                NULL, GLUSTERD_FRIEND_UPDATE, DRC_NA, 0},
};

struct rpcsvc_program gd_svc_peer_prog = {
    .progname = "GlusterD svc peer",
    .prognum = GD_FRIEND_PROGRAM,
    .progver = GD_FRIEND_VERSION,
    .numactors = GLUSTERD_FRIEND_MAXVALUE,
    .actors = gd_svc_peer_actors,
    .synctask = _gf_false,
};
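/* Note the .synctask difference between the programs: gd_svc_mgmt_prog above
 * and the cli programs below run their actors inside a synctask
 * (.synctask = _gf_true), whereas the peer program handles friend RPCs
 * directly on the RPC thread (.synctask = _gf_false).
 */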
static rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
    [GLUSTER_CLI_PROBE] = {"CLI_PROBE", glusterd_handle_cli_probe, NULL,
                           GLUSTER_CLI_PROBE, DRC_NA, 0},
    [GLUSTER_CLI_CREATE_VOLUME] = {"CLI_CREATE_VOLUME",
                                   glusterd_handle_create_volume, NULL,
                                   GLUSTER_CLI_CREATE_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_DEFRAG_VOLUME] = {"CLI_DEFRAG_VOLUME",
                                   glusterd_handle_defrag_volume, NULL,
                                   GLUSTER_CLI_DEFRAG_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_DEPROBE] = {"FRIEND_REMOVE", glusterd_handle_cli_deprobe, NULL,
                             GLUSTER_CLI_DEPROBE, DRC_NA, 0},
    [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS",
                                  glusterd_handle_cli_list_friends, NULL,
                                  GLUSTER_CLI_LIST_FRIENDS, DRC_NA, 0},
    [GLUSTER_CLI_UUID_RESET] = {"UUID_RESET", glusterd_handle_cli_uuid_reset,
                                NULL, GLUSTER_CLI_UUID_RESET, DRC_NA, 0},
    [GLUSTER_CLI_UUID_GET] = {"UUID_GET", glusterd_handle_cli_uuid_get, NULL,
                              GLUSTER_CLI_UUID_GET, DRC_NA, 0},
    [GLUSTER_CLI_START_VOLUME] = {"START_VOLUME",
                                  glusterd_handle_cli_start_volume, NULL,
                                  GLUSTER_CLI_START_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_STOP_VOLUME] = {"STOP_VOLUME", glusterd_handle_cli_stop_volume,
                                 NULL, GLUSTER_CLI_STOP_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME",
                                   glusterd_handle_cli_delete_volume, NULL,
                                   GLUSTER_CLI_DELETE_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", glusterd_handle_cli_get_volume,
                                NULL, GLUSTER_CLI_GET_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_ADD_BRICK] = {"ADD_BRICK", glusterd_handle_add_brick, NULL,
                               GLUSTER_CLI_ADD_BRICK, DRC_NA, 0},
    [GLUSTER_CLI_ATTACH_TIER] = {"ATTACH_TIER", glusterd_handle_attach_tier,
                                 NULL, GLUSTER_CLI_ATTACH_TIER, DRC_NA, 0},
    [GLUSTER_CLI_REPLACE_BRICK] = {"REPLACE_BRICK",
                                   glusterd_handle_replace_brick, NULL,
                                   GLUSTER_CLI_REPLACE_BRICK, DRC_NA, 0},
    [GLUSTER_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", glusterd_handle_remove_brick,
                                  NULL, GLUSTER_CLI_REMOVE_BRICK, DRC_NA, 0},
    [GLUSTER_CLI_LOG_ROTATE] = {"LOG FILENAME", glusterd_handle_log_rotate,
                                NULL, GLUSTER_CLI_LOG_ROTATE, DRC_NA, 0},
    [GLUSTER_CLI_SET_VOLUME] = {"SET_VOLUME", glusterd_handle_set_volume, NULL,
                                GLUSTER_CLI_SET_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", glusterd_handle_sync_volume,
                                 NULL, GLUSTER_CLI_SYNC_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_RESET_VOLUME] = {"RESET_VOLUME", glusterd_handle_reset_volume,
                                  NULL, GLUSTER_CLI_RESET_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_FSM_LOG] = {"FSM_LOG", glusterd_handle_fsm_log, NULL,
                             GLUSTER_CLI_FSM_LOG, DRC_NA, 0},
    [GLUSTER_CLI_GSYNC_SET] = {"GSYNC_SET", glusterd_handle_gsync_set, NULL,
                               GLUSTER_CLI_GSYNC_SET, DRC_NA, 0},
    [GLUSTER_CLI_PROFILE_VOLUME] = {"STATS_VOLUME",
                                    glusterd_handle_cli_profile_volume, NULL,
                                    GLUSTER_CLI_PROFILE_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_QUOTA] = {"QUOTA", glusterd_handle_quota, NULL,
                           GLUSTER_CLI_QUOTA, DRC_NA, 0},
    [GLUSTER_CLI_GETWD] = {"GETWD", glusterd_handle_getwd, NULL,
                           GLUSTER_CLI_GETWD, DRC_NA, 1},
    [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME",
                                   glusterd_handle_status_volume, NULL,
                                   GLUSTER_CLI_STATUS_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_MOUNT] = {"MOUNT", glusterd_handle_mount, NULL,
                           GLUSTER_CLI_MOUNT, DRC_NA, 1},
    [GLUSTER_CLI_UMOUNT] = {"UMOUNT", glusterd_handle_umount, NULL,
                            GLUSTER_CLI_UMOUNT, DRC_NA, 1},
    [GLUSTER_CLI_HEAL_VOLUME] = {"HEAL_VOLUME", glusterd_handle_cli_heal_volume,
                                 NULL, GLUSTER_CLI_HEAL_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME",
                                      glusterd_handle_cli_statedump_volume,
                                      NULL, GLUSTER_CLI_STATEDUMP_VOLUME,
                                      DRC_NA, 0},
    [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", glusterd_handle_cli_list_volume,
                                 NULL, GLUSTER_CLI_LIST_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME",
                                     glusterd_handle_cli_clearlocks_volume,
                                     NULL, GLUSTER_CLI_CLRLOCKS_VOLUME, DRC_NA,
                                     0},
    [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", glusterd_handle_copy_file, NULL,
                               GLUSTER_CLI_COPY_FILE, DRC_NA, 0},
    [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", glusterd_handle_sys_exec, NULL,
                              GLUSTER_CLI_SYS_EXEC, DRC_NA, 0},
    [GLUSTER_CLI_SNAP] = {"SNAP", glusterd_handle_snapshot, NULL,
                          GLUSTER_CLI_SNAP, DRC_NA, 0},
    [GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER_VOLUME", glusterd_handle_barrier,
                                    NULL, GLUSTER_CLI_BARRIER_VOLUME, DRC_NA,
                                    0},
    [GLUSTER_CLI_GANESHA] = {"GANESHA", glusterd_handle_ganesha_cmd, NULL,
                             GLUSTER_CLI_GANESHA, DRC_NA, 0},
    [GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", glusterd_handle_get_vol_opt,
                                 NULL, GLUSTER_CLI_GET_VOL_OPT, DRC_NA, 0},
    [GLUSTER_CLI_BITROT] = {"BITROT", glusterd_handle_bitrot, NULL,
                            GLUSTER_CLI_BITROT, DRC_NA, 0},
    [GLUSTER_CLI_GET_STATE] = {"GET_STATE", glusterd_handle_get_state, NULL,
                               GLUSTER_CLI_GET_STATE, DRC_NA, 0},
    [GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", glusterd_handle_reset_brick,
                                 NULL, GLUSTER_CLI_RESET_BRICK, DRC_NA, 0},
    [GLUSTER_CLI_TIER] = {"TIER", glusterd_handle_tier, NULL, GLUSTER_CLI_TIER,
                          DRC_NA, 0},
    [GLUSTER_CLI_REMOVE_TIER_BRICK] = {"REMOVE_TIER_BRICK",
                                       glusterd_handle_tier, NULL,
                                       GLUSTER_CLI_REMOVE_TIER_BRICK, DRC_NA,
                                       0},
    [GLUSTER_CLI_ADD_TIER_BRICK] = {"ADD_TIER_BRICK",
                                    glusterd_handle_add_tier_brick, NULL,
                                    GLUSTER_CLI_ADD_TIER_BRICK, DRC_NA, 0},
};
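/* Reading an rpcsvc_actor_t initializer above, the fields are, in order:
 * procedure name, actor callback, vector sizer (NULL here since no actor
 * streams payloads), procedure number, DRC cache type, and a trailing flag
 * which, on our reading, permits the call from unprivileged (non-reserved)
 * ports - it is 1 only for GETWD, MOUNT and UMOUNT, the procedures the
 * mount broker exposes to non-root clients.
 */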
struct rpcsvc_program gd_svc_cli_prog = {
    .progname = "GlusterD svc cli",
    .prognum = GLUSTER_CLI_PROGRAM,
    .progver = GLUSTER_CLI_VERSION,
    .numactors = GLUSTER_CLI_MAXVALUE,
    .actors = gd_svc_cli_actors,
    .synctask = _gf_true,
};
static rpcsvc_actor_t gd_svc_cli_trusted_actors[GLUSTER_CLI_MAXVALUE] = {
    [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS",
                                  glusterd_handle_cli_list_friends, NULL,
                                  GLUSTER_CLI_LIST_FRIENDS, DRC_NA, 0},
    [GLUSTER_CLI_UUID_GET] = {"UUID_GET", glusterd_handle_cli_uuid_get, NULL,
                              GLUSTER_CLI_UUID_GET, DRC_NA, 0},
    [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", glusterd_handle_cli_get_volume,
                                NULL, GLUSTER_CLI_GET_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_GETWD] = {"GETWD", glusterd_handle_getwd, NULL,
                           GLUSTER_CLI_GETWD, DRC_NA, 1},
    [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME",
                                   glusterd_handle_status_volume, NULL,
                                   GLUSTER_CLI_STATUS_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", glusterd_handle_cli_list_volume,
                                 NULL, GLUSTER_CLI_LIST_VOLUME, DRC_NA, 0},
    [GLUSTER_CLI_MOUNT] = {"MOUNT", glusterd_handle_mount, NULL,
                           GLUSTER_CLI_MOUNT, DRC_NA, 1},
    [GLUSTER_CLI_UMOUNT] = {"UMOUNT", glusterd_handle_umount, NULL,
                            GLUSTER_CLI_UMOUNT, DRC_NA, 1},
};

struct rpcsvc_program gd_svc_cli_trusted_progs = {
    .progname = "GlusterD svc cli read-only",
    .prognum = GLUSTER_CLI_PROGRAM,
    .progver = GLUSTER_CLI_VERSION,
    .numactors = GLUSTER_CLI_MAXVALUE,
    .actors = gd_svc_cli_trusted_actors,
    .synctask = _gf_true,
};
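/* gd_svc_cli_prog and gd_svc_cli_trusted_progs share the same program number
 * and version and differ only in their actor tables: the full table is
 * served on the local, trusted listener, while this read-only subset is what
 * remote CLIs reach, so mutating operations cannot be driven from other
 * hosts. (This follows the "read-only" progname; the listener wiring itself
 * is outside this excerpt.)
 */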
static int
glusterd_handle_tier(rpcsvc_request_t *req)