Copyright (c) 2010-2012 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.

This file is licensed to you under your choice of the GNU Lesser
General Public License, version 3 or any later version (LGPLv3 or
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
#include <glusterfs/compat-errno.h>
#include "glusterd-op-sm.h"
#include "glusterd-sm.h"
#include "glusterd-utils.h"
#include <glusterfs/common-utils.h>
#include "glusterd-messages.h"
#include "glusterd-snapshot-utils.h"
#define SERVER_PATH_MAX (16 * 1024)

/* Wrap the cleanup in do { } while (0) so the macro expands as a single
* statement wherever it is used. */
#define GLUSTERD_STACK_DESTROY(frame) \
do { \
frame->local = NULL; \
STACK_DESTROY(frame->root); \
} while (0)

extern glusterd_op_info_t opinfo;
extern uuid_t global_txn_id;
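/* Send the final response for a management operation back to the CLI:
* pull an op-specific error string out of op_ctx where one is expected,
* serialize op_ctx into the response dictionary and submit the reply on
* the rpcsvc request that initiated the operation. */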
glusterd_op_send_cli_response(glusterd_op_t op, int32_t op_ret,
35
int32_t op_errno, rpcsvc_request_t *req,
36
void *op_ctx, char *op_errstr)
41
char *free_ptr = NULL;
42
glusterd_conf_t *conf = NULL;
43
xdrproc_t xdrproc = NULL;
50
xlator_t *this = THIS;
59
case GD_OP_REMOVE_BRICK: {
61
ret = dict_get_str(ctx, "errstr", &errstr);
64
case GD_OP_RESET_VOLUME: {
65
if (op_ret && !op_errstr)
66
errstr = "Error while resetting options";
70
case GD_OP_DEFRAG_BRICK_VOLUME: {
72
ret = dict_get_int32(ctx, "status", &status);
74
gf_msg_trace(this->name, 0, "failed to get status");
79
case GD_OP_GSYNC_CREATE:
80
case GD_OP_GSYNC_SET: {
82
ret = dict_get_str(ctx, "errstr", &errstr);
83
ret = dict_set_str_sizen(ctx, "glusterd_workdir",
85
/* swallow error here, that will be re-triggered in cli */
89
case GD_OP_PROFILE_VOLUME: {
90
if (ctx && dict_get_int32(ctx, "count", &count)) {
91
ret = dict_set_int32_sizen(ctx, "count", 0);
93
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
94
"failed to set count in dictionary");
99
case GD_OP_START_BRICK:
100
case GD_OP_STOP_BRICK: {
101
gf_msg_debug(this->name, 0, "op '%s' not supported",
107
gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_OP_UNSUPPORTED,
108
"invalid operation");
111
case GD_OP_CREATE_VOLUME:
112
case GD_OP_START_VOLUME:
113
case GD_OP_STOP_VOLUME:
114
case GD_OP_DELETE_VOLUME:
115
case GD_OP_DEFRAG_VOLUME:
116
case GD_OP_ADD_BRICK:
117
case GD_OP_LOG_ROTATE:
118
case GD_OP_SYNC_VOLUME:
119
case GD_OP_STATEDUMP_VOLUME:
120
case GD_OP_REPLACE_BRICK:
121
case GD_OP_STATUS_VOLUME:
122
case GD_OP_SET_VOLUME:
123
case GD_OP_LIST_VOLUME:
124
case GD_OP_CLEARLOCKS_VOLUME:
125
case GD_OP_HEAL_VOLUME:
130
case GD_OP_SCRUB_STATUS:
131
case GD_OP_SCRUB_ONDEMAND:
132
case GD_OP_RESET_BRICK:
133
case GD_OP_MAX_OPVERSION:
134
case GD_OP_DETACH_NOT_STARTED:
136
case GD_OP_DETACH_TIER:
137
case GD_OP_TIER_MIGRATE:
138
case GD_OP_TIER_START_STOP:
139
case GD_OP_TIER_STATUS:
140
case GD_OP_DETACH_TIER_STATUS:
141
case GD_OP_REMOVE_TIER_BRICK:
142
case GD_OP_ADD_TIER_BRICK:
145
/*nothing specific to be done*/
148
case GD_OP_COPY_FILE: {
150
ret = dict_get_str(ctx, "errstr", &errstr);
153
case GD_OP_SYS_EXEC: {
155
ret = dict_get_str(ctx, "errstr", &errstr);
156
ret = dict_set_str_sizen(ctx, "glusterd_workdir",
164
rsp.op_errno = op_errno;
167
rsp.op_errstr = errstr;
169
rsp.op_errstr = op_errstr;
175
ret = dict_allocate_and_serialize(ctx, &rsp.dict.dict_val,
178
gf_smsg(this->name, GF_LOG_ERROR, errno,
179
GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
181
free_ptr = rsp.dict.dict_val;
184
/* needed by 'rebalance status' */
186
rsp.op_errno = status;
189
xdrproc = (xdrproc_t)xdr_gf_cli_rsp;
191
glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, xdrproc, ctx);
195
gf_msg_debug(this->name, 0, "Returning %d", ret);
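/* Wrapper that runs the real callback with glusterd's big lock held, so
* that RPC callbacks are serialized against the rest of the daemon. */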
glusterd_big_locked_cbk(struct rpc_req *req, struct iovec *iov, int count,
201
void *myframe, fop_cbk_fn_t fn)
203
glusterd_conf_t *priv = THIS->private;
206
synclock_lock(&priv->big_lock);
207
ret = fn(req, iov, count, myframe);
208
synclock_unlock(&priv->big_lock);
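/* Callback for GLUSTERD_PROBE_QUERY. On failure the result is relayed to
* the CLI and the peer is dropped; on success the peerinfo is updated
* (new address or fresh uuid) and either GD_FRIEND_EVENT_NEW_NAME or
* GD_FRIEND_EVENT_INIT_FRIEND_REQ is injected to continue the handshake. */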
__glusterd_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
217
gd1_mgmt_probe_rsp rsp = {
221
glusterd_peerinfo_t *peerinfo = NULL;
222
glusterd_friend_sm_event_t *event = NULL;
223
glusterd_probe_ctx_t *ctx = NULL;
224
xlator_t *this = THIS;
226
if (-1 == req->rpc_status) {
230
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
232
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL, "error");
234
// rsp.op_errno = EINVAL;
238
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_PROBE_REQ_RESP_RCVD,
239
"Received probe resp from uuid: %s, host: %s", uuid_utoa(rsp.uuid),
241
if (rsp.op_ret != 0) {
242
ctx = ((call_frame_t *)myframe)->local;
243
((call_frame_t *)myframe)->local = NULL;
248
glusterd_xfer_cli_probe_resp(ctx->req, rsp.op_ret, rsp.op_errno,
249
rsp.op_errstr, ctx->hostname,
250
ctx->port, ctx->dict);
253
glusterd_destroy_probe_ctx(ctx);
254
(void)glusterd_friend_remove(rsp.uuid, rsp.hostname);
260
peerinfo = glusterd_peerinfo_find(rsp.uuid, rsp.hostname);
261
if (peerinfo == NULL) {
264
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)", rsp.hostname,
uuid_utoa(rsp.uuid));
271
* In the case of a fresh probe rsp.uuid and peerinfo.uuid will not
272
* match, as peerinfo->uuid will be NULL.
274
* In the case of a peer probe being done to add a new network to a
* peer, rsp.uuid will match an existing peerinfo.uuid. If we have
* reached this stage, it means that the current address/hostname being
* used isn't present in the found peerinfo. If it were, we would have
* found out earlier in the probe process and wouldn't even reach here.
* So, we need to add the new hostname to the peer.
281
* This update should only be done when an explicit CLI probe
282
* command was used to begin the probe process.
284
if (gf_uuid_compare(rsp.uuid, peerinfo->uuid) == 0) {
285
ctx = ((call_frame_t *)myframe)->local;
286
/* Presence of ctx->req implies this probe was started by a cli probe
* command. */
if (ctx->req == NULL)
292
gf_msg_debug(this->name, 0,
293
"Adding address '%s' to "
295
rsp.hostname, uuid_utoa(rsp.uuid));
297
ret = glusterd_friend_remove(NULL, rsp.hostname);
299
gf_msg(this->name, GF_LOG_ERROR, 0,
300
GD_MSG_STALE_PEERINFO_REMOVE_FAIL,
302
"stale peerinfo with name %s",
307
ret = glusterd_peer_hostname_update(peerinfo, rsp.hostname, _gf_false);
309
gf_msg(this->name, GF_LOG_ERROR, 0,
310
GD_MSG_HOSTNAME_ADD_TO_PEERLIST_FAIL,
311
"Couldn't add hostname to peer list");
315
/* Injecting EVENT_NEW_NAME to send update */
316
ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_NEW_NAME, &event);
318
event->peername = gf_strdup(peerinfo->hostname);
319
gf_uuid_copy(event->peerid, peerinfo->uuid);
321
ret = glusterd_friend_sm_inject_event(event);
323
rsp.op_errno = GF_PROBE_FRIEND;
326
ctx = ((call_frame_t *)myframe)->local;
327
((call_frame_t *)myframe)->local = NULL;
335
glusterd_xfer_cli_probe_resp(ctx->req, ret, rsp.op_errno,
336
rsp.op_errstr, ctx->hostname,
337
ctx->port, ctx->dict);
340
glusterd_destroy_probe_ctx(ctx);
344
} else if (strncasecmp(rsp.hostname, peerinfo->hostname, 1024)) {
345
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_HOST_PRESENT_ALREADY,
346
"Host: %s with uuid: %s "
347
"already present in cluster with alias hostname: %s",
348
rsp.hostname, uuid_utoa(rsp.uuid), peerinfo->hostname);
350
ctx = ((call_frame_t *)myframe)->local;
351
((call_frame_t *)myframe)->local = NULL;
358
rsp.op_errno = GF_PROBE_FRIEND;
360
glusterd_xfer_cli_probe_resp(ctx->req, rsp.op_ret, rsp.op_errno,
361
rsp.op_errstr, ctx->hostname,
362
ctx->port, ctx->dict);
365
glusterd_destroy_probe_ctx(ctx);
366
(void)glusterd_friend_remove(NULL, rsp.hostname);
373
gf_uuid_copy(peerinfo->uuid, rsp.uuid);
375
ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_INIT_FRIEND_REQ, &event);
379
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_NEW_FRIEND_SM_EVENT_GET_FAIL,
380
"Unable to get event");
384
event->peername = gf_strdup(peerinfo->hostname);
385
gf_uuid_copy(event->peerid, peerinfo->uuid);
387
event->ctx = ((call_frame_t *)myframe)->local;
388
((call_frame_t *)myframe)->local = NULL;
389
ret = glusterd_friend_sm_inject_event(event);
391
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PROBE_REQ_RESP_RCVD,
392
"Received resp to probe req");
398
free(rsp.hostname); // malloced by xdr
399
GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
401
/* Attempt to start the state machine, needed as no state machine could
* be running at the time this RPC reply was received. */
glusterd_friend_sm();
413
glusterd_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
416
return glusterd_big_locked_cbk(req, iov, count, myframe,
417
__glusterd_probe_cbk);
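/* Callback for GLUSTERD_FRIEND_ADD. Translates the peer's ACC/RJT verdict
* into a friend state-machine event and, when the probe originated from
* the CLI, forwards the result to it. */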
__glusterd_friend_add_cbk(struct rpc_req *req, struct iovec *iov, int count,
424
gd1_mgmt_friend_rsp rsp = {
428
glusterd_friend_sm_event_t *event = NULL;
429
glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
430
glusterd_peerinfo_t *peerinfo = NULL;
432
int32_t op_errno = EINVAL;
433
glusterd_probe_ctx_t *ctx = NULL;
434
glusterd_friend_update_ctx_t *ev_ctx = NULL;
436
if (-1 == req->rpc_status) {
438
rsp.op_errno = EINVAL;
442
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
444
gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_RES_DECODE_FAIL,
447
rsp.op_errno = EINVAL;
452
op_errno = rsp.op_errno;
454
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
455
"Received %s from uuid: %s, host: %s, port: %d",
456
(op_ret) ? "RJT" : "ACC", uuid_utoa(rsp.uuid), rsp.hostname,
461
peerinfo = glusterd_peerinfo_find(rsp.uuid, rsp.hostname);
462
if (peerinfo == NULL) {
465
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
466
"received friend add response from"
467
" unknown peer uuid: %s",
468
uuid_utoa(rsp.uuid));
473
event_type = GD_FRIEND_EVENT_RCVD_RJT;
475
event_type = GD_FRIEND_EVENT_RCVD_ACC;
477
ret = glusterd_friend_sm_new_event(event_type, &event);
480
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
481
"Unable to get event");
485
ev_ctx = GF_CALLOC(1, sizeof(*ev_ctx), gf_gld_mt_friend_update_ctx_t);
491
gf_uuid_copy(ev_ctx->uuid, rsp.uuid);
492
ev_ctx->hostname = gf_strdup(rsp.hostname);
494
event->peername = gf_strdup(peerinfo->hostname);
495
gf_uuid_copy(event->peerid, peerinfo->uuid);
497
ret = glusterd_friend_sm_inject_event(event);
502
ctx = ((call_frame_t *)myframe)->local;
503
((call_frame_t *)myframe)->local = NULL;
505
if (ctx && ctx->req) {
506
/*reverse probe doesn't have req*/
507
ret = glusterd_xfer_cli_probe_resp(ctx->req, op_ret, op_errno, NULL,
508
ctx->hostname, ctx->port, ctx->dict);
511
glusterd_friend_sm();
516
glusterd_destroy_probe_ctx(ctx);
517
free(rsp.hostname); // malloced by xdr
518
GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
523
glusterd_friend_add_cbk(struct rpc_req *req, struct iovec *iov, int count,
526
return glusterd_big_locked_cbk(req, iov, count, myframe,
527
__glusterd_friend_add_cbk);
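/* Callback for GLUSTERD_FRIEND_REMOVE. Injects GD_FRIEND_EVENT_REMOVE_FRIEND
* for the peer, answers the CLI deprobe request and broadcasts the friend
* delete to the remaining peers. */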
__glusterd_friend_remove_cbk(struct rpc_req *req, struct iovec *iov, int count,
534
gd1_mgmt_friend_rsp rsp = {
537
glusterd_conf_t *conf = NULL;
539
glusterd_friend_sm_event_t *event = NULL;
540
glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
541
glusterd_peerinfo_t *peerinfo = NULL;
543
int32_t op_errno = 0;
544
glusterd_probe_ctx_t *ctx = NULL;
545
gf_boolean_t move_sm_now = _gf_true;
547
conf = THIS->private;
550
ctx = ((call_frame_t *)myframe)->local;
551
((call_frame_t *)myframe)->local = NULL;
553
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
554
"Unable to get glusterd probe context");
557
if (-1 == req->rpc_status) {
559
rsp.op_errno = EINVAL;
560
move_sm_now = _gf_false;
564
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
566
gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_RES_DECODE_FAIL,
569
rsp.op_errno = EINVAL;
574
op_errno = rsp.op_errno;
576
gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
577
"Received %s from uuid: %s, host: %s, port: %d",
578
(op_ret) ? "RJT" : "ACC", uuid_utoa(rsp.uuid), rsp.hostname,
584
peerinfo = glusterd_peerinfo_find(rsp.uuid, ctx->hostname);
585
if (peerinfo == NULL) {
586
// can happen as part of rpc clnt connection cleanup
587
// when the frame timeout happens after 30 minutes
591
event_type = GD_FRIEND_EVENT_REMOVE_FRIEND;
593
ret = glusterd_friend_sm_new_event(event_type, &event);
596
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
597
"Unable to get event");
600
event->peername = gf_strdup(peerinfo->hostname);
601
gf_uuid_copy(event->peerid, peerinfo->uuid);
603
ret = glusterd_friend_sm_inject_event(event);
608
/* friend_sm would be moved on CLNT_DISCONNECT, consequently
cleaning up peerinfo. Else, we run the risk of triggering
a clnt_destroy within saved_frames_unwind. */
618
ret = glusterd_xfer_cli_deprobe_resp(ctx->req, op_ret, op_errno, NULL,
619
ctx->hostname, ctx->dict);
620
if (!ret && move_sm_now) {
621
glusterd_friend_sm();
625
glusterd_broadcast_friend_delete(ctx->hostname, NULL);
626
glusterd_destroy_probe_ctx(ctx);
628
free(rsp.hostname); // malloced by xdr
629
GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
634
glusterd_friend_remove_cbk(struct rpc_req *req, struct iovec *iov, int count,
637
return glusterd_big_locked_cbk(req, iov, count, myframe,
638
__glusterd_friend_remove_cbk);
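/* Callback for GLUSTERD_FRIEND_UPDATE. Nothing to act on here; the reply
* is only decoded and logged. */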
__glusterd_friend_update_cbk(struct rpc_req *req, struct iovec *iov, int count,
646
gd1_mgmt_friend_update_rsp rsp = {
649
xlator_t *this = THIS;
653
if (-1 == req->rpc_status) {
654
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE, "RPC Error");
658
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);
660
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
"Failed to decode friend update response");
668
gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
669
"Received %s from uuid: %s", (ret) ? "RJT" : "ACC",
670
uuid_utoa(rsp.uuid));
672
GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
677
glusterd_friend_update_cbk(struct rpc_req *req, struct iovec *iov, int count,
680
return glusterd_big_locked_cbk(req, iov, count, myframe,
681
__glusterd_friend_update_cbk);
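/* Callback for GLUSTERD_MGMT_CLUSTER_LOCK. Records the peer's verdict in
* the (global) transaction's opinfo and injects RCVD_ACC/RCVD_RJT into
* the op state machine. */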
__glusterd_cluster_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
688
gd1_mgmt_cluster_lock_rsp rsp = {
693
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
694
xlator_t *this = THIS;
695
uuid_t *txn_id = NULL;
696
glusterd_conf_t *priv = NULL;
697
char *err_str = NULL;
699
priv = this->private;
703
txn_id = &priv->global_txn_id;
705
if (-1 == req->rpc_status) {
706
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_LOCK_RESP_FROM_PEER,
"Lock response not received from one of the peers");
err_str = "Lock response not received from one of the peers";
glusterd_set_opinfo(err_str, ENETRESET, -1);
event_type = GD_OP_EVENT_RCVD_RJT;
715
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
717
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
"Failed to decode cluster lock response received from peer");
err_str = "Failed to decode cluster lock response received from peer";
glusterd_set_opinfo(err_str, EINVAL, -1);
event_type = GD_OP_EVENT_RCVD_RJT;
731
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_LOCK_FROM_UUID_REJCT,
732
"Received lock RJT from uuid: %s", uuid_utoa(rsp.uuid));
734
gf_msg_debug(this->name, 0, "Received lock ACC from uuid: %s",
735
uuid_utoa(rsp.uuid));
739
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
743
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
744
"cluster lock response received from unknown peer: %s."
746
uuid_utoa(rsp.uuid));
747
err_str = "cluster lock response received from unknown peer";
752
event_type = GD_OP_EVENT_RCVD_RJT;
753
opinfo.op_ret = op_ret;
754
opinfo.op_errstr = gf_strdup(
755
"Another transaction could be in "
756
"progress. Please try again after"
759
event_type = GD_OP_EVENT_RCVD_ACC;
764
ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
766
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
768
"transaction's opinfo");
770
ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);
773
glusterd_friend_sm();
777
GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
782
glusterd_cluster_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
785
return glusterd_big_locked_cbk(req, iov, count, myframe,
786
__glusterd_cluster_lock_cbk);
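/* Helper that records an error string, errno and return code in the
* global opinfo of the ongoing transaction. */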
glusterd_set_opinfo(char *errstr, int32_t op_errno, int32_t op_ret)
792
opinfo.op_errstr = gf_strdup(errstr);
793
opinfo.op_errno = op_errno;
794
opinfo.op_ret = op_ret;
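/* Callback for GLUSTERD_MGMT_V3_LOCK. Unlike the older cluster-lock path,
* the transaction id travels in the frame cookie and in the response
* itself, so the correct transaction's opinfo is updated before the
* ACC/RJT event is injected. */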
glusterd_mgmt_v3_lock_peers_cbk_fn(struct rpc_req *req, struct iovec *iov,
799
int count, void *myframe)
801
gd1_mgmt_v3_lock_rsp rsp = {
806
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
807
xlator_t *this = THIS;
808
call_frame_t *frame = NULL;
809
uuid_t *txn_id = NULL;
810
char *err_str = NULL;
815
txn_id = frame->cookie;
816
frame->cookie = NULL;
818
if (-1 == req->rpc_status) {
819
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_LOCK_RESP_FROM_PEER,
"Lock response not received from one of the peers");
err_str = "Lock response not received from one of the peers";
glusterd_set_opinfo(err_str, ENETRESET, -1);
event_type = GD_OP_EVENT_RCVD_RJT;
828
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
830
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
"Failed to decode mgmt_v3 lock response received from peer");
err_str = "Failed to decode mgmt_v3 lock response received from peer";
glusterd_set_opinfo(err_str, EINVAL, -1);
event_type = GD_OP_EVENT_RCVD_RJT;
843
txn_id = &rsp.txn_id;
846
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_FROM_UUID_REJCT,
847
"Received mgmt_v3 lock RJT from uuid: %s", uuid_utoa(rsp.uuid));
849
gf_msg_debug(this->name, 0, "Received mgmt_v3 lock ACC from uuid: %s",
850
uuid_utoa(rsp.uuid));
854
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
858
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
859
"mgmt_v3 lock response received "
860
"from unknown peer: %s. Ignoring response",
861
uuid_utoa(rsp.uuid));
866
event_type = GD_OP_EVENT_RCVD_RJT;
867
opinfo.op_ret = op_ret;
868
opinfo.op_errstr = gf_strdup(
869
"Another transaction could be in "
870
"progress. Please try again after"
873
event_type = GD_OP_EVENT_RCVD_ACC;
878
ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
880
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
882
"transaction's opinfo");
884
ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);
886
glusterd_friend_sm();
890
GF_FREE(frame->cookie);
891
GLUSTERD_STACK_DESTROY(frame);
896
glusterd_mgmt_v3_lock_peers_cbk(struct rpc_req *req, struct iovec *iov,
897
int count, void *myframe)
899
return glusterd_big_locked_cbk(req, iov, count, myframe,
900
glusterd_mgmt_v3_lock_peers_cbk_fn);
904
glusterd_mgmt_v3_unlock_peers_cbk_fn(struct rpc_req *req, struct iovec *iov,
905
int count, void *myframe)
907
gd1_mgmt_v3_unlock_rsp rsp = {
912
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
913
xlator_t *this = THIS;
914
call_frame_t *frame = NULL;
915
uuid_t *txn_id = NULL;
916
char *err_str = NULL;
921
txn_id = frame->cookie;
922
frame->cookie = NULL;
924
if (-1 == req->rpc_status) {
925
err_str = "Unlock response not received from one of the peers.";
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
"Unlock response not received from one of the peers");
glusterd_set_opinfo(err_str, 0, 0);
929
event_type = GD_OP_EVENT_RCVD_RJT;
933
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
935
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
"Failed to decode mgmt_v3 unlock response received from peer");
err_str = "Failed to decode mgmt_v3 unlock response received from peer";
glusterd_set_opinfo(err_str, 0, 0);
942
event_type = GD_OP_EVENT_RCVD_RJT;
948
txn_id = &rsp.txn_id;
952
this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FROM_UUID_REJCT,
953
"Received mgmt_v3 unlock RJT from uuid: %s", uuid_utoa(rsp.uuid));
955
gf_msg_debug(this->name, 0, "Received mgmt_v3 unlock ACC from uuid: %s",
956
uuid_utoa(rsp.uuid));
960
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
964
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
965
"mgmt_v3 unlock response received "
966
"from unknown peer: %s. Ignoring response",
967
uuid_utoa(rsp.uuid));
972
event_type = GD_OP_EVENT_RCVD_RJT;
973
opinfo.op_ret = op_ret;
974
opinfo.op_errstr = gf_strdup(
975
"Another transaction could be in "
976
"progress. Please try again after"
979
event_type = GD_OP_EVENT_RCVD_ACC;
984
ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
986
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
988
"transaction's opinfo");
990
ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);
993
glusterd_friend_sm();
997
GF_FREE(frame->cookie);
998
GLUSTERD_STACK_DESTROY(frame);
1003
glusterd_mgmt_v3_unlock_peers_cbk(struct rpc_req *req, struct iovec *iov,
1004
int count, void *myframe)
1006
return glusterd_big_locked_cbk(req, iov, count, myframe,
1007
glusterd_mgmt_v3_unlock_peers_cbk_fn);
1011
__glusterd_cluster_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
1014
gd1_mgmt_cluster_lock_rsp rsp = {
1018
int32_t op_ret = -1;
1019
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
1020
xlator_t *this = THIS;
1021
uuid_t *txn_id = NULL;
1022
glusterd_conf_t *priv = NULL;
1023
char *err_str = NULL;
1025
priv = this->private;
1029
txn_id = &priv->global_txn_id;
1031
if (-1 == req->rpc_status) {
1032
err_str = "Unlock response not received from one of the peers.";
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
"Unlock response not received from one of the peers");
glusterd_set_opinfo(err_str, 0, 0);
1036
event_type = GD_OP_EVENT_RCVD_RJT;
1040
ret = xdr_to_generic(*iov, &rsp,
1041
(xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
1043
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
"Failed to decode cluster unlock response received from peer");
err_str = "Failed to decode cluster unlock response received from peer";
glusterd_set_opinfo(err_str, 0, 0);
1049
event_type = GD_OP_EVENT_RCVD_RJT;
1053
op_ret = rsp.op_ret;
1056
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNLOCK_FROM_UUID_REJCT,
1057
"Received unlock RJT from uuid: %s", uuid_utoa(rsp.uuid));
1059
gf_msg_debug(this->name, 0, "Received unlock ACC from uuid: %s",
1060
uuid_utoa(rsp.uuid));
1064
ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
1068
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
1069
"Unlock response received from unknown peer %s",
1070
uuid_utoa(rsp.uuid));
1075
event_type = GD_OP_EVENT_RCVD_RJT;
1076
opinfo.op_ret = op_ret;
1078
event_type = GD_OP_EVENT_RCVD_ACC;
1083
ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
1085
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
1087
"transaction's opinfo");
1089
ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);
1092
glusterd_friend_sm();
1096
GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
1101
glusterd_cluster_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
1104
return glusterd_big_locked_cbk(req, iov, count, myframe,
1105
__glusterd_cluster_unlock_cbk);
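/* Callback for GLUSTERD_MGMT_STAGE_OP. Decodes the stage verdict,
* unserializes any returned dictionary, records an op_errstr on rejection
* and injects the corresponding event into the op state machine. */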
__glusterd_stage_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
1112
gd1_mgmt_stage_op_rsp rsp = {
1116
int32_t op_ret = -1;
1117
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
1118
glusterd_peerinfo_t *peerinfo = NULL;
1119
dict_t *dict = NULL;
1120
char *peer_str = NULL;
1121
xlator_t *this = THIS;
1122
uuid_t *txn_id = NULL;
1123
call_frame_t *frame = NULL;
1129
txn_id = frame->cookie;
1131
if (-1 == req->rpc_status) {
1133
rsp.op_errno = EINVAL;
1134
/* use standard allocation to keep uniformity in freeing it */
rsp.op_errstr = strdup("error");
1140
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
1142
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
1143
"Failed to decode stage "
1144
"response received from peer");
1146
rsp.op_errno = EINVAL;
1147
/* use standard allocation to keep uniformity in freeing it */
rsp.op_errstr = strdup(
"Failed to decode stage response "
"received from peer.");
1155
if (rsp.dict.dict_len) {
1156
/* Unserialize the dictionary */
1159
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
1161
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
1163
"unserialize rsp-buffer to dictionary");
1164
event_type = GD_OP_EVENT_RCVD_RJT;
1167
dict->extra_stdfree = rsp.dict.dict_val;
1172
op_ret = rsp.op_ret;
1175
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STAGE_FROM_UUID_REJCT,
1176
"Received stage RJT from uuid: %s", uuid_utoa(rsp.uuid));
1178
gf_msg_debug(this->name, 0, "Received stage ACC from uuid: %s",
1179
uuid_utoa(rsp.uuid));
1183
peerinfo = glusterd_peerinfo_find(rsp.uuid, NULL);
1184
if (peerinfo == NULL) {
1185
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
1186
"Stage response received "
1187
"from unknown peer: %s. Ignoring response.",
1188
uuid_utoa(rsp.uuid));
1192
event_type = GD_OP_EVENT_RCVD_RJT;
1193
opinfo.op_ret = op_ret;
1194
if (strcmp("", rsp.op_errstr)) {
1195
opinfo.op_errstr = gf_strdup(rsp.op_errstr);
1198
peer_str = peerinfo->hostname;
1200
peer_str = uuid_utoa(rsp.uuid);
1202
snprintf(err_str, sizeof(err_str), OPERRSTR_STAGE_FAIL, peer_str);
1203
opinfo.op_errstr = gf_strdup(err_str);
1206
event_type = GD_OP_EVENT_RCVD_ACC;
1211
ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
1213
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
1215
"transaction's opinfo");
1217
ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);
1220
glusterd_friend_sm();
1224
free(rsp.op_errstr); // malloced by xdr
1226
if (!dict->extra_stdfree && rsp.dict.dict_val)
1227
free(rsp.dict.dict_val); // malloced by xdr
1230
free(rsp.dict.dict_val); // malloced by xdr
1232
GF_FREE(frame->cookie);
1233
GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
1238
glusterd_stage_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
1241
return glusterd_big_locked_cbk(req, iov, count, myframe,
1242
__glusterd_stage_op_cbk);
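/* Callback for GLUSTERD_MGMT_COMMIT_OP. Besides injecting RCVD_ACC/RCVD_RJT,
* op-specific response dictionaries (profile, rebalance) are folded back
* into the transaction's op_ctx. */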
__glusterd_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
1249
gd1_mgmt_commit_op_rsp rsp = {
1253
int32_t op_ret = -1;
1254
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
1255
glusterd_peerinfo_t *peerinfo = NULL;
1256
dict_t *dict = NULL;
1257
char *peer_str = NULL;
1258
xlator_t *this = THIS;
1259
uuid_t *txn_id = NULL;
1260
glusterd_op_info_t txn_op_info = {
1261
GD_OP_STATE_DEFAULT,
1263
call_frame_t *frame = NULL;
1269
txn_id = frame->cookie;
1271
if (-1 == req->rpc_status) {
1273
rsp.op_errno = EINVAL;
1274
/* use standard allocation to keep uniformity in freeing it */
rsp.op_errstr = strdup("error");
1277
event_type = GD_OP_EVENT_RCVD_RJT;
1281
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
1283
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
1284
"Failed to decode commit "
1285
"response received from peer");
1287
rsp.op_errno = EINVAL;
1288
/* use standard allocation to keep uniformity in freeing it */
rsp.op_errstr = strdup(
"Failed to decode commit response "
"received from peer.");
1293
event_type = GD_OP_EVENT_RCVD_RJT;
1297
if (rsp.dict.dict_len) {
1298
/* Unserialize the dictionary */
1301
ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
1303
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
1305
"unserialize rsp-buffer to dictionary");
1306
event_type = GD_OP_EVENT_RCVD_RJT;
1309
dict->extra_stdfree = rsp.dict.dict_val;
1313
op_ret = rsp.op_ret;
1316
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_FROM_UUID_REJCT,
1317
"Received commit RJT from uuid: %s", uuid_utoa(rsp.uuid));
1319
gf_msg_debug(this->name, 0, "Received commit ACC from uuid: %s",
1320
uuid_utoa(rsp.uuid));
1323
ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info);
1325
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_GET_FAIL,
1326
"Failed to get txn_op_info "
1328
uuid_utoa(*txn_id));
1332
peerinfo = glusterd_peerinfo_find(rsp.uuid, NULL);
1333
if (peerinfo == NULL) {
1334
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
1335
"Commit response for "
1336
"'Volume %s' received from unknown peer: %s",
1337
gd_op_list[opinfo.op], uuid_utoa(rsp.uuid));
1341
event_type = GD_OP_EVENT_RCVD_RJT;
1342
opinfo.op_ret = op_ret;
1343
if (strcmp("", rsp.op_errstr)) {
1344
opinfo.op_errstr = gf_strdup(rsp.op_errstr);
1347
peer_str = peerinfo->hostname;
1349
peer_str = uuid_utoa(rsp.uuid);
1351
snprintf(err_str, sizeof(err_str), OPERRSTR_COMMIT_FAIL, peer_str);
1352
opinfo.op_errstr = gf_strdup(err_str);
1354
if (!opinfo.op_errstr) {
1358
event_type = GD_OP_EVENT_RCVD_ACC;
1359
GF_ASSERT(rsp.op == txn_op_info.op);
1362
case GD_OP_PROFILE_VOLUME:
1363
ret = glusterd_profile_volume_use_rsp_dict(txn_op_info.op_ctx,
1369
case GD_OP_REBALANCE:
1370
case GD_OP_DEFRAG_BRICK_VOLUME:
1371
ret = glusterd_volume_rebalance_use_rsp_dict(txn_op_info.op_ctx,
1386
ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
1388
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
1390
"transaction's opinfo");
1392
ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);
1395
glusterd_friend_sm();
1401
free(rsp.op_errstr); // malloced by xdr
1402
GF_FREE(frame->cookie);
1403
GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
1408
glusterd_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
1411
return glusterd_big_locked_cbk(req, iov, count, myframe,
1412
__glusterd_commit_op_cbk);
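/* Build and submit a GLUSTERD_PROBE_QUERY request to the peer described by
* the "hostname", "port" and "peerinfo" entries of the dict passed in
* 'data'. */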
glusterd_rpc_probe(call_frame_t *frame, xlator_t *this, void *data)
1418
gd1_mgmt_probe_req req = {
1423
char *hostname = NULL;
1424
glusterd_peerinfo_t *peerinfo = NULL;
1425
dict_t *dict = NULL;
1427
if (!frame || !data) {
1428
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
1435
ret = dict_get_str(dict, "hostname", &hostname);
1437
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
1438
"Key=hostname", NULL);
1441
ret = dict_get_int32(dict, "port", &port);
1443
gf_smsg(this->name, GF_LOG_DEBUG, -ret, GD_MSG_DICT_GET_FAILED,
1445
port = GF_DEFAULT_BASE_PORT;
1448
ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
1450
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
1451
"Key=peerinfo", NULL);
1455
gf_uuid_copy(req.uuid, MY_UUID);
1456
req.hostname = gf_strdup(hostname);
1459
ret = glusterd_submit_request(
1460
peerinfo->rpc, &req, frame, peerinfo->peer, GLUSTERD_PROBE_QUERY, NULL,
1461
this, glusterd_probe_cbk, (xdrproc_t)xdr_gd1_mgmt_probe_req);
1464
GF_FREE(req.hostname);
1465
gf_msg_debug(this->name, 0, "Returning %d", ret);
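/* Build and submit a GLUSTERD_FRIEND_ADD request. peer_data is populated
* with this node's hostname, the missed-snapshots list, the snapshot list
* and the volume list before being serialized into the request. */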
glusterd_rpc_friend_add(call_frame_t *frame, xlator_t *this, void *data)
1472
gd1_mgmt_friend_req req = {
1476
glusterd_peerinfo_t *peerinfo = NULL;
1477
glusterd_friend_sm_event_t *event = NULL;
1478
dict_t *peer_data = NULL;
1480
if (!frame || !data) {
1481
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
1490
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
1494
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
1495
"Could not find peer %s(%s)", event->peername,
1496
uuid_utoa(event->peerid));
1500
req.hostname = gf_strdup(peerinfo->hostname);
1501
req.port = peerinfo->port;
1505
gf_uuid_copy(req.uuid, MY_UUID);
1507
peer_data = dict_new();
1509
gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
1515
ret = dict_set_dynstr_with_alloc(peer_data, "hostname_in_cluster",
1516
peerinfo->hostname);
1518
gf_msg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
1519
"Unable to add hostname of the peer");
1523
ret = glusterd_add_missed_snaps_to_export_dict(peer_data);
1525
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MISSED_SNAP_LIST_STORE_FAIL,
1526
"Unable to add list of missed snapshots "
1527
"in the peer_data dict for handshake");
1531
ret = glusterd_add_snapshots_to_export_dict(peer_data);
1533
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_LIST_SET_FAIL,
1534
"Unable to add list of snapshots "
1535
"in the peer_data dict for handshake");
1539
/* Don't add any key-value pairs to the peer_data dictionary after
* calling this function. */
ret = glusterd_add_volumes_to_export_dict(peer_data, &req.vols.vols_val,
1542
&req.vols.vols_len);
1544
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
1545
"Unable to add list of volumes "
1546
"in the peer_data dict for handshake");
1550
if (!req.vols.vols_len) {
1551
ret = dict_allocate_and_serialize(peer_data, &req.vols.vols_val,
1552
&req.vols.vols_len);
1554
gf_smsg(this->name, GF_LOG_ERROR, errno,
1555
GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
1560
ret = glusterd_submit_request(
1561
peerinfo->rpc, &req, frame, peerinfo->peer, GLUSTERD_FRIEND_ADD, NULL,
1562
this, glusterd_friend_add_cbk, (xdrproc_t)xdr_gd1_mgmt_friend_req);
1565
GF_FREE(req.vols.vols_val);
1566
GF_FREE(req.hostname);
1569
dict_unref(peer_data);
1571
gf_msg_debug(this->name, 0, "Returning %d", ret);
1576
glusterd_rpc_friend_remove(call_frame_t *frame, xlator_t *this, void *data)
1578
gd1_mgmt_friend_req req = {
1582
glusterd_peerinfo_t *peerinfo = NULL;
1583
glusterd_friend_sm_event_t *event = NULL;
1585
if (!frame || !data) {
1594
peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
1598
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
1599
"Could not find peer %s(%s)", event->peername,
1600
uuid_utoa(event->peerid));
1604
gf_uuid_copy(req.uuid, MY_UUID);
1605
req.hostname = gf_strdup(peerinfo->hostname);
1606
req.port = peerinfo->port;
1608
ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->peer,
1609
GLUSTERD_FRIEND_REMOVE, NULL, this,
1610
glusterd_friend_remove_cbk,
1611
(xdrproc_t)xdr_gd1_mgmt_friend_req);
1615
GF_FREE(req.hostname);
1617
gf_msg_debug(this->name, 0, "Returning %d", ret);
1622
glusterd_rpc_friend_update(call_frame_t *frame, xlator_t *this, void *data)
1624
gd1_mgmt_friend_update req = {
1628
dict_t *friends = NULL;
1629
call_frame_t *dummy_frame = NULL;
1630
glusterd_peerinfo_t *peerinfo = NULL;
1636
ret = dict_get_ptr(friends, "peerinfo", VOID(&peerinfo));
1639
/* Don't want to send the pointer over */
1640
dict_del_sizen(friends, "peerinfo");
1642
ret = dict_allocate_and_serialize(friends, &req.friends.friends_val,
1643
&req.friends.friends_len);
1647
gf_uuid_copy(req.uuid, MY_UUID);
1649
dummy_frame = create_frame(this, this->ctx->pool);
1650
ret = glusterd_submit_request(peerinfo->rpc, &req, dummy_frame,
1651
peerinfo->peer, GLUSTERD_FRIEND_UPDATE, NULL,
1652
this, glusterd_friend_update_cbk,
1653
(xdrproc_t)xdr_gd1_mgmt_friend_update);
1656
GF_FREE(req.friends.friends_val);
1658
if (ret && dummy_frame)
1659
STACK_DESTROY(dummy_frame->root);
1661
gf_msg_debug(this->name, 0, "Returning %d", ret);
1666
glusterd_cluster_lock(call_frame_t *frame, xlator_t *this, void *data)
1668
gd1_mgmt_cluster_lock_req req = {
1672
glusterd_peerinfo_t *peerinfo = NULL;
1673
call_frame_t *dummy_frame = NULL;
1677
glusterd_get_uuid(&req.uuid);
1679
dummy_frame = create_frame(this, this->ctx->pool);
1683
ret = glusterd_submit_request(peerinfo->rpc, &req, dummy_frame,
1684
peerinfo->mgmt, GLUSTERD_MGMT_CLUSTER_LOCK,
1685
NULL, this, glusterd_cluster_lock_cbk,
1686
(xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
1688
gf_msg_debug(this->name, 0, "Returning %d", ret);
1690
if (ret && dummy_frame)
1691
STACK_DESTROY(dummy_frame->root);
1696
glusterd_mgmt_v3_lock_peers(call_frame_t *frame, xlator_t *this, void *data)
1698
gd1_mgmt_v3_lock_req req = {
1702
glusterd_peerinfo_t *peerinfo = NULL;
1703
dict_t *dict = NULL;
1704
uuid_t *txn_id = NULL;
1708
ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
1710
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
1711
"Key=peerinfo", NULL);
1715
// peerinfo should not be in payload
1716
dict_del_sizen(dict, "peerinfo");
1718
glusterd_get_uuid(&req.uuid);
1720
ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
1721
&req.dict.dict_len);
1723
gf_smsg(this->name, GF_LOG_ERROR, 0,
1724
GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
1728
/* Sending valid transaction ID to peers */
1729
ret = dict_get_bin(dict, "transaction_id", (void **)&txn_id);
1731
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
1732
"Failed to get transaction id.");
1735
gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(*txn_id));
1736
gf_uuid_copy(req.txn_id, *txn_id);
1740
frame = create_frame(this, this->ctx->pool);
1746
frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
1747
if (!frame->cookie) {
1748
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
1752
gf_uuid_copy(frame->cookie, req.txn_id);
1754
ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->mgmt_v3,
1755
GLUSTERD_MGMT_V3_LOCK, NULL, this,
1756
glusterd_mgmt_v3_lock_peers_cbk,
1757
(xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
1759
gf_msg_debug(this->name, 0, "Returning %d", ret);
1762
if (req.dict.dict_val)
1763
GF_FREE(req.dict.dict_val);
1768
glusterd_mgmt_v3_unlock_peers(call_frame_t *frame, xlator_t *this, void *data)
1770
gd1_mgmt_v3_unlock_req req = {
1774
glusterd_peerinfo_t *peerinfo = NULL;
1775
dict_t *dict = NULL;
1776
uuid_t *txn_id = NULL;
1780
ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
1782
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
1783
"Key=peerinfo", NULL);
1787
// peerinfo should not be in payload
1788
dict_del_sizen(dict, "peerinfo");
1790
glusterd_get_uuid(&req.uuid);
1792
ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
1793
&req.dict.dict_len);
1795
gf_smsg(this->name, GF_LOG_ERROR, errno,
1796
GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
1800
/* Sending valid transaction ID to peers */
1801
ret = dict_get_bin(dict, "transaction_id", (void **)&txn_id);
1803
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
1804
"Failed to get transaction id.");
1807
gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(*txn_id));
1808
gf_uuid_copy(req.txn_id, *txn_id);
1812
frame = create_frame(this, this->ctx->pool);
1818
frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
1819
if (!frame->cookie) {
1820
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
1824
gf_uuid_copy(frame->cookie, req.txn_id);
1826
ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->mgmt_v3,
1827
GLUSTERD_MGMT_V3_UNLOCK, NULL, this,
1828
glusterd_mgmt_v3_unlock_peers_cbk,
1829
(xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
1831
gf_msg_debug(this->name, 0, "Returning %d", ret);
1835
if (req.dict.dict_val)
1836
GF_FREE(req.dict.dict_val);
1841
glusterd_cluster_unlock(call_frame_t *frame, xlator_t *this, void *data)
1843
gd1_mgmt_cluster_lock_req req = {
1847
glusterd_peerinfo_t *peerinfo = NULL;
1848
call_frame_t *dummy_frame = NULL;
1852
glusterd_get_uuid(&req.uuid);
1854
dummy_frame = create_frame(this, this->ctx->pool);
1858
ret = glusterd_submit_request(peerinfo->rpc, &req, dummy_frame,
1859
peerinfo->mgmt, GLUSTERD_MGMT_CLUSTER_UNLOCK,
1860
NULL, this, glusterd_cluster_unlock_cbk,
1861
(xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
1863
gf_msg_debug(this->name, 0, "Returning %d", ret);
1865
if (ret && dummy_frame)
1866
STACK_DESTROY(dummy_frame->root);
1872
glusterd_stage_op(call_frame_t *frame, xlator_t *this, void *data)
1874
gd1_mgmt_stage_op_req req = {
1880
glusterd_peerinfo_t *peerinfo = NULL;
1881
dict_t *dict = NULL;
1882
uuid_t *txn_id = NULL;
1886
ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
1888
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
1889
"Key=peerinfo", NULL);
1893
// peerinfo should not be in payload
1894
dict_del_sizen(dict, "peerinfo");
1896
glusterd_get_uuid(&req.uuid);
1897
req.op = glusterd_op_get_op();
1899
ret = dict_allocate_and_serialize(dict, &req.buf.buf_val, &req.buf.buf_len);
1901
gf_smsg(this->name, GF_LOG_ERROR, errno,
1902
GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
1905
/* Sending valid transaction ID to peers */
1906
ret = dict_get_bin(dict, "transaction_id", (void **)&txn_id);
1908
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
1909
"Failed to get transaction id.");
1912
gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(*txn_id));
1916
frame = create_frame(this, this->ctx->pool);
1922
frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
1923
if (!frame->cookie) {
1924
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
1928
gf_uuid_copy(frame->cookie, *txn_id);
1930
ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->mgmt,
1931
GLUSTERD_MGMT_STAGE_OP, NULL, this,
1932
glusterd_stage_op_cbk,
1933
(xdrproc_t)xdr_gd1_mgmt_stage_op_req);
1936
if (req.buf.buf_val)
1937
GF_FREE(req.buf.buf_val);
1939
gf_msg_debug(this->name, 0, "Returning %d", ret);
1944
glusterd_commit_op(call_frame_t *frame, xlator_t *this, void *data)
1946
gd1_mgmt_commit_op_req req = {
1952
glusterd_peerinfo_t *peerinfo = NULL;
1953
dict_t *dict = NULL;
1954
uuid_t *txn_id = NULL;
1958
ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
1960
gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
1961
"Key=peerinfo", NULL);
1965
// peerinfo should not be in payload
1966
dict_del_sizen(dict, "peerinfo");
1968
glusterd_get_uuid(&req.uuid);
1969
req.op = glusterd_op_get_op();
1971
ret = dict_allocate_and_serialize(dict, &req.buf.buf_val, &req.buf.buf_len);
1973
gf_smsg(this->name, GF_LOG_ERROR, errno,
1974
GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
1977
/* Sending valid transaction ID to peers */
1978
ret = dict_get_bin(dict, "transaction_id", (void **)&txn_id);
1980
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
1981
"Failed to get transaction id.");
1984
gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(*txn_id));
1988
frame = create_frame(this, this->ctx->pool);
1994
frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
1995
if (!frame->cookie) {
1996
gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
2000
gf_uuid_copy(frame->cookie, *txn_id);
2002
ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->mgmt,
2003
GLUSTERD_MGMT_COMMIT_OP, NULL, this,
2004
glusterd_commit_op_cbk,
2005
(xdrproc_t)xdr_gd1_mgmt_commit_op_req);
2008
if (req.buf.buf_val)
2009
GF_FREE(req.buf.buf_val);
2011
gf_msg_debug(this->name, 0, "Returning %d", ret);
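/* Callback for brick/node op requests sent to bricks and daemons. Decodes
* the response, tags 'volume status' responses with the brick index from
* the pending node, and hands the response dict to the op state machine
* via an ev_ctx. */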
__glusterd_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
2019
gd1_mgmt_brick_op_rsp rsp = {0};
2021
int32_t op_ret = -1;
2022
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
2023
call_frame_t *frame = NULL;
2024
glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
2025
dict_t *dict = NULL;
2027
glusterd_req_ctx_t *req_ctx = NULL;
2028
glusterd_pending_node_t *node = NULL;
2029
xlator_t *this = THIS;
2030
uuid_t *txn_id = NULL;
2031
glusterd_conf_t *priv = NULL;
2033
priv = this->private;
2037
txn_id = &priv->global_txn_id;
2039
req_ctx = frame->local;
2041
if (-1 == req->rpc_status) {
2043
rsp.op_errno = EINVAL;
2044
/* use standard allocation to keep uniformity in freeing it */
rsp.op_errstr = strdup("error");
2047
event_type = GD_OP_EVENT_RCVD_RJT;
2051
ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
2053
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
2054
"Failed to decode brick op "
2055
"response received");
2057
rsp.op_errno = EINVAL;
2058
rsp.op_errstr = strdup("Unable to decode brick op response");
2059
event_type = GD_OP_EVENT_RCVD_RJT;
2063
if (rsp.output.output_len) {
2064
/* Unserialize the dictionary */
2067
ret = dict_unserialize(rsp.output.output_val, rsp.output.output_len,
2070
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
2072
"unserialize rsp-buffer to dictionary");
2073
event_type = GD_OP_EVENT_RCVD_RJT;
2076
dict->extra_stdfree = rsp.output.output_val;
2080
op_ret = rsp.op_ret;
2082
/* Add index to rsp_dict for GD_OP_STATUS_VOLUME */
2083
if (GD_OP_STATUS_VOLUME == req_ctx->op) {
2084
node = frame->cookie;
2085
index = node->index;
2086
ret = dict_set_int32_sizen(dict, "index", index);
2088
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
2089
"Error setting index on brick status rsp dict");
2091
event_type = GD_OP_EVENT_RCVD_RJT;
2097
if (req_ctx && req_ctx->dict) {
2098
ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
2099
gf_msg_debug(this->name, -ret, "transaction ID = %s",
2100
uuid_utoa(*txn_id));
2103
ev_ctx = GF_CALLOC(1, sizeof(*ev_ctx), gf_gld_mt_brick_rsp_ctx_t);
2106
event_type = GD_OP_EVENT_RCVD_RJT;
2107
ev_ctx->op_ret = op_ret;
2108
ev_ctx->op_errstr = gf_strdup(rsp.op_errstr);
2110
event_type = GD_OP_EVENT_RCVD_ACC;
2112
ev_ctx->pending_node = frame->cookie;
2113
ev_ctx->rsp_dict = dict;
2114
ev_ctx->commit_ctx = frame->local;
2115
ret = glusterd_op_sm_inject_event(event_type, txn_id, ev_ctx);
2118
glusterd_friend_sm();
2127
GF_FREE(ev_ctx->op_errstr);
2131
free(rsp.op_errstr); // malloced by xdr
2132
GLUSTERD_STACK_DESTROY(frame);
2137
glusterd_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
2140
return glusterd_big_locked_cbk(req, iov, count, myframe,
2141
__glusterd_brick_op_cbk);
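/* Select the bricks/daemons affected by the current operation, build a
* payload for each pending node and submit the brick op requests, noting
* in opinfo.brick_pending_count how many responses are expected. */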
glusterd_brick_op(call_frame_t *frame, xlator_t *this, void *data)
2147
gd1_mgmt_brick_op_req *req = NULL;
2150
glusterd_conf_t *priv = NULL;
2151
call_frame_t *dummy_frame = NULL;
2152
char *op_errstr = NULL;
2153
int pending_bricks = 0;
2154
glusterd_pending_node_t *pending_node = NULL;
2155
glusterd_req_ctx_t *req_ctx = NULL;
2156
struct rpc_clnt *rpc = NULL;
2157
dict_t *op_ctx = NULL;
2158
uuid_t *txn_id = NULL;
2160
priv = this->private;
2163
txn_id = &priv->global_txn_id;
2167
CDS_INIT_LIST_HEAD(&opinfo.pending_bricks);
2169
ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
2171
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_BRICK_SELECT_FAIL,
"Could not get transaction ID from dict, global "
"transaction ID = %s",
2174
uuid_utoa(*txn_id));
2176
gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
2178
ret = glusterd_op_bricks_select(req_ctx->op, req_ctx->dict, &op_errstr,
2179
&opinfo.pending_bricks, NULL);
2182
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_SELECT_FAIL,
2183
"Failed to select bricks "
2184
"while performing brick op during 'Volume %s'",
2185
gd_op_list[opinfo.op]);
2186
opinfo.op_errstr = op_errstr;
2190
cds_list_for_each_entry(pending_node, &opinfo.pending_bricks, list)
2192
dummy_frame = create_frame(this, this->ctx->pool);
2196
if ((pending_node->type == GD_NODE_NFS) ||
2197
(pending_node->type == GD_NODE_QUOTAD) ||
2198
(pending_node->type == GD_NODE_SNAPD) ||
2199
(pending_node->type == GD_NODE_SCRUB) ||
2200
((pending_node->type == GD_NODE_SHD) &&
2201
(req_ctx->op == GD_OP_STATUS_VOLUME))) {
2202
ret = glusterd_node_op_build_payload(
2203
req_ctx->op, (gd1_mgmt_brick_op_req **)&req, req_ctx->dict);
2205
ret = glusterd_brick_op_build_payload(
2206
req_ctx->op, pending_node->node, (gd1_mgmt_brick_op_req **)&req,
2210
gf_msg(this->name, GF_LOG_ERROR, 0,
2211
GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
2213
"build op payload during "
2215
gd_op_list[req_ctx->op]);
2219
dummy_frame->local = data;
2220
dummy_frame->cookie = pending_node;
2222
rpc = glusterd_pending_node_get_rpc(pending_node);
2224
if (pending_node->type == GD_NODE_REBALANCE) {
2225
opinfo.brick_pending_count = 0;
2227
GF_FREE(req->input.input_val);
2230
GLUSTERD_STACK_DESTROY(dummy_frame);
2232
op_ctx = glusterd_op_get_ctx();
2235
glusterd_defrag_volume_node_rsp(req_ctx->dict, NULL, op_ctx);
2241
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
2243
"due to rpc failure.");
2247
ret = glusterd_submit_request(
2248
rpc, req, dummy_frame, priv->gfs_mgmt, req->op, NULL, this,
2249
glusterd_brick_op_cbk, (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
2250
GF_FREE(req->input.input_val);
2257
glusterd_pending_node_put_rpc(pending_node);
2260
gf_msg_trace(this->name, 0,
2261
"Sent brick op req for operation "
2262
"'Volume %s' to %d bricks",
2263
gd_op_list[req_ctx->op], pending_bricks);
2264
opinfo.brick_pending_count = pending_bricks;
2269
opinfo.op_ret = ret;
2271
ret1 = glusterd_set_txn_opinfo(txn_id, &opinfo);
2273
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
2275
"transaction's opinfo");
2278
glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT, txn_id, data);
2279
opinfo.op_ret = ret;
2282
gf_msg_debug(this->name, 0, "Returning %d", ret);
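/* Dispatch tables mapping glusterd RPC procedure numbers to the request
* builders above, and the rpc_clnt programs that register them. */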
struct rpc_clnt_procedure gd_brick_actors[GLUSTERD_BRICK_MAXVALUE] = {
[GLUSTERD_BRICK_NULL] = {"NULL", NULL},
[GLUSTERD_BRICK_OP] = {"BRICK_OP", glusterd_brick_op},
};

struct rpc_clnt_procedure gd_peer_actors[GLUSTERD_FRIEND_MAXVALUE] = {
[GLUSTERD_FRIEND_NULL] = {"NULL", NULL},
[GLUSTERD_PROBE_QUERY] = {"PROBE_QUERY", glusterd_rpc_probe},
[GLUSTERD_FRIEND_ADD] = {"FRIEND_ADD", glusterd_rpc_friend_add},
[GLUSTERD_FRIEND_REMOVE] = {"FRIEND_REMOVE", glusterd_rpc_friend_remove},
[GLUSTERD_FRIEND_UPDATE] = {"FRIEND_UPDATE", glusterd_rpc_friend_update},
};

struct rpc_clnt_procedure gd_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
[GLUSTERD_MGMT_NULL] = {"NULL", NULL},
[GLUSTERD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK", glusterd_cluster_lock},
[GLUSTERD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK", glusterd_cluster_unlock},
[GLUSTERD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd_stage_op},
[GLUSTERD_MGMT_COMMIT_OP] = {"COMMIT_OP", glusterd_commit_op},
};

struct rpc_clnt_procedure gd_mgmt_v3_actors[GLUSTERD_MGMT_V3_MAXVALUE] = {
[GLUSTERD_MGMT_V3_NULL] = {"NULL", NULL},
[GLUSTERD_MGMT_V3_LOCK] = {"MGMT_V3_LOCK", glusterd_mgmt_v3_lock_peers},
[GLUSTERD_MGMT_V3_UNLOCK] = {"MGMT_V3_UNLOCK", glusterd_mgmt_v3_unlock_peers},
};

struct rpc_clnt_program gd_mgmt_prog = {
.progname = "glusterd mgmt",
.prognum = GD_MGMT_PROGRAM,
.progver = GD_MGMT_VERSION,
.proctable = gd_mgmt_actors,
.numproc = GLUSTERD_MGMT_MAXVALUE,
};

struct rpc_clnt_program gd_brick_prog = {
.progname = "brick operations",
.prognum = GD_BRICK_PROGRAM,
.progver = GD_BRICK_VERSION,
.proctable = gd_brick_actors,
.numproc = GLUSTERD_BRICK_MAXVALUE,
};

struct rpc_clnt_program gd_peer_prog = {
.progname = "Peer mgmt",
.prognum = GD_FRIEND_PROGRAM,
.progver = GD_FRIEND_VERSION,
.proctable = gd_peer_actors,
.numproc = GLUSTERD_FRIEND_MAXVALUE,
};

struct rpc_clnt_program gd_mgmt_v3_prog = {
.progname = "glusterd mgmt v3",
.prognum = GD_MGMT_PROGRAM,
.progver = GD_MGMT_V3_VERSION,
.proctable = gd_mgmt_v3_actors,
.numproc = GLUSTERD_MGMT_V3_MAXVALUE,
};