glusterfs · glusterd-rpc-ops.c · 2345 lines · 68.3 KB

/*
   Copyright (c) 2010-2012 Red Hat, Inc. <http://www.redhat.com>
   This file is part of GlusterFS.

   This file is licensed to you under your choice of the GNU Lesser
   General Public License, version 3 or any later version (LGPLv3 or
   later), or the GNU General Public License, version 2 (GPLv2), in all
   cases as published by the Free Software Foundation.
*/

#include "rpc-clnt.h"

#include <glusterfs/compat-errno.h>
#include "glusterd-op-sm.h"
#include "glusterd-sm.h"
#include "glusterd-utils.h"
#include <glusterfs/common-utils.h>
#include "glusterd-messages.h"
#include "glusterd-snapshot-utils.h"
#include <sys/uio.h>

#define SERVER_PATH_MAX (16 * 1024)

#define GLUSTERD_STACK_DESTROY(frame)                                          \
    do {                                                                       \
        frame->local = NULL;                                                   \
        STACK_DESTROY(frame->root);                                            \
    } while (0)

extern glusterd_op_info_t opinfo;
extern uuid_t global_txn_id;

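/* Send the result of an op back to the CLI: patch up op-specific fields
 * (errstr, rebalance status, glusterd workdir, ...) in the context dict,
 * serialize it into a gf_cli_rsp, and hand the response to
 * glusterd_to_cli(). */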
int32_t
glusterd_op_send_cli_response(glusterd_op_t op, int32_t op_ret,
                              int32_t op_errno, rpcsvc_request_t *req,
                              void *op_ctx, char *op_errstr)
{
    int32_t ret = -1;
    void *cli_rsp = NULL;
    dict_t *ctx = NULL;
    char *free_ptr = NULL;
    glusterd_conf_t *conf = NULL;
    xdrproc_t xdrproc = NULL;
    char *errstr = NULL;
    int32_t status = 0;
    int32_t count = 0;
    gf_cli_rsp rsp = {
        0,
    };
    xlator_t *this = THIS;

    conf = this->private;

    GF_ASSERT(conf);

    ctx = op_ctx;

    switch (op) {
        case GD_OP_REMOVE_BRICK: {
            if (ctx)
                ret = dict_get_str(ctx, "errstr", &errstr);
            break;
        }
        case GD_OP_RESET_VOLUME: {
            if (op_ret && !op_errstr)
                errstr = "Error while resetting options";
            break;
        }
        case GD_OP_REBALANCE:
        case GD_OP_DEFRAG_BRICK_VOLUME: {
            if (ctx) {
                ret = dict_get_int32(ctx, "status", &status);
                if (ret) {
                    gf_msg_trace(this->name, 0, "failed to get status");
                }
            }
            break;
        }
        case GD_OP_GSYNC_CREATE:
        case GD_OP_GSYNC_SET: {
            if (ctx) {
                ret = dict_get_str(ctx, "errstr", &errstr);
                ret = dict_set_str_sizen(ctx, "glusterd_workdir",
                                         conf->workdir);
                /* swallow error here, that will be re-triggered in cli */
            }
            break;
        }
        case GD_OP_PROFILE_VOLUME: {
            if (ctx && dict_get_int32(ctx, "count", &count)) {
                ret = dict_set_int32_sizen(ctx, "count", 0);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                           "failed to set count in dictionary");
                }
            }
            break;
        }
        case GD_OP_START_BRICK:
        case GD_OP_STOP_BRICK: {
            gf_msg_debug(this->name, 0, "op '%s' not supported",
                         gd_op_list[op]);
            break;
        }
        case GD_OP_NONE:
        case GD_OP_MAX: {
            gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_OP_UNSUPPORTED,
                   "invalid operation");
            break;
        }
        case GD_OP_CREATE_VOLUME:
        case GD_OP_START_VOLUME:
        case GD_OP_STOP_VOLUME:
        case GD_OP_DELETE_VOLUME:
        case GD_OP_DEFRAG_VOLUME:
        case GD_OP_ADD_BRICK:
        case GD_OP_LOG_ROTATE:
        case GD_OP_SYNC_VOLUME:
        case GD_OP_STATEDUMP_VOLUME:
        case GD_OP_REPLACE_BRICK:
        case GD_OP_STATUS_VOLUME:
        case GD_OP_SET_VOLUME:
        case GD_OP_LIST_VOLUME:
        case GD_OP_CLEARLOCKS_VOLUME:
        case GD_OP_HEAL_VOLUME:
        case GD_OP_QUOTA:
        case GD_OP_SNAP:
        case GD_OP_BARRIER:
        case GD_OP_BITROT:
        case GD_OP_SCRUB_STATUS:
        case GD_OP_SCRUB_ONDEMAND:
        case GD_OP_RESET_BRICK:
        case GD_OP_MAX_OPVERSION:
        case GD_OP_DETACH_NOT_STARTED:
        case GD_OP_GANESHA:
        case GD_OP_DETACH_TIER:
        case GD_OP_TIER_MIGRATE:
        case GD_OP_TIER_START_STOP:
        case GD_OP_TIER_STATUS:
        case GD_OP_DETACH_TIER_STATUS:
        case GD_OP_REMOVE_TIER_BRICK:
        case GD_OP_ADD_TIER_BRICK: {
            /* nothing specific to be done */
            break;
        }
        case GD_OP_COPY_FILE: {
            if (ctx)
                ret = dict_get_str(ctx, "errstr", &errstr);
            break;
        }
        case GD_OP_SYS_EXEC: {
            if (ctx) {
                ret = dict_get_str(ctx, "errstr", &errstr);
                ret = dict_set_str_sizen(ctx, "glusterd_workdir",
                                         conf->workdir);
            }
            break;
        }
    }

    rsp.op_ret = op_ret;
    rsp.op_errno = op_errno;

    if (errstr)
        rsp.op_errstr = errstr;
    else if (op_errstr)
        rsp.op_errstr = op_errstr;

    if (!rsp.op_errstr)
        rsp.op_errstr = "";

    if (ctx) {
        ret = dict_allocate_and_serialize(ctx, &rsp.dict.dict_val,
                                          &rsp.dict.dict_len);
        if (ret < 0)
            gf_smsg(this->name, GF_LOG_ERROR, errno,
                    GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
        else
            free_ptr = rsp.dict.dict_val;
    }

    /* needed by 'rebalance status' */
    if (status)
        rsp.op_errno = status;

    cli_rsp = &rsp;
    xdrproc = (xdrproc_t)xdr_gf_cli_rsp;

    glusterd_to_cli(req, cli_rsp, NULL, 0, NULL, xdrproc, ctx);
    ret = 0;

    GF_FREE(free_ptr);
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

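/* glusterd serializes RPC callback processing under a single "big lock"
 * (priv->big_lock): each public *_cbk below is a thin wrapper that runs
 * the corresponding __*_cbk handler with that synclock held. */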
int
glusterd_big_locked_cbk(struct rpc_req *req, struct iovec *iov, int count,
                        void *myframe, fop_cbk_fn_t fn)
{
    glusterd_conf_t *priv = THIS->private;
    int ret = -1;

    synclock_lock(&priv->big_lock);
    ret = fn(req, iov, count, myframe);
    synclock_unlock(&priv->big_lock);

    return ret;
}

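/* Handle the response to a probe request. On success, either inject
 * GD_FRIEND_EVENT_INIT_FRIEND_REQ to continue befriending a new peer, or,
 * when the uuid already belongs to a known peer, record the hostname as an
 * additional address for that peer (GD_FRIEND_EVENT_NEW_NAME). */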
int
__glusterd_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
                     void *myframe)
{
    gd1_mgmt_probe_rsp rsp = {
        {0},
    };
    int ret = 0;
    glusterd_peerinfo_t *peerinfo = NULL;
    glusterd_friend_sm_event_t *event = NULL;
    glusterd_probe_ctx_t *ctx = NULL;
    xlator_t *this = THIS;

    if (-1 == req->rpc_status) {
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL, "error");
        // rsp.op_ret   = -1;
        // rsp.op_errno = EINVAL;
        goto out;
    }

    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_PROBE_REQ_RESP_RCVD,
           "Received probe resp from uuid: %s, host: %s", uuid_utoa(rsp.uuid),
           rsp.hostname);
    if (rsp.op_ret != 0) {
        ctx = ((call_frame_t *)myframe)->local;
        ((call_frame_t *)myframe)->local = NULL;

        GF_ASSERT(ctx);

        if (ctx->req) {
            glusterd_xfer_cli_probe_resp(ctx->req, rsp.op_ret, rsp.op_errno,
                                         rsp.op_errstr, ctx->hostname,
                                         ctx->port, ctx->dict);
        }

        glusterd_destroy_probe_ctx(ctx);
        (void)glusterd_friend_remove(rsp.uuid, rsp.hostname);
        ret = rsp.op_ret;
        goto out;
    }

    RCU_READ_LOCK;
    peerinfo = glusterd_peerinfo_find(rsp.uuid, rsp.hostname);
    if (peerinfo == NULL) {
        RCU_READ_UNLOCK
        ret = -1;
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
               "Could not find peer %s(%s)", rsp.hostname,
               uuid_utoa(rsp.uuid));
        goto out;
    }

    /*
     * In the case of a fresh probe rsp.uuid and peerinfo.uuid will not
     * match, as peerinfo->uuid will be NULL.
     *
     * In the case of a peer probe being done to add a new network to a
     * peer, rsp.uuid will match an existing peerinfo.uuid. If we have
     * reached this stage, it means that the current address/hostname being
     * used isn't present in the found peerinfo. If it were, we would have
     * found out earlier in the probe process and wouldn't even reach till
     * here. So, we need to add the new hostname to the peer.
     *
     * This update should only be done when an explicit CLI probe
     * command was used to begin the probe process.
     */
    if (gf_uuid_compare(rsp.uuid, peerinfo->uuid) == 0) {
        ctx = ((call_frame_t *)myframe)->local;
        /* Presence of ctx->req implies this probe was started by a cli
         * probe command
         */
        if (ctx->req == NULL)
            goto cont;

        gf_msg_debug(this->name, 0,
                     "Adding address '%s' to "
                     "existing peer %s",
                     rsp.hostname, uuid_utoa(rsp.uuid));

        ret = glusterd_friend_remove(NULL, rsp.hostname);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0,
                   GD_MSG_STALE_PEERINFO_REMOVE_FAIL,
                   "Could not remove "
                   "stale peerinfo with name %s",
                   rsp.hostname);
            goto reply;
        }

        ret = glusterd_peer_hostname_update(peerinfo, rsp.hostname, _gf_false);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0,
                   GD_MSG_HOSTNAME_ADD_TO_PEERLIST_FAIL,
                   "Couldn't add hostname to peer list");
            goto reply;
        }

        /* Injecting EVENT_NEW_NAME to send update */
        ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_NEW_NAME, &event);
        if (!ret) {
            event->peername = gf_strdup(peerinfo->hostname);
            gf_uuid_copy(event->peerid, peerinfo->uuid);

            ret = glusterd_friend_sm_inject_event(event);
        }
        rsp.op_errno = GF_PROBE_FRIEND;

    reply:
        ctx = ((call_frame_t *)myframe)->local;
        ((call_frame_t *)myframe)->local = NULL;

        if (!ctx) {
            ret = -1;
            goto unlock;
        }

        if (ctx->req) {
            glusterd_xfer_cli_probe_resp(ctx->req, ret, rsp.op_errno,
                                         rsp.op_errstr, ctx->hostname,
                                         ctx->port, ctx->dict);
        }

        glusterd_destroy_probe_ctx(ctx);

        goto unlock;

    } else if (strncasecmp(rsp.hostname, peerinfo->hostname, 1024)) {
        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_HOST_PRESENT_ALREADY,
               "Host: %s with uuid: %s "
               "already present in cluster with alias hostname: %s",
               rsp.hostname, uuid_utoa(rsp.uuid), peerinfo->hostname);

        ctx = ((call_frame_t *)myframe)->local;
        ((call_frame_t *)myframe)->local = NULL;

        if (!ctx) {
            ret = -1;
            goto unlock;
        }

        rsp.op_errno = GF_PROBE_FRIEND;
        if (ctx->req) {
            glusterd_xfer_cli_probe_resp(ctx->req, rsp.op_ret, rsp.op_errno,
                                         rsp.op_errstr, ctx->hostname,
                                         ctx->port, ctx->dict);
        }

        glusterd_destroy_probe_ctx(ctx);
        (void)glusterd_friend_remove(NULL, rsp.hostname);
        ret = rsp.op_ret;

        goto unlock;
    }

cont:
    gf_uuid_copy(peerinfo->uuid, rsp.uuid);

    ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_INIT_FRIEND_REQ, &event);

    if (ret) {
        RCU_READ_UNLOCK;
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_NEW_FRIEND_SM_EVENT_GET_FAIL,
               "Unable to get event");
        goto out;
    }

    event->peername = gf_strdup(peerinfo->hostname);
    gf_uuid_copy(event->peerid, peerinfo->uuid);

    event->ctx = ((call_frame_t *)myframe)->local;
    ((call_frame_t *)myframe)->local = NULL;
    ret = glusterd_friend_sm_inject_event(event);

    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PROBE_REQ_RESP_RCVD,
           "Received resp to probe req");

unlock:
    RCU_READ_UNLOCK;

out:
    free(rsp.hostname);  // malloced by xdr
    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));

    /* Attempt to start the state machine. Needed as no state machine could
     * be running at time this RPC reply was received
     */
    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    return ret;
}

int
glusterd_probe_cbk(struct rpc_req *req, struct iovec *iov, int count,
                   void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_probe_cbk);
}

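/* Handle the response to a friend add request: translate the peer's
 * ACC/RJT into a GD_FRIEND_EVENT_RCVD_ACC/_RJT event and inject it into
 * the friend state machine, then answer the CLI probe if one is pending. */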
int
__glusterd_friend_add_cbk(struct rpc_req *req, struct iovec *iov, int count,
                          void *myframe)
{
    gd1_mgmt_friend_rsp rsp = {
        {0},
    };
    int ret = -1;
    glusterd_friend_sm_event_t *event = NULL;
    glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
    glusterd_peerinfo_t *peerinfo = NULL;
    int32_t op_ret = -1;
    int32_t op_errno = EINVAL;
    glusterd_probe_ctx_t *ctx = NULL;
    glusterd_friend_update_ctx_t *ev_ctx = NULL;

    if (-1 == req->rpc_status) {
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
    if (ret < 0) {
        gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_RES_DECODE_FAIL,
               "error");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto out;
    }

    op_ret = rsp.op_ret;
    op_errno = rsp.op_errno;

    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
           "Received %s from uuid: %s, host: %s, port: %d",
           (op_ret) ? "RJT" : "ACC", uuid_utoa(rsp.uuid), rsp.hostname,
           rsp.port);

    RCU_READ_LOCK;

    peerinfo = glusterd_peerinfo_find(rsp.uuid, rsp.hostname);
    if (peerinfo == NULL) {
        RCU_READ_UNLOCK
        ret = -1;
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
               "received friend add response from"
               " unknown peer uuid: %s",
               uuid_utoa(rsp.uuid));
        goto out;
    }

    if (op_ret)
        event_type = GD_FRIEND_EVENT_RCVD_RJT;
    else
        event_type = GD_FRIEND_EVENT_RCVD_ACC;

    ret = glusterd_friend_sm_new_event(event_type, &event);

    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
               "Unable to get event");
        goto unlock;
    }

    ev_ctx = GF_CALLOC(1, sizeof(*ev_ctx), gf_gld_mt_friend_update_ctx_t);
    if (!ev_ctx) {
        ret = -1;
        goto unlock;
    }

    gf_uuid_copy(ev_ctx->uuid, rsp.uuid);
    ev_ctx->hostname = gf_strdup(rsp.hostname);

    event->peername = gf_strdup(peerinfo->hostname);
    gf_uuid_copy(event->peerid, peerinfo->uuid);
    event->ctx = ev_ctx;
    ret = glusterd_friend_sm_inject_event(event);

unlock:
    RCU_READ_UNLOCK;
out:
    ctx = ((call_frame_t *)myframe)->local;
    ((call_frame_t *)myframe)->local = NULL;

    if (ctx && ctx->req) {
        /* reverse probe doesn't have req */
        ret = glusterd_xfer_cli_probe_resp(ctx->req, op_ret, op_errno, NULL,
                                           ctx->hostname, ctx->port, ctx->dict);
    }
    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    if (ctx)
        glusterd_destroy_probe_ctx(ctx);
    free(rsp.hostname);  // malloced by xdr
    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int
glusterd_friend_add_cbk(struct rpc_req *req, struct iovec *iov, int count,
                        void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_friend_add_cbk);
}

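/* Handle the response to a friend remove (detach) request: inject
 * GD_FRIEND_EVENT_REMOVE_FRIEND, answer the CLI deprobe request, and
 * broadcast the deletion to the remaining peers. */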
int
__glusterd_friend_remove_cbk(struct rpc_req *req, struct iovec *iov, int count,
                             void *myframe)
{
    gd1_mgmt_friend_rsp rsp = {
        {0},
    };
    glusterd_conf_t *conf = NULL;
    int ret = -1;
    glusterd_friend_sm_event_t *event = NULL;
    glusterd_friend_sm_event_type_t event_type = GD_FRIEND_EVENT_NONE;
    glusterd_peerinfo_t *peerinfo = NULL;
    int32_t op_ret = -1;
    int32_t op_errno = 0;
    glusterd_probe_ctx_t *ctx = NULL;
    gf_boolean_t move_sm_now = _gf_true;

    conf = THIS->private;
    GF_ASSERT(conf);

    ctx = ((call_frame_t *)myframe)->local;
    ((call_frame_t *)myframe)->local = NULL;
    if (!ctx) {
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
               "Unable to get glusterd probe context");
        goto out;
    }
    if (-1 == req->rpc_status) {
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        move_sm_now = _gf_false;
        goto inject;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
    if (ret < 0) {
        gf_msg("glusterd", GF_LOG_ERROR, errno, GD_MSG_RES_DECODE_FAIL,
               "error");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto respond;
    }

    op_ret = rsp.op_ret;
    op_errno = rsp.op_errno;

    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
           "Received %s from uuid: %s, host: %s, port: %d",
           (op_ret) ? "RJT" : "ACC", uuid_utoa(rsp.uuid), rsp.hostname,
           rsp.port);

inject:
    RCU_READ_LOCK;

    peerinfo = glusterd_peerinfo_find(rsp.uuid, ctx->hostname);
    if (peerinfo == NULL) {
        // can happen as part of rpc clnt connection cleanup
        // when the frame timeout happens after 30 minutes
        goto unlock;
    }

    event_type = GD_FRIEND_EVENT_REMOVE_FRIEND;

    ret = glusterd_friend_sm_new_event(event_type, &event);

    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
               "Unable to get event");
        goto unlock;
    }
    event->peername = gf_strdup(peerinfo->hostname);
    gf_uuid_copy(event->peerid, peerinfo->uuid);

    ret = glusterd_friend_sm_inject_event(event);

    if (ret)
        goto unlock;

    /* friend_sm would be moved on CLNT_DISCONNECT, consequently
       cleaning up peerinfo. Else, we run the risk of triggering
       a clnt_destroy within saved_frames_unwind.
    */
    op_ret = 0;

unlock:
    RCU_READ_UNLOCK;

respond:
    ret = glusterd_xfer_cli_deprobe_resp(ctx->req, op_ret, op_errno, NULL,
                                         ctx->hostname, ctx->dict);
    if (!ret && move_sm_now) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    glusterd_broadcast_friend_delete(ctx->hostname, NULL);
    glusterd_destroy_probe_ctx(ctx);
out:
    free(rsp.hostname);  // malloced by xdr
    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int
glusterd_friend_remove_cbk(struct rpc_req *req, struct iovec *iov, int count,
                           void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_friend_remove_cbk);
}

int32_t
__glusterd_friend_update_cbk(struct rpc_req *req, struct iovec *iov, int count,
                             void *myframe)
{
    int ret = -1;
    gd1_mgmt_friend_update_rsp rsp = {
        {0},
    };
    xlator_t *this = THIS;

    GF_ASSERT(req);

    if (-1 == req->rpc_status) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE, "RPC Error");
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
               "Failed to decode friend"
               " update response");
        goto out;
    }

    ret = 0;
out:
    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
           "Received %s from uuid: %s", (ret) ? "RJT" : "ACC",
           uuid_utoa(rsp.uuid));

    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int
glusterd_friend_update_cbk(struct rpc_req *req, struct iovec *iov, int count,
                           void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_friend_update_cbk);
}

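/* Handle a peer's response to the original cluster-wide lock RPC (as
 * opposed to the mgmt_v3 variant further below). This path keys the
 * transaction on the global txn id from the config rather than on a
 * per-transaction id carried in the frame cookie. */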
int32_t
__glusterd_cluster_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
                            void *myframe)
{
    gd1_mgmt_cluster_lock_rsp rsp = {
        {0},
    };
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    xlator_t *this = THIS;
    uuid_t *txn_id = NULL;
    glusterd_conf_t *priv = NULL;
    char *err_str = NULL;

    priv = this->private;
    GF_ASSERT(priv);
    GF_ASSERT(req);

    txn_id = &priv->global_txn_id;

    if (-1 == req->rpc_status) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_LOCK_RESP_FROM_PEER,
               "Lock response is not "
               "received from one of the peers");
        err_str = "Lock response is not received from one of the peers";
        glusterd_set_opinfo(err_str, ENETRESET, -1);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
               "Failed to decode "
               "cluster lock response received from peer");
        err_str =
            "Failed to decode cluster lock response received from"
            " peer";
        glusterd_set_opinfo(err_str, EINVAL, -1);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    op_ret = rsp.op_ret;

    if (op_ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_LOCK_FROM_UUID_REJCT,
               "Received lock RJT from uuid: %s", uuid_utoa(rsp.uuid));
    } else {
        gf_msg_debug(this->name, 0, "Received lock ACC from uuid: %s",
                     uuid_utoa(rsp.uuid));
    }

    RCU_READ_LOCK;
    ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
    RCU_READ_UNLOCK;

    if (ret) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
               "cluster lock response received from unknown peer: %s. "
               "Ignoring response",
               uuid_utoa(rsp.uuid));
        err_str = "cluster lock response received from unknown peer";
        goto out;
    }

    if (op_ret) {
        event_type = GD_OP_EVENT_RCVD_RJT;
        opinfo.op_ret = op_ret;
        opinfo.op_errstr = gf_strdup(
            "Another transaction could be in "
            "progress. Please try again after"
            " some time.");
    } else {
        event_type = GD_OP_EVENT_RCVD_ACC;
    }

out:

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);

    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int32_t
glusterd_cluster_lock_cbk(struct rpc_req *req, struct iovec *iov, int count,
                          void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_cluster_lock_cbk);
}

void
glusterd_set_opinfo(char *errstr, int32_t op_errno, int32_t op_ret)
{
    opinfo.op_errstr = gf_strdup(errstr);
    opinfo.op_errno = op_errno;
    opinfo.op_ret = op_ret;
}

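/* mgmt_v3 variants of the lock/unlock callbacks. The transaction id is
 * carried in frame->cookie (and echoed back in rsp.txn_id), so concurrent
 * transactions don't have to share the global txn id. */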
static int32_t
glusterd_mgmt_v3_lock_peers_cbk_fn(struct rpc_req *req, struct iovec *iov,
                                   int count, void *myframe)
{
    gd1_mgmt_v3_lock_rsp rsp = {
        {0},
    };
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    xlator_t *this = THIS;
    call_frame_t *frame = NULL;
    uuid_t *txn_id = NULL;
    char *err_str = NULL;

    GF_ASSERT(req);

    frame = myframe;
    txn_id = frame->cookie;
    frame->cookie = NULL;

    if (-1 == req->rpc_status) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_LOCK_RESP_FROM_PEER,
               "Lock response is not "
               "received from one of the peers");
        err_str = "Lock response is not received from one of the peers";
        glusterd_set_opinfo(err_str, ENETRESET, -1);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
               "Failed to decode "
               "mgmt_v3 lock response received from peer");
        err_str =
            "Failed to decode mgmt_v3 lock response received from"
            " peer";
        glusterd_set_opinfo(err_str, EINVAL, -1);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    op_ret = rsp.op_ret;

    txn_id = &rsp.txn_id;

    if (op_ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_FROM_UUID_REJCT,
               "Received mgmt_v3 lock RJT from uuid: %s", uuid_utoa(rsp.uuid));
    } else {
        gf_msg_debug(this->name, 0, "Received mgmt_v3 lock ACC from uuid: %s",
                     uuid_utoa(rsp.uuid));
    }

    RCU_READ_LOCK;
    ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
    RCU_READ_UNLOCK;

    if (ret) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
               "mgmt_v3 lock response received "
               "from unknown peer: %s. Ignoring response",
               uuid_utoa(rsp.uuid));
        goto out;
    }

    if (op_ret) {
        event_type = GD_OP_EVENT_RCVD_RJT;
        opinfo.op_ret = op_ret;
        opinfo.op_errstr = gf_strdup(
            "Another transaction could be in "
            "progress. Please try again after"
            " some time.");
    } else {
        event_type = GD_OP_EVENT_RCVD_ACC;
    }

out:

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);
    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    GF_FREE(frame->cookie);
    GLUSTERD_STACK_DESTROY(frame);
    return ret;
}

int32_t
glusterd_mgmt_v3_lock_peers_cbk(struct rpc_req *req, struct iovec *iov,
                                int count, void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   glusterd_mgmt_v3_lock_peers_cbk_fn);
}

static int32_t
glusterd_mgmt_v3_unlock_peers_cbk_fn(struct rpc_req *req, struct iovec *iov,
                                     int count, void *myframe)
{
    gd1_mgmt_v3_unlock_rsp rsp = {
        {0},
    };
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    xlator_t *this = THIS;
    call_frame_t *frame = NULL;
    uuid_t *txn_id = NULL;
    char *err_str = NULL;

    GF_ASSERT(req);

    frame = myframe;
    txn_id = frame->cookie;
    frame->cookie = NULL;

    if (-1 == req->rpc_status) {
        err_str = "Unlock response not received from one of the peers.";
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
               "Unlock response is not received from one of the peers");
        glusterd_set_opinfo(err_str, 0, 0);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
               "Failed to decode mgmt_v3 unlock response received from"
               " peer");
        err_str =
            "Failed to decode mgmt_v3 unlock response received "
            "from peer";
        glusterd_set_opinfo(err_str, 0, 0);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    op_ret = rsp.op_ret;

    txn_id = &rsp.txn_id;

    if (op_ret) {
        gf_msg(
            this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FROM_UUID_REJCT,
            "Received mgmt_v3 unlock RJT from uuid: %s", uuid_utoa(rsp.uuid));
    } else {
        gf_msg_debug(this->name, 0, "Received mgmt_v3 unlock ACC from uuid: %s",
                     uuid_utoa(rsp.uuid));
    }

    RCU_READ_LOCK;
    ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
    RCU_READ_UNLOCK;

    if (ret) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
               "mgmt_v3 unlock response received "
               "from unknown peer: %s. Ignoring response",
               uuid_utoa(rsp.uuid));
        goto out;
    }

    if (op_ret) {
        event_type = GD_OP_EVENT_RCVD_RJT;
        opinfo.op_ret = op_ret;
        opinfo.op_errstr = gf_strdup(
            "Another transaction could be in "
            "progress. Please try again after"
            " some time.");
    } else {
        event_type = GD_OP_EVENT_RCVD_ACC;
    }

out:

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);

    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    GF_FREE(frame->cookie);
    GLUSTERD_STACK_DESTROY(frame);
    return ret;
}

int32_t
glusterd_mgmt_v3_unlock_peers_cbk(struct rpc_req *req, struct iovec *iov,
                                  int count, void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   glusterd_mgmt_v3_unlock_peers_cbk_fn);
}

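/* Counterpart of __glusterd_cluster_lock_cbk for the cluster-wide unlock
 * response; again keyed on the global transaction id. */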
int32_t
__glusterd_cluster_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
                              void *myframe)
{
    gd1_mgmt_cluster_lock_rsp rsp = {
        {0},
    };
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    xlator_t *this = THIS;
    uuid_t *txn_id = NULL;
    glusterd_conf_t *priv = NULL;
    char *err_str = NULL;

    priv = this->private;
    GF_ASSERT(priv);
    GF_ASSERT(req);

    txn_id = &priv->global_txn_id;

    if (-1 == req->rpc_status) {
        err_str = "Unlock response not received from one of the peers.";
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
               "Unlock response is not received from one of the peers");
        glusterd_set_opinfo(err_str, 0, 0);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp,
                         (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
               "Failed to decode unlock response received from peer");
        err_str =
            "Failed to decode cluster unlock response received "
            "from peer";
        glusterd_set_opinfo(err_str, 0, 0);
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    op_ret = rsp.op_ret;

    if (op_ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNLOCK_FROM_UUID_REJCT,
               "Received unlock RJT from uuid: %s", uuid_utoa(rsp.uuid));
    } else {
        gf_msg_debug(this->name, 0, "Received unlock ACC from uuid: %s",
                     uuid_utoa(rsp.uuid));
    }

    RCU_READ_LOCK;
    ret = (glusterd_peerinfo_find(rsp.uuid, NULL) == NULL);
    RCU_READ_UNLOCK;

    if (ret) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_CLUSTER_UNLOCK_FAILED,
               "Unlock response received from unknown peer %s",
               uuid_utoa(rsp.uuid));
        goto out;
    }

    if (op_ret) {
        event_type = GD_OP_EVENT_RCVD_RJT;
        opinfo.op_ret = op_ret;
    } else {
        event_type = GD_OP_EVENT_RCVD_ACC;
    }

out:

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);

    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int32_t
glusterd_cluster_unlock_cbk(struct rpc_req *req, struct iovec *iov, int count,
                            void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_cluster_unlock_cbk);
}

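/* Handle a peer's response to the stage phase of an op transaction:
 * decode the rsp dict, map ACC/RJT to an op-sm event, and record an
 * error string (the peer's own, or a generic OPERRSTR_STAGE_FAIL) in the
 * transaction's opinfo. */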
int32_t
__glusterd_stage_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
                        void *myframe)
{
    gd1_mgmt_stage_op_rsp rsp = {
        {0},
    };
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    glusterd_peerinfo_t *peerinfo = NULL;
    dict_t *dict = NULL;
    char *peer_str = NULL;
    xlator_t *this = THIS;
    uuid_t *txn_id = NULL;
    call_frame_t *frame = NULL;

    GF_ASSERT(req);
    GF_ASSERT(myframe);

    frame = myframe;
    txn_id = frame->cookie;

    if (-1 == req->rpc_status) {
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        /* use standard allocation to keep uniformity
           in freeing it */
        rsp.op_errstr = strdup("error");
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
               "Failed to decode stage "
               "response received from peer");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        /* use standard allocation to keep uniformity
           in freeing it */
        rsp.op_errstr = strdup(
            "Failed to decode stage response "
            "received from peer.");
        goto out;
    }

    if (rsp.dict.dict_len) {
        /* Unserialize the dictionary */
        dict = dict_new();

        ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
        if (ret < 0) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "failed to "
                   "unserialize rsp-buffer to dictionary");
            event_type = GD_OP_EVENT_RCVD_RJT;
            goto out;
        } else {
            dict->extra_stdfree = rsp.dict.dict_val;
        }
    }

out:
    op_ret = rsp.op_ret;

    if (op_ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STAGE_FROM_UUID_REJCT,
               "Received stage RJT from uuid: %s", uuid_utoa(rsp.uuid));
    } else {
        gf_msg_debug(this->name, 0, "Received stage ACC from uuid: %s",
                     uuid_utoa(rsp.uuid));
    }

    RCU_READ_LOCK;
    peerinfo = glusterd_peerinfo_find(rsp.uuid, NULL);
    if (peerinfo == NULL) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
               "Stage response received "
               "from unknown peer: %s. Ignoring response.",
               uuid_utoa(rsp.uuid));
    }

    if (op_ret) {
        event_type = GD_OP_EVENT_RCVD_RJT;
        opinfo.op_ret = op_ret;
        if (strcmp("", rsp.op_errstr)) {
            opinfo.op_errstr = gf_strdup(rsp.op_errstr);
        } else {
            if (peerinfo)
                peer_str = peerinfo->hostname;
            else
                peer_str = uuid_utoa(rsp.uuid);
            char err_str[2048];
            snprintf(err_str, sizeof(err_str), OPERRSTR_STAGE_FAIL, peer_str);
            opinfo.op_errstr = gf_strdup(err_str);
        }
    } else {
        event_type = GD_OP_EVENT_RCVD_ACC;
    }

    RCU_READ_UNLOCK;

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);

    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    free(rsp.op_errstr);  // malloced by xdr
    if (dict) {
        if (!dict->extra_stdfree && rsp.dict.dict_val)
            free(rsp.dict.dict_val);  // malloced by xdr
        dict_unref(dict);
    } else {
        free(rsp.dict.dict_val);  // malloced by xdr
    }
    GF_FREE(frame->cookie);
    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int32_t
glusterd_stage_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
                      void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_stage_op_cbk);
}

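/* Handle a peer's response to the commit phase. On ACC, per-op response
 * dicts (volume profile, rebalance/defrag status) are merged into the
 * transaction's op context before the op-sm event is injected. */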
int32_t
__glusterd_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
                         void *myframe)
{
    gd1_mgmt_commit_op_rsp rsp = {
        {0},
    };
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    glusterd_peerinfo_t *peerinfo = NULL;
    dict_t *dict = NULL;
    char *peer_str = NULL;
    xlator_t *this = THIS;
    uuid_t *txn_id = NULL;
    glusterd_op_info_t txn_op_info = {
        GD_OP_STATE_DEFAULT,
    };
    call_frame_t *frame = NULL;

    GF_ASSERT(req);
    GF_ASSERT(myframe);

    frame = myframe;
    txn_id = frame->cookie;

    if (-1 == req->rpc_status) {
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        /* use standard allocation to keep uniformity
           in freeing it */
        rsp.op_errstr = strdup("error");
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
               "Failed to decode commit "
               "response received from peer");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        /* use standard allocation to keep uniformity
           in freeing it */
        rsp.op_errstr = strdup(
            "Failed to decode commit response "
            "received from peer.");
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    if (rsp.dict.dict_len) {
        /* Unserialize the dictionary */
        dict = dict_new();

        ret = dict_unserialize(rsp.dict.dict_val, rsp.dict.dict_len, &dict);
        if (ret < 0) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "failed to "
                   "unserialize rsp-buffer to dictionary");
            event_type = GD_OP_EVENT_RCVD_RJT;
            goto out;
        } else {
            dict->extra_stdfree = rsp.dict.dict_val;
        }
    }

    op_ret = rsp.op_ret;

    if (op_ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_FROM_UUID_REJCT,
               "Received commit RJT from uuid: %s", uuid_utoa(rsp.uuid));
    } else {
        gf_msg_debug(this->name, 0, "Received commit ACC from uuid: %s",
                     uuid_utoa(rsp.uuid));
    }

    ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_GET_FAIL,
               "Failed to get txn_op_info "
               "for txn_id = %s",
               uuid_utoa(*txn_id));
    }

    RCU_READ_LOCK;
    peerinfo = glusterd_peerinfo_find(rsp.uuid, NULL);
    if (peerinfo == NULL) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_RESP_FROM_UNKNOWN_PEER,
               "Commit response for "
               "'Volume %s' received from unknown peer: %s",
               gd_op_list[opinfo.op], uuid_utoa(rsp.uuid));
    }

    if (op_ret) {
        event_type = GD_OP_EVENT_RCVD_RJT;
        opinfo.op_ret = op_ret;
        if (strcmp("", rsp.op_errstr)) {
            opinfo.op_errstr = gf_strdup(rsp.op_errstr);
        } else {
            if (peerinfo)
                peer_str = peerinfo->hostname;
            else
                peer_str = uuid_utoa(rsp.uuid);
            char err_str[2048];
            snprintf(err_str, sizeof(err_str), OPERRSTR_COMMIT_FAIL, peer_str);
            opinfo.op_errstr = gf_strdup(err_str);
        }
        if (!opinfo.op_errstr) {
            goto unlock;
        }
    } else {
        event_type = GD_OP_EVENT_RCVD_ACC;
        GF_ASSERT(rsp.op == txn_op_info.op);

        switch (rsp.op) {
            case GD_OP_PROFILE_VOLUME:
                ret = glusterd_profile_volume_use_rsp_dict(txn_op_info.op_ctx,
                                                           dict);
                if (ret)
                    goto unlock;
                break;

            case GD_OP_REBALANCE:
            case GD_OP_DEFRAG_BRICK_VOLUME:
                ret = glusterd_volume_rebalance_use_rsp_dict(txn_op_info.op_ctx,
                                                             dict);
                if (ret)
                    goto unlock;
                break;

            default:
                break;
        }
    }
unlock:
    RCU_READ_UNLOCK;

out:

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    ret = glusterd_op_sm_inject_event(event_type, txn_id, NULL);

    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    if (dict)
        dict_unref(dict);
    free(rsp.op_errstr);  // malloced by xdr
    GF_FREE(frame->cookie);
    GLUSTERD_STACK_DESTROY(((call_frame_t *)myframe));
    return ret;
}

int32_t
glusterd_commit_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
                       void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_commit_op_cbk);
}

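/* Request-side helpers start here: each builds an XDR request and submits
 * it to the peer via glusterd_submit_request() with the matching callback
 * registered above. */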
int32_t
glusterd_rpc_probe(call_frame_t *frame, xlator_t *this, void *data)
{
    gd1_mgmt_probe_req req = {
        {0},
    };
    int ret = 0;
    int port = 0;
    char *hostname = NULL;
    glusterd_peerinfo_t *peerinfo = NULL;
    dict_t *dict = NULL;

    if (!frame || !data) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
        ret = -1;
        goto out;
    }

    dict = data;

    ret = dict_get_str(dict, "hostname", &hostname);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                "Key=hostname", NULL);
        goto out;
    }
    ret = dict_get_int32(dict, "port", &port);
    if (ret) {
        gf_smsg(this->name, GF_LOG_DEBUG, -ret, GD_MSG_DICT_GET_FAILED,
                "Key=port", NULL);
        port = GF_DEFAULT_BASE_PORT;
    }

    ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                "Key=peerinfo", NULL);
        goto out;
    }

    gf_uuid_copy(req.uuid, MY_UUID);
    req.hostname = gf_strdup(hostname);
    req.port = port;

    ret = glusterd_submit_request(
        peerinfo->rpc, &req, frame, peerinfo->peer, GLUSTERD_PROBE_QUERY, NULL,
        this, glusterd_probe_cbk, (xdrproc_t)xdr_gd1_mgmt_probe_req);

out:
    GF_FREE(req.hostname);
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

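/* Send a friend add request. The handshake payload (peer_data) carries
 * this peer's hostname, missed-snapshot list, snapshot list and volume
 * list, serialized into req.vols. */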
int32_t
glusterd_rpc_friend_add(call_frame_t *frame, xlator_t *this, void *data)
{
    gd1_mgmt_friend_req req = {
        {0},
    };
    int ret = 0;
    glusterd_peerinfo_t *peerinfo = NULL;
    glusterd_friend_sm_event_t *event = NULL;
    dict_t *peer_data = NULL;

    if (!frame || !data) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
        ret = -1;
        goto out;
    }

    event = data;

    RCU_READ_LOCK;

    peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
    if (!peerinfo) {
        RCU_READ_UNLOCK;
        ret = -1;
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
               "Could not find peer %s(%s)", event->peername,
               uuid_utoa(event->peerid));
        goto out;
    }

    req.hostname = gf_strdup(peerinfo->hostname);
    req.port = peerinfo->port;

    RCU_READ_UNLOCK;

    gf_uuid_copy(req.uuid, MY_UUID);

    peer_data = dict_new();
    if (!peer_data) {
        gf_smsg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
                NULL);
        errno = ENOMEM;
        goto out;
    }

    ret = dict_set_dynstr_with_alloc(peer_data, "hostname_in_cluster",
                                     peerinfo->hostname);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
               "Unable to add hostname of the peer");
        goto out;
    }

    ret = glusterd_add_missed_snaps_to_export_dict(peer_data);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MISSED_SNAP_LIST_STORE_FAIL,
               "Unable to add list of missed snapshots "
               "in the peer_data dict for handshake");
        goto out;
    }

    ret = glusterd_add_snapshots_to_export_dict(peer_data);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAP_LIST_SET_FAIL,
               "Unable to add list of snapshots "
               "in the peer_data dict for handshake");
        goto out;
    }

    /* Don't add any key-value pairs in the peer_data dictionary after
     * calling this function */
    ret = glusterd_add_volumes_to_export_dict(peer_data, &req.vols.vols_val,
                                              &req.vols.vols_len);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Unable to add list of volumes "
               "in the peer_data dict for handshake");
        goto out;
    }

    if (!req.vols.vols_len) {
        ret = dict_allocate_and_serialize(peer_data, &req.vols.vols_val,
                                          &req.vols.vols_len);
        if (ret) {
            gf_smsg(this->name, GF_LOG_ERROR, errno,
                    GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
            goto out;
        }
    }

    ret = glusterd_submit_request(
        peerinfo->rpc, &req, frame, peerinfo->peer, GLUSTERD_FRIEND_ADD, NULL,
        this, glusterd_friend_add_cbk, (xdrproc_t)xdr_gd1_mgmt_friend_req);

out:
    GF_FREE(req.vols.vols_val);
    GF_FREE(req.hostname);

    if (peer_data)
        dict_unref(peer_data);

    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

int32_t
glusterd_rpc_friend_remove(call_frame_t *frame, xlator_t *this, void *data)
{
    gd1_mgmt_friend_req req = {
        {0},
    };
    int ret = 0;
    glusterd_peerinfo_t *peerinfo = NULL;
    glusterd_friend_sm_event_t *event = NULL;

    if (!frame || !data) {
        ret = -1;
        goto out;
    }

    event = data;

    RCU_READ_LOCK;

    peerinfo = glusterd_peerinfo_find(event->peerid, event->peername);
    if (!peerinfo) {
        RCU_READ_UNLOCK;
        ret = -1;
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_NOT_FOUND,
               "Could not find peer %s(%s)", event->peername,
               uuid_utoa(event->peerid));
        goto out;
    }

    gf_uuid_copy(req.uuid, MY_UUID);
    req.hostname = gf_strdup(peerinfo->hostname);
    req.port = peerinfo->port;

    ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->peer,
                                  GLUSTERD_FRIEND_REMOVE, NULL, this,
                                  glusterd_friend_remove_cbk,
                                  (xdrproc_t)xdr_gd1_mgmt_friend_req);

    RCU_READ_UNLOCK;
out:
    GF_FREE(req.hostname);

    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

int32_t
glusterd_rpc_friend_update(call_frame_t *frame, xlator_t *this, void *data)
{
    gd1_mgmt_friend_update req = {
        {0},
    };
    int ret = 0;
    dict_t *friends = NULL;
    call_frame_t *dummy_frame = NULL;
    glusterd_peerinfo_t *peerinfo = NULL;

    friends = data;
    if (!friends)
        goto out;

    ret = dict_get_ptr(friends, "peerinfo", VOID(&peerinfo));
    if (ret)
        goto out;
    /* Don't want to send the pointer over */
    dict_del_sizen(friends, "peerinfo");

    ret = dict_allocate_and_serialize(friends, &req.friends.friends_val,
                                      &req.friends.friends_len);
    if (ret)
        goto out;

    gf_uuid_copy(req.uuid, MY_UUID);

    dummy_frame = create_frame(this, this->ctx->pool);
    ret = glusterd_submit_request(peerinfo->rpc, &req, dummy_frame,
                                  peerinfo->peer, GLUSTERD_FRIEND_UPDATE, NULL,
                                  this, glusterd_friend_update_cbk,
                                  (xdrproc_t)xdr_gd1_mgmt_friend_update);

out:
    GF_FREE(req.friends.friends_val);

    if (ret && dummy_frame)
        STACK_DESTROY(dummy_frame->root);

    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

int32_t
glusterd_cluster_lock(call_frame_t *frame, xlator_t *this, void *data)
{
    gd1_mgmt_cluster_lock_req req = {
        {0},
    };
    int ret = -1;
    glusterd_peerinfo_t *peerinfo = NULL;
    call_frame_t *dummy_frame = NULL;

    peerinfo = data;

    glusterd_get_uuid(&req.uuid);

    dummy_frame = create_frame(this, this->ctx->pool);
    if (!dummy_frame)
        goto out;

    ret = glusterd_submit_request(peerinfo->rpc, &req, dummy_frame,
                                  peerinfo->mgmt, GLUSTERD_MGMT_CLUSTER_LOCK,
                                  NULL, this, glusterd_cluster_lock_cbk,
                                  (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
out:
    gf_msg_debug(this->name, 0, "Returning %d", ret);

    if (ret && dummy_frame)
        STACK_DESTROY(dummy_frame->root);
    return ret;
}

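/* mgmt_v3 counterpart of the cluster lock: serializes the op dict, tags the
 * request with the transaction ID (also stashed in frame->cookie so the
 * callback can route the response to the right transaction) and sends
 * GLUSTERD_MGMT_V3_LOCK to the peer named in the dict. */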
int32_t
glusterd_mgmt_v3_lock_peers(call_frame_t *frame, xlator_t *this, void *data)
{
    gd1_mgmt_v3_lock_req req = {
        {0},
    };
    int ret = -1;
    glusterd_peerinfo_t *peerinfo = NULL;
    dict_t *dict = NULL;
    uuid_t *txn_id = NULL;

    dict = data;

    ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                "Key=peerinfo", NULL);
        goto out;
    }

    // peerinfo should not be in payload
    dict_del_sizen(dict, "peerinfo");

    glusterd_get_uuid(&req.uuid);

    ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
                                      &req.dict.dict_len);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, 0,
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
        goto out;
    }

    /* Sending valid transaction ID to peers */
    ret = dict_get_bin(dict, "transaction_id", (void **)&txn_id);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
               "Failed to get transaction id.");
        goto out;
    } else {
        gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(*txn_id));
        gf_uuid_copy(req.txn_id, *txn_id);
    }

    if (!frame)
        frame = create_frame(this, this->ctx->pool);

    if (!frame) {
        ret = -1;
        goto out;
    }
    frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
    if (!frame->cookie) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
        ret = -1;
        goto out;
    }
    gf_uuid_copy(frame->cookie, req.txn_id);

    ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->mgmt_v3,
                                  GLUSTERD_MGMT_V3_LOCK, NULL, this,
                                  glusterd_mgmt_v3_lock_peers_cbk,
                                  (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
out:
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    if (dict)
        dict_unref(dict);
    if (req.dict.dict_val)
        GF_FREE(req.dict.dict_val);
    return ret;
}

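/* Mirror image of glusterd_mgmt_v3_lock_peers: sends GLUSTERD_MGMT_V3_UNLOCK
 * for the same transaction ID to release the peer's mgmt_v3 lock. */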
int32_t
glusterd_mgmt_v3_unlock_peers(call_frame_t *frame, xlator_t *this, void *data)
{
    gd1_mgmt_v3_unlock_req req = {
        {0},
    };
    int ret = -1;
    glusterd_peerinfo_t *peerinfo = NULL;
    dict_t *dict = NULL;
    uuid_t *txn_id = NULL;

    dict = data;

    ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                "Key=peerinfo", NULL);
        goto out;
    }

    // peerinfo should not be in payload
    dict_del_sizen(dict, "peerinfo");

    glusterd_get_uuid(&req.uuid);

    ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
                                      &req.dict.dict_len);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, errno,
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
        goto out;
    }

    /* Sending valid transaction ID to peers */
    ret = dict_get_bin(dict, "transaction_id", (void **)&txn_id);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
               "Failed to get transaction id.");
        goto out;
    } else {
        gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(*txn_id));
        gf_uuid_copy(req.txn_id, *txn_id);
    }

    if (!frame)
        frame = create_frame(this, this->ctx->pool);

    if (!frame) {
        ret = -1;
        goto out;
    }
    frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
    if (!frame->cookie) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
        ret = -1;
        goto out;
    }
    gf_uuid_copy(frame->cookie, req.txn_id);

    ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->mgmt_v3,
                                  GLUSTERD_MGMT_V3_UNLOCK, NULL, this,
                                  glusterd_mgmt_v3_unlock_peers_cbk,
                                  (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
out:
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    if (dict)
        dict_unref(dict);

    if (req.dict.dict_val)
        GF_FREE(req.dict.dict_val);
    return ret;
}

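/* Releases the cluster-wide lock taken by glusterd_cluster_lock by sending
 * GLUSTERD_MGMT_CLUSTER_UNLOCK to the peer. */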
int32_t
glusterd_cluster_unlock(call_frame_t *frame, xlator_t *this, void *data)
{
    gd1_mgmt_cluster_lock_req req = {
        {0},
    };
    int ret = -1;
    glusterd_peerinfo_t *peerinfo = NULL;
    call_frame_t *dummy_frame = NULL;

    peerinfo = data;

    glusterd_get_uuid(&req.uuid);

    dummy_frame = create_frame(this, this->ctx->pool);
    if (!dummy_frame)
        goto out;

    ret = glusterd_submit_request(peerinfo->rpc, &req, dummy_frame,
                                  peerinfo->mgmt, GLUSTERD_MGMT_CLUSTER_UNLOCK,
                                  NULL, this, glusterd_cluster_unlock_cbk,
                                  (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
out:
    gf_msg_debug(this->name, 0, "Returning %d", ret);

    if (ret && dummy_frame)
        STACK_DESTROY(dummy_frame->root);

    return ret;
}

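/* Phase 1 of the two-phase op-sm transaction: serializes the op dict and
 * sends GLUSTERD_MGMT_STAGE_OP (validation) to a peer, with the transaction
 * ID stored in frame->cookie for the callback. */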
int32_t
glusterd_stage_op(call_frame_t *frame, xlator_t *this, void *data)
{
    gd1_mgmt_stage_op_req req = {
        {
            0,
        },
    };
    int ret = -1;
    glusterd_peerinfo_t *peerinfo = NULL;
    dict_t *dict = NULL;
    uuid_t *txn_id = NULL;

    dict = data;

    ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                "Key=peerinfo", NULL);
        goto out;
    }

    // peerinfo should not be in payload
    dict_del_sizen(dict, "peerinfo");

    glusterd_get_uuid(&req.uuid);
    req.op = glusterd_op_get_op();

    ret = dict_allocate_and_serialize(dict, &req.buf.buf_val, &req.buf.buf_len);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, errno,
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
        goto out;
    }
    /* Sending valid transaction ID to peers */
    ret = dict_get_bin(dict, "transaction_id", (void **)&txn_id);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
               "Failed to get transaction id.");
        goto out;
    } else {
        gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(*txn_id));
    }

    if (!frame)
        frame = create_frame(this, this->ctx->pool);

    if (!frame) {
        ret = -1;
        goto out;
    }
    frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
    if (!frame->cookie) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
        ret = -1;
        goto out;
    }
    gf_uuid_copy(frame->cookie, *txn_id);

    ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->mgmt,
                                  GLUSTERD_MGMT_STAGE_OP, NULL, this,
                                  glusterd_stage_op_cbk,
                                  (xdrproc_t)xdr_gd1_mgmt_stage_op_req);

out:
    if (req.buf.buf_val)
        GF_FREE(req.buf.buf_val);

    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

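/* Phase 2 of the two-phase op-sm transaction: identical wire format to the
 * stage request, but sends GLUSTERD_MGMT_COMMIT_OP so the peer actually
 * applies the staged operation. */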
int32_t
glusterd_commit_op(call_frame_t *frame, xlator_t *this, void *data)
{
    gd1_mgmt_commit_op_req req = {
        {
            0,
        },
    };
    int ret = -1;
    glusterd_peerinfo_t *peerinfo = NULL;
    dict_t *dict = NULL;
    uuid_t *txn_id = NULL;

    dict = data;

    ret = dict_get_ptr(dict, "peerinfo", VOID(&peerinfo));
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                "Key=peerinfo", NULL);
        goto out;
    }

    // peerinfo should not be in payload
    dict_del_sizen(dict, "peerinfo");

    glusterd_get_uuid(&req.uuid);
    req.op = glusterd_op_get_op();

    ret = dict_allocate_and_serialize(dict, &req.buf.buf_val, &req.buf.buf_len);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, errno,
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
        goto out;
    }
    /* Sending valid transaction ID to peers */
    ret = dict_get_bin(dict, "transaction_id", (void **)&txn_id);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
               "Failed to get transaction id.");
        goto out;
    } else {
        gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(*txn_id));
    }

    if (!frame)
        frame = create_frame(this, this->ctx->pool);

    if (!frame) {
        ret = -1;
        goto out;
    }
    frame->cookie = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
    if (!frame->cookie) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
        ret = -1;
        goto out;
    }
    gf_uuid_copy(frame->cookie, *txn_id);

    ret = glusterd_submit_request(peerinfo->rpc, &req, frame, peerinfo->mgmt,
                                  GLUSTERD_MGMT_COMMIT_OP, NULL, this,
                                  glusterd_commit_op_cbk,
                                  (xdrproc_t)xdr_gd1_mgmt_commit_op_req);

out:
    if (req.buf.buf_val)
        GF_FREE(req.buf.buf_val);

    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

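/* Callback for brick-op requests: decodes the brick's response, unserializes
 * any returned dict, and injects an accept/reject event into the op state
 * machine for the owning transaction. */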
int32_t
__glusterd_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
                        void *myframe)
{
    gd1_mgmt_brick_op_rsp rsp = {0};
    int ret = -1;
    int32_t op_ret = -1;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    call_frame_t *frame = NULL;
    glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
    dict_t *dict = NULL;
    int index = 0;
    glusterd_req_ctx_t *req_ctx = NULL;
    glusterd_pending_node_t *node = NULL;
    xlator_t *this = THIS;
    uuid_t *txn_id = NULL;
    glusterd_conf_t *priv = NULL;

    priv = this->private;
    GF_ASSERT(priv);
    GF_ASSERT(req);

    txn_id = &priv->global_txn_id;
    frame = myframe;
    req_ctx = frame->local;

    if (-1 == req->rpc_status) {
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        /* use standard allocation to keep uniformity in freeing it */
        rsp.op_errstr = strdup("error");
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RES_DECODE_FAIL,
               "Failed to decode brick op "
               "response received");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        rsp.op_errstr = strdup("Unable to decode brick op response");
        event_type = GD_OP_EVENT_RCVD_RJT;
        goto out;
    }

    if (rsp.output.output_len) {
        /* Unserialize the dictionary */
        dict = dict_new();

        ret = dict_unserialize(rsp.output.output_val, rsp.output.output_len,
                               &dict);
        if (ret < 0) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                   "Failed to "
                   "unserialize rsp-buffer to dictionary");
            event_type = GD_OP_EVENT_RCVD_RJT;
            goto out;
        } else {
            dict->extra_stdfree = rsp.output.output_val;
        }
    }

    op_ret = rsp.op_ret;

    /* Add index to rsp_dict for GD_OP_STATUS_VOLUME */
    if (GD_OP_STATUS_VOLUME == req_ctx->op) {
        node = frame->cookie;
        index = node->index;
        ret = dict_set_int32_sizen(dict, "index", index);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                   "Error setting index on brick status rsp dict");
            rsp.op_ret = -1;
            event_type = GD_OP_EVENT_RCVD_RJT;
            goto out;
        }
    }
out:

    if (req_ctx && req_ctx->dict) {
        ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
        gf_msg_debug(this->name, -ret, "transaction ID = %s",
                     uuid_utoa(*txn_id));
    }

    ev_ctx = GF_CALLOC(1, sizeof(*ev_ctx), gf_gld_mt_brick_rsp_ctx_t);
    if (ev_ctx) {
        if (op_ret) {
            event_type = GD_OP_EVENT_RCVD_RJT;
            ev_ctx->op_ret = op_ret;
            ev_ctx->op_errstr = gf_strdup(rsp.op_errstr);
        } else {
            event_type = GD_OP_EVENT_RCVD_ACC;
        }
        ev_ctx->pending_node = frame->cookie;
        ev_ctx->rsp_dict = dict;
        ev_ctx->commit_ctx = frame->local;
        ret = glusterd_op_sm_inject_event(event_type, txn_id, ev_ctx);
    }
    if (!ret) {
        glusterd_friend_sm();
        glusterd_op_sm();
    }

    if (ret) {
        if (dict) {
            dict_unref(dict);
        }
        if (ev_ctx) {
            GF_FREE(ev_ctx->op_errstr);
            GF_FREE(ev_ctx);
        }
    }
    free(rsp.op_errstr);  // malloced by xdr
    GLUSTERD_STACK_DESTROY(frame);
    return ret;
}

int32_t
glusterd_brick_op_cbk(struct rpc_req *req, struct iovec *iov, int count,
                      void *myframe)
{
    return glusterd_big_locked_cbk(req, iov, count, myframe,
                                   __glusterd_brick_op_cbk);
}

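/* Selects the bricks (or service daemons) affected by the current op, builds
 * a payload for each and submits the brick-op requests, counting how many
 * are pending so the op-sm knows how many responses to wait for. */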
int32_t
glusterd_brick_op(call_frame_t *frame, xlator_t *this, void *data)
{
    gd1_mgmt_brick_op_req *req = NULL;
    int ret = 0;
    int ret1 = 0;
    glusterd_conf_t *priv = NULL;
    call_frame_t *dummy_frame = NULL;
    char *op_errstr = NULL;
    int pending_bricks = 0;
    glusterd_pending_node_t *pending_node;
    glusterd_req_ctx_t *req_ctx = NULL;
    struct rpc_clnt *rpc = NULL;
    dict_t *op_ctx = NULL;
    uuid_t *txn_id = NULL;

    priv = this->private;
    GF_ASSERT(priv);

    txn_id = &priv->global_txn_id;

    req_ctx = data;
    GF_ASSERT(req_ctx);
    CDS_INIT_LIST_HEAD(&opinfo.pending_bricks);

    ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
    if (ret) {
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_BRICK_SELECT_FAIL,
               "Could not get transaction ID from dict, global "
               "transaction ID = %s",
               uuid_utoa(*txn_id));
    } else {
        gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
    }
    ret = glusterd_op_bricks_select(req_ctx->op, req_ctx->dict, &op_errstr,
                                    &opinfo.pending_bricks, NULL);

    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_SELECT_FAIL,
               "Failed to select bricks "
               "while performing brick op during 'Volume %s'",
               gd_op_list[opinfo.op]);
        opinfo.op_errstr = op_errstr;
        goto out;
    }

    cds_list_for_each_entry(pending_node, &opinfo.pending_bricks, list)
    {
        dummy_frame = create_frame(this, this->ctx->pool);
        if (!dummy_frame)
            continue;

        if ((pending_node->type == GD_NODE_NFS) ||
            (pending_node->type == GD_NODE_QUOTAD) ||
            (pending_node->type == GD_NODE_SNAPD) ||
            (pending_node->type == GD_NODE_SCRUB) ||
            ((pending_node->type == GD_NODE_SHD) &&
             (req_ctx->op == GD_OP_STATUS_VOLUME))) {
            ret = glusterd_node_op_build_payload(
                req_ctx->op, (gd1_mgmt_brick_op_req **)&req, req_ctx->dict);
        } else {
            ret = glusterd_brick_op_build_payload(
                req_ctx->op, pending_node->node, (gd1_mgmt_brick_op_req **)&req,
                req_ctx->dict);
        }
        if (ret || !req) {
            gf_msg(this->name, GF_LOG_ERROR, 0,
                   GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
                   "Failed to "
                   "build op payload during "
                   "'Volume %s'",
                   gd_op_list[req_ctx->op]);
            goto out;
        }

        dummy_frame->local = data;
        dummy_frame->cookie = pending_node;

        rpc = glusterd_pending_node_get_rpc(pending_node);
        if (!rpc) {
            if (pending_node->type == GD_NODE_REBALANCE) {
                opinfo.brick_pending_count = 0;
                ret = 0;
                GF_FREE(req->input.input_val);
                GF_FREE(req);
                req = NULL;
                GLUSTERD_STACK_DESTROY(dummy_frame);

                op_ctx = glusterd_op_get_ctx();
                if (!op_ctx)
                    goto out;
                glusterd_defrag_volume_node_rsp(req_ctx->dict, NULL, op_ctx);

                goto out;
            }

            ret = -1;
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
                   "Brick Op failed "
                   "due to rpc failure.");
            goto out;
        }

        ret = glusterd_submit_request(
            rpc, req, dummy_frame, priv->gfs_mgmt, req->op, NULL, this,
            glusterd_brick_op_cbk, (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
        GF_FREE(req->input.input_val);
        GF_FREE(req);
        req = NULL;

        if (!ret)
            pending_bricks++;

        glusterd_pending_node_put_rpc(pending_node);
    }

    gf_msg_trace(this->name, 0,
                 "Sent brick op req for operation "
                 "'Volume %s' to %d bricks",
                 gd_op_list[req_ctx->op], pending_bricks);
    opinfo.brick_pending_count = pending_bricks;

out:

    if (ret)
        opinfo.op_ret = ret;

    ret1 = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret1)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    if (ret) {
        glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT, txn_id, data);
        opinfo.op_ret = ret;
    }

    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

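/* Dispatch tables mapping the brick, friend, mgmt and mgmt_v3 RPC procedure
 * numbers to the actor functions above, and the rpc_clnt_program definitions
 * that register them with the RPC layer. */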
struct rpc_clnt_procedure gd_brick_actors[GLUSTERD_BRICK_MAXVALUE] = {
    [GLUSTERD_BRICK_NULL] = {"NULL", NULL},
    [GLUSTERD_BRICK_OP] = {"BRICK_OP", glusterd_brick_op},
};

struct rpc_clnt_procedure gd_peer_actors[GLUSTERD_FRIEND_MAXVALUE] = {
    [GLUSTERD_FRIEND_NULL] = {"NULL", NULL},
    [GLUSTERD_PROBE_QUERY] = {"PROBE_QUERY", glusterd_rpc_probe},
    [GLUSTERD_FRIEND_ADD] = {"FRIEND_ADD", glusterd_rpc_friend_add},
    [GLUSTERD_FRIEND_REMOVE] = {"FRIEND_REMOVE", glusterd_rpc_friend_remove},
    [GLUSTERD_FRIEND_UPDATE] = {"FRIEND_UPDATE", glusterd_rpc_friend_update},
};

struct rpc_clnt_procedure gd_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
    [GLUSTERD_MGMT_NULL] = {"NULL", NULL},
    [GLUSTERD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK", glusterd_cluster_lock},
    [GLUSTERD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK",
                                      glusterd_cluster_unlock},
    [GLUSTERD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd_stage_op},
    [GLUSTERD_MGMT_COMMIT_OP] = {"COMMIT_OP", glusterd_commit_op},
};

struct rpc_clnt_procedure gd_mgmt_v3_actors[GLUSTERD_MGMT_V3_MAXVALUE] = {
    [GLUSTERD_MGMT_V3_NULL] = {"NULL", NULL},
    [GLUSTERD_MGMT_V3_LOCK] = {"MGMT_V3_LOCK", glusterd_mgmt_v3_lock_peers},
    [GLUSTERD_MGMT_V3_UNLOCK] = {"MGMT_V3_UNLOCK",
                                 glusterd_mgmt_v3_unlock_peers},
};

struct rpc_clnt_program gd_mgmt_prog = {
    .progname = "glusterd mgmt",
    .prognum = GD_MGMT_PROGRAM,
    .progver = GD_MGMT_VERSION,
    .proctable = gd_mgmt_actors,
    .numproc = GLUSTERD_MGMT_MAXVALUE,
};

struct rpc_clnt_program gd_brick_prog = {
    .progname = "brick operations",
    .prognum = GD_BRICK_PROGRAM,
    .progver = GD_BRICK_VERSION,
    .proctable = gd_brick_actors,
    .numproc = GLUSTERD_BRICK_MAXVALUE,
};

struct rpc_clnt_program gd_peer_prog = {
    .progname = "Peer mgmt",
    .prognum = GD_FRIEND_PROGRAM,
    .progver = GD_FRIEND_VERSION,
    .proctable = gd_peer_actors,
    .numproc = GLUSTERD_FRIEND_MAXVALUE,
};

struct rpc_clnt_program gd_mgmt_v3_prog = {
    .progname = "glusterd mgmt v3",
    .prognum = GD_MGMT_PROGRAM,
    .progver = GD_MGMT_V3_VERSION,
    .proctable = gd_mgmt_v3_actors,
    .numproc = GLUSTERD_MGMT_V3_MAXVALUE,
};