glusterfs

Форк
0
/
glusterd-handler.c 
6888 строк · 207.2 Кб
1
/*
2
   Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
3
   This file is part of GlusterFS.
4

5
   This file is licensed to you under your choice of the GNU Lesser
6
   General Public License, version 3 or any later version (LGPLv3 or
7
   later), or the GNU General Public License, version 2 (GPLv2), in all
8
   cases as published by the Free Software Foundation.
9
*/
10
#include <inttypes.h>
11

12
#include <glusterfs/glusterfs.h>
13
#include <glusterfs/compat.h>
14
#include <glusterfs/dict.h>
15
#include <glusterfs/logging.h>
16
#include <glusterfs/syscall.h>
17
#include <glusterfs/timer.h>
18
#include <glusterfs/compat.h>
19
#include <glusterfs/compat-errno.h>
20
#include <glusterfs/run.h>
21
#include "glusterd-mem-types.h"
22
#include "glusterd-op-sm.h"
23
#include "glusterd-utils.h"
24
#include "glusterd-mgmt.h"
25
#include "glusterd-server-quorum.h"
26
#include "glusterd-store.h"
27
#include "glusterd-locks.h"
28
#include "glusterd-snapshot-utils.h"
29
#include "glusterd-geo-rep.h"
30

31
#include "glusterd-mountbroker.h"
32
#include "glusterd-messages.h"
33
#include "glusterd-errno.h"
34

35
#include <inttypes.h>
36

37
#include "glusterd-syncop.h"
38
#include "glusterd-messages.h"
39
#include "protocol-utils.h"
40

41
#define STATUS_STRLEN 128
42

43
enum gf_deprobe_resp {
44
    GF_DEPROBE_SUCCESS,
45
    GF_DEPROBE_LOCALHOST,
46
    GF_DEPROBE_NOT_FRIEND,
47
    GF_DEPROBE_BRICK_EXIST,
48
    GF_DEPROBE_FRIEND_DOWN,
49
    GF_DEPROBE_QUORUM_NOT_MET,
50
    GF_DEPROBE_FRIEND_DETACHING,
51
    GF_DEPROBE_SNAP_BRICK_EXIST,
52
};
53

54
static int volcount;
55

56
static int
57
glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
58
                         rpc_clnt_event_t event, void *data);
59

60
static int
61
glusterd_handle_tier(rpcsvc_request_t *req);
62

63
static int32_t
64
glusterd_get_volumes(rpcsvc_request_t *req, dict_t *dict, int32_t flags);
65

66
static int32_t
67
glusterd_list_friends(rpcsvc_request_t *req, dict_t *dict, int32_t flags);
68

69
static int
70
glusterd_deprobe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
71
                       uuid_t uuid, dict_t *dict, int *op_errno);
72

73
static int
74
glusterd_friend_add(const char *hoststr, int port,
75
                    glusterd_friend_sm_state_t state, uuid_t *uuid,
76
                    glusterd_peerinfo_t **friend, gf_boolean_t restore,
77
                    glusterd_peerctx_args_t *args);
78

79
static int
80
glusterd_probe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
81
                     dict_t *dict, int *op_errno);
82

83
int
84
glusterd_big_locked_notify(struct rpc_clnt *rpc, void *mydata,
85
                           rpc_clnt_event_t event, void *data,
86
                           rpc_clnt_notify_t notify_fn)
87
{
88
    glusterd_conf_t *priv = THIS->private;
89
    int ret = -1;
90

91
    synclock_lock(&priv->big_lock);
92
    ret = notify_fn(rpc, mydata, event, data);
93
    synclock_unlock(&priv->big_lock);
94

95
    return ret;
96
}
97

98
int
99
glusterd_big_locked_handler(rpcsvc_request_t *req, rpcsvc_actor actor_fn)
100
{
101
    glusterd_conf_t *priv = THIS->private;
102
    int ret = -1;
103

104
    synclock_lock(&priv->big_lock);
105
    ret = actor_fn(req);
106
    synclock_unlock(&priv->big_lock);
107

108
    return ret;
109
}
110

111
static char *specific_key_suffix[] = {".quota-cksum", ".ckusm", ".version",
112
                                      ".quota-version", ".name"};
113

114
static int
115
glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
116
                           int port, gd1_mgmt_friend_req *friend_req)
117
{
118
    int ret = -1;
119
    glusterd_peerinfo_t *peerinfo = NULL;
120
    glusterd_friend_sm_event_t *event = NULL;
121
    glusterd_friend_req_ctx_t *ctx = NULL;
122
    char rhost[UNIX_PATH_MAX + 1] = {0};
123
    dict_t *dict = NULL;
124
    dict_t *peer_ver = NULL;
125
    int totcount = sizeof(specific_key_suffix) / sizeof(specific_key_suffix[0]);
126

127
    if (!port)
128
        port = GF_DEFAULT_BASE_PORT;
129

130
    ret = glusterd_remote_hostname_get(req, rhost, sizeof(rhost));
131

132
    ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
133
    dict = dict_new();
134
    peer_ver = dict_new();
135

136
    RCU_READ_LOCK;
137

138
    if (!ctx || !dict || !peer_ver) {
139
        gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
140
               "Unable to allocate memory");
141
        ret = -1;
142
        goto out;
143
    }
144

145
    peerinfo = glusterd_peerinfo_find(uuid, rhost);
146

147
    if (peerinfo == NULL) {
148
        gf_event(EVENT_PEER_REJECT, "peer=%s", hostname);
149
        ret = glusterd_xfer_friend_add_resp(req, hostname, rhost, port, -1,
150
                                            GF_PROBE_UNKNOWN_PEER);
151
        if (friend_req->vols.vols_val) {
152
            free(friend_req->vols.vols_val);
153
            friend_req->vols.vols_val = NULL;
154
        }
155
        goto out;
156
    }
157

158
    ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_RCVD_FRIEND_REQ, &event);
159

160
    if (ret) {
161
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
162
               "event generation failed: %d", ret);
163
        goto out;
164
    }
165

166
    event->peername = gf_strdup(peerinfo->hostname);
167
    gf_uuid_copy(event->peerid, peerinfo->uuid);
168

169
    gf_uuid_copy(ctx->uuid, uuid);
170
    if (hostname)
171
        ctx->hostname = gf_strdup(hostname);
172
    ctx->req = req;
173

174
    ret = dict_unserialize_specific_keys(
175
        friend_req->vols.vols_val, friend_req->vols.vols_len, &dict,
176
        specific_key_suffix, &peer_ver, totcount);
177

178
    if (ret) {
179
        gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
180
                NULL);
181
        goto out;
182
    } else
183
        dict->extra_stdfree = friend_req->vols.vols_val;
184

185
    ctx->vols = dict;
186
    ctx->peer_ver = peer_ver;
187
    event->ctx = ctx;
188

189
    ret = glusterd_friend_sm_inject_event(event);
190
    if (ret) {
191
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
192
               "Unable to inject event %d, "
193
               "ret = %d",
194
               event->event, ret);
195
        goto out;
196
    }
197

198
    ret = 0;
199
    if (peerinfo && (0 == peerinfo->connected))
200
        ret = GLUSTERD_CONNECTION_AWAITED;
201

202
out:
203
    RCU_READ_UNLOCK;
204

205
    if (ret && (ret != GLUSTERD_CONNECTION_AWAITED)) {
206
        if (ctx && ctx->hostname)
207
            GF_FREE(ctx->hostname);
208
        GF_FREE(ctx);
209
        if (dict) {
210
            if ((!dict->extra_stdfree) && friend_req->vols.vols_val)
211
                free(friend_req->vols.vols_val);
212
            dict_unref(dict);
213
        } else {
214
            free(friend_req->vols.vols_val);
215
        }
216
        if (peer_ver)
217
            dict_unref(peer_ver);
218
        if (event)
219
            GF_FREE(event->peername);
220
        GF_FREE(event);
221
    }
222

223
    return ret;
224
}
225

226
static int
227
glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
228
                             int port)
229
{
230
    int ret = -1;
231
    glusterd_peerinfo_t *peerinfo = NULL;
232
    glusterd_friend_sm_event_t *event = NULL;
233
    glusterd_friend_req_ctx_t *ctx = NULL;
234

235
    if (!port)
236
        port = GF_DEFAULT_BASE_PORT;
237

238
    ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
239

240
    RCU_READ_LOCK;
241

242
    peerinfo = glusterd_peerinfo_find(uuid, hostname);
243

244
    if (peerinfo == NULL) {
245
        RCU_READ_UNLOCK;
246
        gf_msg("glusterd", GF_LOG_CRITICAL, 0, GD_MSG_REQ_FROM_UNKNOWN_PEER,
247
               "Received remove-friend from unknown peer %s", hostname);
248
        ret = glusterd_xfer_friend_remove_resp(req, hostname, port);
249
        goto out;
250
    }
251

252
    ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_RCVD_REMOVE_FRIEND,
253
                                       &event);
254

255
    if (ret) {
256
        RCU_READ_UNLOCK;
257
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
258
               "event generation failed: %d", ret);
259
        goto out;
260
    }
261

262
    if (hostname)
263
        event->peername = gf_strdup(hostname);
264

265
    gf_uuid_copy(event->peerid, uuid);
266

267
    if (!ctx) {
268
        RCU_READ_UNLOCK;
269
        ret = -1;
270
        gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
271
               "Unable to allocate memory");
272
        goto out;
273
    }
274

275
    gf_uuid_copy(ctx->uuid, uuid);
276
    if (hostname)
277
        ctx->hostname = gf_strdup(hostname);
278
    ctx->req = req;
279

280
    event->ctx = ctx;
281

282
    ret = glusterd_friend_sm_inject_event(event);
283

284
    if (ret) {
285
        RCU_READ_UNLOCK;
286
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
287
               "Unable to inject event %d, "
288
               "ret = %d",
289
               event->event, ret);
290
        goto out;
291
    }
292

293
    RCU_READ_UNLOCK;
294

295
    return 0;
296

297
out:
298

299
    if (0 != ret) {
300
        if (ctx && ctx->hostname)
301
            GF_FREE(ctx->hostname);
302
        GF_FREE(ctx);
303
        if (event)
304
            GF_FREE(event->peername);
305
        GF_FREE(event);
306
    }
307

308
    return ret;
309
}
310

311
struct args_pack {
312
    dict_t *dict;
313
    int vol_count;
314
    int opt_count;
315
};
316

317
static int
318
_build_option_key(dict_t *d, char *k, data_t *v, void *tmp)
319
{
320
    char reconfig_key[256] = {
321
        0,
322
    };
323
    int keylen;
324
    struct args_pack *pack = NULL;
325
    int ret = -1;
326
    glusterd_conf_t *priv = NULL;
327

328
    priv = THIS->private;
329
    GF_ASSERT(priv);
330

331
    pack = tmp;
332
    if (strcmp(k, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
333
        return 0;
334

335
    if (priv->op_version > GD_OP_VERSION_MIN) {
336
        if ((strcmp(k, "features.limit-usage") == 0) ||
337
            (strcmp(k, "features.soft-limit") == 0))
338
            return 0;
339
    }
340

341
    /* snap-max-hard-limit and snap-max-soft-limit are system   *
342
     * options set and managed by snapshot config option. Hence *
343
     * they should not be displayed in gluster volume info.     *
344
     */
345
    if ((strcmp(k, "snap-max-hard-limit") == 0) ||
346
        (strcmp(k, "snap-max-soft-limit") == 0))
347
        return 0;
348

349
    keylen = snprintf(reconfig_key, sizeof(reconfig_key), "volume%d.option.%s",
350
                      pack->vol_count, k);
351
    ret = dict_set_strn(pack->dict, reconfig_key, keylen, v->data);
352
    if (0 == ret)
353
        pack->opt_count++;
354

355
    return 0;
356
}
357

358
int
359
glusterd_add_arbiter_info_to_bricks(glusterd_volinfo_t *volinfo,
360
                                    dict_t *volumes, int count)
361
{
362
    char key[64] = {
363
        0,
364
    };
365
    int keylen;
366
    int i = 0;
367
    int ret = 0;
368

369
    if (volinfo->replica_count == 1 || volinfo->arbiter_count != 1)
370
        return 0;
371
    for (i = 1; i <= volinfo->brick_count; i++) {
372
        if (i % volinfo->replica_count != 0)
373
            continue;
374
        keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter", count,
375
                          i);
376
        ret = dict_set_int32n(volumes, key, keylen, 1);
377
        if (ret)
378
            return ret;
379
    }
380
    return 0;
381
}
382

383
static int
384
glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
385
                                   int count)
386
{
387
    int ret = -1;
388
    char key[64] = {
389
        0,
390
    };
391
    int keylen;
392
    glusterd_brickinfo_t *brickinfo = NULL;
393
    glusterd_brickinfo_t *ta_brickinfo = NULL;
394
    char *buf = NULL;
395
    int i = 1;
396
    dict_t *dict = NULL;
397
    glusterd_conf_t *priv = NULL;
398
    char *volume_id_str = NULL;
399
    struct args_pack pack = {
400
        0,
401
    };
402
    xlator_t *this = THIS;
403
    int32_t len = 0;
404

405
    char ta_brick[4096] = {
406
        0,
407
    };
408

409
    GF_ASSERT(volinfo);
410
    GF_ASSERT(volumes);
411

412
    priv = this->private;
413

414
    GF_ASSERT(priv);
415

416
    keylen = snprintf(key, sizeof(key), "volume%d.name", count);
417
    ret = dict_set_strn(volumes, key, keylen, volinfo->volname);
418
    if (ret) {
419
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
420
                "Key=%s", key, NULL);
421
        goto out;
422
    }
423

424
    keylen = snprintf(key, sizeof(key), "volume%d.type", count);
425
    ret = dict_set_int32n(volumes, key, keylen, volinfo->type);
426
    if (ret) {
427
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
428
                "Key=%s", key, NULL);
429
        goto out;
430
    }
431

432
    keylen = snprintf(key, sizeof(key), "volume%d.status", count);
433
    ret = dict_set_int32n(volumes, key, keylen, volinfo->status);
434
    if (ret) {
435
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
436
                "Key=%s", key, NULL);
437
        goto out;
438
    }
439

440
    keylen = snprintf(key, sizeof(key), "volume%d.brick_count", count);
441
    ret = dict_set_int32n(volumes, key, keylen, volinfo->brick_count);
442
    if (ret) {
443
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
444
                "Key=%s", key, NULL);
445
        goto out;
446
    }
447

448
    keylen = snprintf(key, sizeof(key), "volume%d.dist_count", count);
449
    ret = dict_set_int32n(volumes, key, keylen,
450
                          volinfo->brick_count / volinfo->dist_leaf_count);
451
    if (ret) {
452
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
453
                "Key=%s", key, NULL);
454
        goto out;
455
    }
456

457
    keylen = snprintf(key, sizeof(key), "volume%d.replica_count", count);
458
    ret = dict_set_int32n(volumes, key, keylen, volinfo->replica_count);
459
    if (ret) {
460
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
461
                "Key=%s", key, NULL);
462
        goto out;
463
    }
464

465
    keylen = snprintf(key, sizeof(key), "volume%d.disperse_count", count);
466
    ret = dict_set_int32n(volumes, key, keylen, volinfo->disperse_count);
467
    if (ret) {
468
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
469
                "Key=%s", key, NULL);
470
        goto out;
471
    }
472

473
    keylen = snprintf(key, sizeof(key), "volume%d.redundancy_count", count);
474
    ret = dict_set_int32n(volumes, key, keylen, volinfo->redundancy_count);
475
    if (ret) {
476
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
477
                "Key=%s", key, NULL);
478
        goto out;
479
    }
480

481
    keylen = snprintf(key, sizeof(key), "volume%d.arbiter_count", count);
482
    ret = dict_set_int32n(volumes, key, keylen, volinfo->arbiter_count);
483
    if (ret) {
484
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
485
                "Key=%s", key, NULL);
486
        goto out;
487
    }
488

489
    keylen = snprintf(key, sizeof(key), "volume%d.transport", count);
490
    ret = dict_set_int32n(volumes, key, keylen, volinfo->transport_type);
491
    if (ret) {
492
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
493
                "Key=%s", key, NULL);
494
        goto out;
495
    }
496

497
    keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_count", count);
498
    ret = dict_set_int32n(volumes, key, keylen, volinfo->thin_arbiter_count);
499
    if (ret) {
500
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
501
                "Key=%s", key, NULL);
502
        goto out;
503
    }
504

505
    volume_id_str = gf_strdup(uuid_utoa(volinfo->volume_id));
506
    if (!volume_id_str) {
507
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, NULL);
508
        goto out;
509
    }
510

511
    keylen = snprintf(key, sizeof(key), "volume%d.volume_id", count);
512
    ret = dict_set_dynstrn(volumes, key, keylen, volume_id_str);
513
    if (ret) {
514
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
515
                "Key=%s", key, NULL);
516
        goto out;
517
    }
518

519
    keylen = snprintf(key, sizeof(key), "volume%d.rebalance", count);
520
    ret = dict_set_int32n(volumes, key, keylen, volinfo->rebal.defrag_cmd);
521
    if (ret) {
522
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
523
                "Key=%s", key, NULL);
524
        goto out;
525
    }
526

527
    keylen = snprintf(key, sizeof(key), "volume%d.snap_count", count);
528
    ret = dict_set_int32n(volumes, key, keylen, volinfo->snap_count);
529
    if (ret) {
530
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
531
                "Key=%s", key, NULL);
532
        goto out;
533
    }
534

535
    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
536
    {
537
        char brick[1024] = {
538
            0,
539
        };
540
        char brick_uuid[64] = {
541
            0,
542
        };
543
        len = snprintf(brick, sizeof(brick), "%s:%s", brickinfo->hostname,
544
                       brickinfo->path);
545
        if ((len < 0) || (len >= sizeof(brick))) {
546
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
547
            ret = -1;
548
            goto out;
549
        }
550
        buf = gf_strdup(brick);
551
        keylen = snprintf(key, sizeof(key), "volume%d.brick%d", count, i);
552
        ret = dict_set_dynstrn(volumes, key, keylen, buf);
553
        if (ret) {
554
            gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
555
                    "Key=%s", key, NULL);
556
            goto out;
557
        }
558
        keylen = snprintf(key, sizeof(key), "volume%d.brick%d.uuid", count, i);
559
        snprintf(brick_uuid, sizeof(brick_uuid), "%s",
560
                 uuid_utoa(brickinfo->uuid));
561
        buf = gf_strdup(brick_uuid);
562
        if (!buf) {
563
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
564
                    "brick_uuid=%s", brick_uuid, NULL);
565
            goto out;
566
        }
567
        ret = dict_set_dynstrn(volumes, key, keylen, buf);
568
        if (ret) {
569
            gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
570
                    "Key=%s", key, NULL);
571
            goto out;
572
        }
573

574
        i++;
575
    }
576
    if (volinfo->thin_arbiter_count == 1) {
577
        ta_brickinfo = list_first_entry(&volinfo->ta_bricks,
578
                                        glusterd_brickinfo_t, brick_list);
579
        len = snprintf(ta_brick, sizeof(ta_brick), "%s:%s",
580
                       ta_brickinfo->hostname, ta_brickinfo->path);
581
        if ((len < 0) || (len >= sizeof(ta_brick))) {
582
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
583
            ret = -1;
584
            goto out;
585
        }
586
        buf = gf_strdup(ta_brick);
587
        keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_brick",
588
                          count);
589
        ret = dict_set_dynstrn(volumes, key, keylen, buf);
590
        if (ret) {
591
            gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
592
                    "Key=%s", key, NULL);
593
            goto out;
594
        }
595
    }
596

597
    ret = glusterd_add_arbiter_info_to_bricks(volinfo, volumes, count);
598
    if (ret) {
599
        gf_smsg(this->name, GF_LOG_ERROR, errno,
600
                GD_MSG_ARBITER_BRICK_SET_INFO_FAIL, NULL);
601
        goto out;
602
    }
603

604
    dict = volinfo->dict;
605
    if (!dict) {
606
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
607
        ret = 0;
608
        goto out;
609
    }
610

611
    pack.dict = volumes;
612
    pack.vol_count = count;
613
    pack.opt_count = 0;
614
    dict_foreach(dict, _build_option_key, (void *)&pack);
615
    dict_foreach(priv->opts, _build_option_key, &pack);
616

617
    keylen = snprintf(key, sizeof(key), "volume%d.opt_count", pack.vol_count);
618
    ret = dict_set_int32n(volumes, key, keylen, pack.opt_count);
619
out:
620
    return ret;
621
}
622

623
int32_t
624
glusterd_op_txn_begin(rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
625
                      char *err_str, size_t err_len)
626
{
627
    int32_t ret = -1;
628
    dict_t *dict = NULL;
629
    xlator_t *this = THIS;
630
    glusterd_conf_t *priv = NULL;
631
    int32_t locked = 0;
632
    char *tmp = NULL;
633
    char *volname = NULL;
634
    uuid_t *txn_id = NULL;
635
    glusterd_op_info_t txn_op_info = {
636
        GD_OP_STATE_DEFAULT,
637
    };
638
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
639
    uint32_t op_errno = 0;
640
    time_t timeout = 0;
641

642
    GF_ASSERT(req);
643
    GF_ASSERT((op > GD_OP_NONE) && (op < GD_OP_MAX));
644
    GF_ASSERT(NULL != ctx);
645

646
    priv = this->private;
647
    GF_ASSERT(priv);
648

649
    dict = ctx;
650

651
    /* Generate a transaction-id for this operation and
652
     * save it in the dict. This transaction id distinguishes
653
     * each transaction, and helps separate opinfos in the
654
     * op state machine. */
655
    ret = glusterd_generate_txn_id(dict, &txn_id);
656
    if (ret) {
657
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_IDGEN_FAIL,
658
               "Failed to generate transaction id");
659
        goto out;
660
    }
661

662
    /* Save the MY_UUID as the originator_uuid. This originator_uuid
663
     * will be used by is_origin_glusterd() to determine if a node
664
     * is the originator node for a command. */
665
    ret = glusterd_set_originator_uuid(dict);
666
    if (ret) {
667
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UUID_SET_FAIL,
668
               "Failed to set originator_uuid.");
669
        goto out;
670
    }
671

672
    /* If no volname is given as a part of the command, locks will
673
     * not be held */
674
    ret = dict_get_str(dict, "volname", &tmp);
675
    if (ret) {
676
        gf_msg(this->name, GF_LOG_INFO, -ret, GD_MSG_DICT_GET_FAILED,
677
               "No Volume name present. "
678
               "Locks not being held.");
679
        goto local_locking_done;
680
    } else {
681
        /* Use a copy of volname, as cli response will be
682
         * sent before the unlock, and the volname in the
683
         * dict, might be removed */
684
        volname = gf_strdup(tmp);
685
        if (!volname)
686
            goto out;
687
    }
688

689
    /* Cli will add timeout key to dict if the default timeout is
690
     * other than 2 minutes. Here we use this value to check whether
691
     * mgmt_v3_lock_timeout should be set to default value or we
692
     * need to change the value according to timeout value
693
     * i.e, timeout + 120 seconds. */
694
    ret = dict_get_time(dict, "timeout", &timeout);
695
    if (!ret)
696
        priv->mgmt_v3_lock_timeout = timeout + 120;
697

698
    ret = glusterd_mgmt_v3_lock(volname, MY_UUID, &op_errno, "vol");
699
    if (ret) {
700
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
701
               "Unable to acquire lock for %s", volname);
702
        snprintf(err_str, err_len,
703
                 "Another transaction is in progress for %s. "
704
                 "Please try again after some time.",
705
                 volname);
706
        /* Use a copy of volname, as cli response will be
707
         * sent before the unlock, and the volname in the
708
         * dict, might be removed */
709
        volname = gf_strdup(tmp);
710
        if (!volname)
711
            goto out;
712
    }
713

714
    locked = 1;
715
    gf_msg_debug(this->name, 0, "Acquired lock on localhost");
716

717
local_locking_done:
718
    /* If no volname is given as a part of the command, locks will
719
     * not be held, hence sending stage event. */
720
    if (volname)
721
        event_type = GD_OP_EVENT_START_LOCK;
722
    else {
723
        txn_op_info.state = GD_OP_STATE_LOCK_SENT;
724
        event_type = GD_OP_EVENT_ALL_ACC;
725
    }
726

727
    /* Save opinfo for this transaction with the transaction id. */
728
    glusterd_txn_opinfo_init(&txn_op_info, 0, (int *)&op, ctx, req);
729

730
    ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info);
731
    if (ret) {
732
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
733
               "Unable to set transaction's opinfo");
734
        if (ctx)
735
            dict_unref(ctx);
736
        goto out;
737
    }
738

739
    ret = glusterd_op_sm_inject_event(event_type, txn_id, ctx);
740
    if (ret) {
741
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
742
               "Failed to acquire cluster"
743
               " lock.");
744
        goto out;
745
    }
746

747
out:
748
    if (locked && ret) {
749
        ret = glusterd_mgmt_v3_unlock(volname, MY_UUID, "vol");
750
        if (ret)
751
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
752
                   "Unable to release lock for %s", volname);
753
        ret = -1;
754
    }
755

756
    if (volname)
757
        GF_FREE(volname);
758

759
    gf_msg_debug(this->name, 0, "Returning %d", ret);
760
    return ret;
761
}
762

763
int
764
__glusterd_handle_cluster_lock(rpcsvc_request_t *req)
765
{
766
    dict_t *op_ctx = NULL;
767
    int32_t ret = -1;
768
    gd1_mgmt_cluster_lock_req lock_req = {
769
        {0},
770
    };
771
    glusterd_op_lock_ctx_t *ctx = NULL;
772
    glusterd_op_sm_event_type_t op = GD_OP_EVENT_LOCK;
773
    glusterd_op_info_t txn_op_info = {
774
        GD_OP_STATE_DEFAULT,
775
    };
776
    glusterd_conf_t *priv = NULL;
777
    uuid_t *txn_id = NULL;
778
    xlator_t *this = THIS;
779

780
    priv = this->private;
781
    GF_ASSERT(priv);
782
    GF_ASSERT(req);
783

784
    txn_id = &priv->global_txn_id;
785

786
    ret = xdr_to_generic(req->msg[0], &lock_req,
787
                         (xdrproc_t)xdr_gd1_mgmt_cluster_lock_req);
788
    if (ret < 0) {
789
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
790
               "Failed to decode lock "
791
               "request received from peer");
792
        req->rpc_err = GARBAGE_ARGS;
793
        goto out;
794
    }
795

796
    gf_msg_debug(this->name, 0, "Received LOCK from uuid: %s",
797
                 uuid_utoa(lock_req.uuid));
798

799
    RCU_READ_LOCK;
800
    ret = (glusterd_peerinfo_find_by_uuid(lock_req.uuid) == NULL);
801
    RCU_READ_UNLOCK;
802
    if (ret) {
803
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
804
               "%s doesn't "
805
               "belong to the cluster. Ignoring request.",
806
               uuid_utoa(lock_req.uuid));
807
        ret = -1;
808
        goto out;
809
    }
810

811
    ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t);
812

813
    if (!ctx) {
814
        // respond here
815
        return -1;
816
    }
817

818
    gf_uuid_copy(ctx->uuid, lock_req.uuid);
819
    ctx->req = req;
820
    ctx->dict = NULL;
821

822
    op_ctx = dict_new();
823
    if (!op_ctx) {
824
        ret = -1;
825
        gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
826
               "Unable to set new dict");
827
        goto out;
828
    }
829

830
    glusterd_txn_opinfo_init(&txn_op_info, 0, (int *)&op, op_ctx, req);
831

832
    ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info);
833
    if (ret) {
834
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
835
               "Unable to set transaction's opinfo");
836
        dict_unref(txn_op_info.op_ctx);
837
        goto out;
838
    }
839

840
    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_LOCK, txn_id, ctx);
841
    if (ret)
842
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
843
               "Failed to inject event GD_OP_EVENT_LOCK");
844

845
out:
846
    gf_msg_debug(this->name, 0, "Returning %d", ret);
847

848
    glusterd_friend_sm();
849
    glusterd_op_sm();
850

851
    if (ret)
852
        GF_FREE(ctx);
853

854
    return ret;
855
}
856

857
static int
858
glusterd_handle_cluster_lock(rpcsvc_request_t *req)
859
{
860
    return glusterd_big_locked_handler(req, __glusterd_handle_cluster_lock);
861
}
862

863
static int
864
glusterd_req_ctx_create(rpcsvc_request_t *rpc_req, int op, uuid_t uuid,
865
                        char *buf_val, size_t buf_len,
866
                        gf_gld_mem_types_t mem_type,
867
                        glusterd_req_ctx_t **req_ctx_out)
868
{
869
    int ret = -1;
870
    char str[50] = {
871
        0,
872
    };
873
    glusterd_req_ctx_t *req_ctx = NULL;
874
    dict_t *dict = NULL;
875
    xlator_t *this = THIS;
876

877
    gf_uuid_unparse(uuid, str);
878
    gf_msg_debug(this->name, 0, "Received op from uuid %s", str);
879

880
    dict = dict_new();
881
    if (!dict) {
882
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
883
        goto out;
884
    }
885

886
    req_ctx = GF_CALLOC(1, sizeof(*req_ctx), mem_type);
887
    if (!req_ctx) {
888
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
889
        goto out;
890
    }
891

892
    gf_uuid_copy(req_ctx->uuid, uuid);
893
    req_ctx->op = op;
894
    ret = dict_unserialize(buf_val, buf_len, &dict);
895
    if (ret) {
896
        gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
897
                NULL);
898
        goto out;
899
    }
900

901
    req_ctx->dict = dict;
902
    req_ctx->req = rpc_req;
903
    *req_ctx_out = req_ctx;
904
    ret = 0;
905
out:
906
    if (ret) {
907
        if (dict)
908
            dict_unref(dict);
909
        GF_FREE(req_ctx);
910
    }
911
    return ret;
912
}
913

914
int
915
__glusterd_handle_stage_op(rpcsvc_request_t *req)
916
{
917
    int32_t ret = -1;
918
    glusterd_req_ctx_t *req_ctx = NULL;
919
    gd1_mgmt_stage_op_req op_req = {
920
        {0},
921
    };
922
    xlator_t *this = THIS;
923
    uuid_t *txn_id = NULL;
924
    glusterd_op_info_t txn_op_info = {
925
        GD_OP_STATE_DEFAULT,
926
    };
927
    glusterd_op_sm_state_t state = GD_OP_STATE_DEFAULT;
928
    glusterd_conf_t *priv = NULL;
929

930
    priv = this->private;
931
    GF_ASSERT(priv);
932
    GF_ASSERT(req);
933

934
    txn_id = &priv->global_txn_id;
935

936
    ret = xdr_to_generic(req->msg[0], &op_req,
937
                         (xdrproc_t)xdr_gd1_mgmt_stage_op_req);
938
    if (ret < 0) {
939
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
940
               "Failed to decode stage "
941
               "request received from peer");
942
        req->rpc_err = GARBAGE_ARGS;
943
        goto out;
944
    }
945

946
    ret = glusterd_req_ctx_create(req, op_req.op, op_req.uuid,
947
                                  op_req.buf.buf_val, op_req.buf.buf_len,
948
                                  gf_gld_mt_op_stage_ctx_t, &req_ctx);
949
    if (ret) {
950
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_CTX_CREATE_FAIL,
951
               "Failed to create req_ctx");
952
        goto out;
953
    }
954

955
    ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
956
    gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
957

958
    RCU_READ_LOCK;
959
    ret = (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL);
960
    RCU_READ_UNLOCK;
961
    if (ret) {
962
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
963
               "%s doesn't "
964
               "belong to the cluster. Ignoring request.",
965
               uuid_utoa(op_req.uuid));
966
        ret = -1;
967
        goto out;
968
    }
969

970
    /* In cases where there is no volname, the receivers won't have a
971
     * transaction opinfo created, as for those operations, the locking
972
     * phase where the transaction opinfos are created, won't be called.
973
     * skip_locking will be true for all such transaction and we clear
974
     * the txn_opinfo after the staging phase, except for geo-replication
975
     * operations where we need to access txn_opinfo in the later phases also.
976
     */
977
    ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info);
978
    if (ret) {
979
        gf_msg_debug(this->name, 0, "No transaction's opinfo set");
980

981
        state = GD_OP_STATE_LOCKED;
982
        glusterd_txn_opinfo_init(&txn_op_info, state, &op_req.op, req_ctx->dict,
983
                                 req);
984

985
        if (req_ctx->op != GD_OP_GSYNC_SET)
986
            txn_op_info.skip_locking = _gf_true;
987
        ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info);
988
        if (ret) {
989
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
990
                   "Unable to set transaction's opinfo");
991
            dict_unref(req_ctx->dict);
992
            goto out;
993
        }
994
    }
995

996
    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_STAGE_OP, txn_id, req_ctx);
997
    if (ret)
998
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
999
               "Failed to inject event GD_OP_EVENT_STAGE_OP");
1000

1001
out:
1002
    free(op_req.buf.buf_val);  // malloced by xdr
1003
    glusterd_friend_sm();
1004
    glusterd_op_sm();
1005
    return ret;
1006
}
1007

1008
static int
1009
glusterd_handle_stage_op(rpcsvc_request_t *req)
1010
{
1011
    return glusterd_big_locked_handler(req, __glusterd_handle_stage_op);
1012
}
1013

1014
int
1015
__glusterd_handle_commit_op(rpcsvc_request_t *req)
1016
{
1017
    int32_t ret = -1;
1018
    glusterd_req_ctx_t *req_ctx = NULL;
1019
    gd1_mgmt_commit_op_req op_req = {
1020
        {0},
1021
    };
1022
    xlator_t *this = THIS;
1023
    uuid_t *txn_id = NULL;
1024
    glusterd_conf_t *priv = NULL;
1025

1026
    priv = this->private;
1027
    GF_ASSERT(priv);
1028
    GF_ASSERT(req);
1029

1030
    txn_id = &priv->global_txn_id;
1031

1032
    ret = xdr_to_generic(req->msg[0], &op_req,
1033
                         (xdrproc_t)xdr_gd1_mgmt_commit_op_req);
1034
    if (ret < 0) {
1035
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
1036
               "Failed to decode commit "
1037
               "request received from peer");
1038
        req->rpc_err = GARBAGE_ARGS;
1039
        goto out;
1040
    }
1041

1042
    RCU_READ_LOCK;
1043
    ret = (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL);
1044
    RCU_READ_UNLOCK;
1045
    if (ret) {
1046
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
1047
               "%s doesn't "
1048
               "belong to the cluster. Ignoring request.",
1049
               uuid_utoa(op_req.uuid));
1050
        ret = -1;
1051
        goto out;
1052
    }
1053

1054
    // the structures should always be equal
1055
    GF_ASSERT(sizeof(gd1_mgmt_commit_op_req) == sizeof(gd1_mgmt_stage_op_req));
1056
    ret = glusterd_req_ctx_create(req, op_req.op, op_req.uuid,
1057
                                  op_req.buf.buf_val, op_req.buf.buf_len,
1058
                                  gf_gld_mt_op_commit_ctx_t, &req_ctx);
1059
    if (ret)
1060
        goto out;
1061

1062
    ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
1063
    gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
1064

1065
    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_COMMIT_OP, txn_id, req_ctx);
1066

1067
out:
1068
    free(op_req.buf.buf_val);  // malloced by xdr
1069
    glusterd_friend_sm();
1070
    glusterd_op_sm();
1071
    return ret;
1072
}
1073

1074
static int
1075
glusterd_handle_commit_op(rpcsvc_request_t *req)
1076
{
1077
    return glusterd_big_locked_handler(req, __glusterd_handle_commit_op);
1078
}
1079

1080
int
1081
__glusterd_handle_cli_probe(rpcsvc_request_t *req)
1082
{
1083
    int32_t ret = -1;
1084
    gf_cli_req cli_req = {
1085
        {
1086
            0,
1087
        },
1088
    };
1089
    glusterd_peerinfo_t *peerinfo = NULL;
1090
    gf_boolean_t run_fsm = _gf_true;
1091
    xlator_t *this = THIS;
1092
    char *bind_name = NULL;
1093
    dict_t *dict = NULL;
1094
    char *hostname = NULL;
1095
    int port = 0;
1096
    int op_errno = 0;
1097

1098
    GF_ASSERT(req);
1099

1100
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1101
    if (ret < 0) {
1102
        // failed to decode msg;
1103
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
1104
               "xdr decoding error");
1105
        req->rpc_err = GARBAGE_ARGS;
1106
        goto out;
1107
    }
1108

1109
    if (cli_req.dict.dict_len) {
1110
        dict = dict_new();
1111

1112
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
1113
                               &dict);
1114
        if (ret < 0) {
1115
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
1116
                   "Failed to "
1117
                   "unserialize req-buffer to dictionary");
1118
            goto out;
1119
        }
1120
    }
1121

1122
    ret = dict_get_str(dict, "hostname", &hostname);
1123
    if (ret) {
1124
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
1125
               "Failed to get hostname");
1126
        goto out;
1127
    }
1128

1129
    ret = dict_get_int32(dict, "port", &port);
1130
    if (ret) {
1131
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PORT_NOTFOUND_IN_DICT,
1132
               "Failed to get port");
1133
        goto out;
1134
    }
1135

1136
    if (glusterd_is_any_volume_in_server_quorum(this) &&
1137
        !does_gd_meet_server_quorum(this)) {
1138
        glusterd_xfer_cli_probe_resp(req, -1, GF_PROBE_QUORUM_NOT_MET, NULL,
1139
                                     hostname, port, dict);
1140
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
1141
               "Server quorum not met. Rejecting operation.");
1142
        ret = 0;
1143
        goto out;
1144
    }
1145

1146
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_CLI_REQ_RECVD,
1147
           "Received CLI probe req %s %d", hostname, port);
1148

1149
    if (dict_get_str(this->options, "transport.socket.bind-address",
1150
                     &bind_name) == 0) {
1151
        gf_msg_debug("glusterd", 0,
1152
                     "only checking probe address vs. bind address");
1153
        ret = gf_is_same_address(bind_name, hostname);
1154
    } else {
1155
        ret = glusterd_gf_is_local_addr(hostname);
1156
    }
1157
    if (ret) {
1158
        glusterd_xfer_cli_probe_resp(req, 0, GF_PROBE_LOCALHOST, NULL, hostname,
1159
                                     port, dict);
1160
        ret = 0;
1161
        goto out;
1162
    }
1163

1164
    RCU_READ_LOCK;
1165

1166
    peerinfo = glusterd_peerinfo_find_by_hostname(hostname);
1167
    ret = (peerinfo && gd_peer_has_address(peerinfo, hostname));
1168

1169
    RCU_READ_UNLOCK;
1170

1171
    if (ret) {
1172
        gf_msg_debug("glusterd", 0,
1173
                     "Probe host %s port %d "
1174
                     "already a peer",
1175
                     hostname, port);
1176
        glusterd_xfer_cli_probe_resp(req, 0, GF_PROBE_FRIEND, NULL, hostname,
1177
                                     port, dict);
1178
        ret = 0;
1179
        goto out;
1180
    }
1181

1182
    ret = glusterd_probe_begin(req, hostname, port, dict, &op_errno);
1183

1184
    if (ret == GLUSTERD_CONNECTION_AWAITED) {
1185
        // fsm should be run after connection establishes
1186
        run_fsm = _gf_false;
1187
        ret = 0;
1188

1189
    } else if (ret == -1) {
1190
        glusterd_xfer_cli_probe_resp(req, -1, op_errno, NULL, hostname, port,
1191
                                     dict);
1192
        goto out;
1193
    }
1194

1195
out:
1196
    free(cli_req.dict.dict_val);
1197

1198
    if (run_fsm) {
1199
        glusterd_friend_sm();
1200
        glusterd_op_sm();
1201
    }
1202

1203
    return ret;
1204
}
1205

1206
static int
1207
glusterd_handle_cli_probe(rpcsvc_request_t *req)
1208
{
1209
    return glusterd_big_locked_handler(req, __glusterd_handle_cli_probe);
1210
}
1211

1212
/* Checks if the given peer contains bricks belonging to the given volume.
1213
 * Returns,
1214
 *   2 - if peer contains all the bricks
1215
 *   1 - if peer contains at least 1 brick
1216
 *   0 - if peer contains no bricks
1217
 */
1218
static int
1219
glusterd_friend_contains_snap_bricks(glusterd_snap_t *snapinfo,
1220
                                     uuid_t friend_uuid)
1221
{
1222
    int ret = -1;
1223
    glusterd_volinfo_t *volinfo = NULL;
1224
    glusterd_brickinfo_t *brickinfo = NULL;
1225
    int count = 0;
1226

1227
    GF_VALIDATE_OR_GOTO("glusterd", snapinfo, out);
1228

1229
    cds_list_for_each_entry(volinfo, &snapinfo->volumes, vol_list)
1230
    {
1231
        cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
1232
        {
1233
            if (!gf_uuid_compare(brickinfo->uuid, friend_uuid)) {
1234
                count++;
1235
            }
1236
        }
1237
    }
1238

1239
    if (count > 0)
1240
        ret = 1;
1241
    else
1242
        ret = 0;
1243

1244
out:
1245
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);
1246
    return ret;
1247
}
1248

1249
int
1250
__glusterd_handle_cli_deprobe(rpcsvc_request_t *req)
1251
{
1252
    int32_t ret = -1;
1253
    gf_cli_req cli_req = {
1254
        {
1255
            0,
1256
        },
1257
    };
1258
    uuid_t uuid = {0};
1259
    int op_errno = 0;
1260
    xlator_t *this = THIS;
1261
    glusterd_conf_t *priv = NULL;
1262
    dict_t *dict = NULL;
1263
    char *hostname = NULL;
1264
    int port = 0;
1265
    int flags = 0;
1266
    glusterd_volinfo_t *volinfo = NULL;
1267
    glusterd_volinfo_t *tmp = NULL;
1268
    glusterd_snap_t *snapinfo = NULL;
1269
    glusterd_snap_t *tmpsnap = NULL;
1270
    gf_boolean_t need_free = _gf_false;
1271

1272
    priv = this->private;
1273
    GF_ASSERT(priv);
1274
    GF_ASSERT(req);
1275

1276
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1277
    if (ret < 0) {
1278
        // failed to decode msg;
1279
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
1280
               "Failed to decode "
1281
               "request received from cli");
1282
        req->rpc_err = GARBAGE_ARGS;
1283
        goto out;
1284
    }
1285

1286
    if (cli_req.dict.dict_len) {
1287
        dict = dict_new();
1288

1289
        if (dict) {
1290
            need_free = _gf_true;
1291
        } else {
1292
            ret = -1;
1293
            goto out;
1294
        }
1295

1296
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
1297
                               &dict);
1298
        if (ret < 0) {
1299
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
1300
                   "Failed to "
1301
                   "unserialize req-buffer to dictionary");
1302
            goto out;
1303
        }
1304
    }
1305

1306
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_CLI_REQ_RECVD,
1307
           "Received CLI deprobe req");
1308

1309
    ret = dict_get_str(dict, "hostname", &hostname);
1310
    if (ret) {
1311
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
1312
               "Failed to get hostname");
1313
        goto out;
1314
    }
1315

1316
    ret = dict_get_int32(dict, "port", &port);
1317
    if (ret) {
1318
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PORT_NOTFOUND_IN_DICT,
1319
               "Failed to get port");
1320
        goto out;
1321
    }
1322
    ret = dict_get_int32(dict, "flags", &flags);
1323
    if (ret) {
1324
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FLAGS_NOTFOUND_IN_DICT,
1325
               "Failed to get flags");
1326
        goto out;
1327
    }
1328

1329
    ret = glusterd_hostname_to_uuid(hostname, uuid);
1330
    if (ret) {
1331
        op_errno = GF_DEPROBE_NOT_FRIEND;
1332
        goto out;
1333
    }
1334

1335
    if (!gf_uuid_compare(uuid, MY_UUID)) {
1336
        op_errno = GF_DEPROBE_LOCALHOST;
1337
        ret = -1;
1338
        goto out;
1339
    }
1340

1341
    if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
1342
        /* Check if peers are connected, except peer being
1343
         * detached*/
1344
        if (!glusterd_chk_peers_connected_befriended(uuid)) {
1345
            ret = -1;
1346
            op_errno = GF_DEPROBE_FRIEND_DOWN;
1347
            goto out;
1348
        }
1349
    }
1350

1351
    /* Check for if volumes exist with some bricks on the peer being
1352
     * detached. It's not a problem if a volume contains none or all
1353
     * of its bricks on the peer being detached
1354
     */
1355
    cds_list_for_each_entry_safe(volinfo, tmp, &priv->volumes, vol_list)
1356
    {
1357
        ret = glusterd_friend_contains_vol_bricks(volinfo, uuid);
1358
        if (ret == 1) {
1359
            op_errno = GF_DEPROBE_BRICK_EXIST;
1360
            goto out;
1361
        }
1362
    }
1363

1364
    cds_list_for_each_entry_safe(snapinfo, tmpsnap, &priv->snapshots, snap_list)
1365
    {
1366
        ret = glusterd_friend_contains_snap_bricks(snapinfo, uuid);
1367
        if (ret == 1) {
1368
            op_errno = GF_DEPROBE_SNAP_BRICK_EXIST;
1369
            goto out;
1370
        }
1371
    }
1372
    if (!(flags & GF_CLI_FLAG_OP_FORCE)) {
1373
        if (glusterd_is_any_volume_in_server_quorum(this) &&
1374
            !does_gd_meet_server_quorum(this)) {
1375
            gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
1376
                   "Server quorum not met. Rejecting operation.");
1377
            ret = -1;
1378
            op_errno = GF_DEPROBE_QUORUM_NOT_MET;
1379
            goto out;
1380
        }
1381
    }
1382

1383
    if (!gf_uuid_is_null(uuid)) {
1384
        ret = glusterd_deprobe_begin(req, hostname, port, uuid, dict,
1385
                                     &op_errno);
1386
    } else {
1387
        ret = glusterd_deprobe_begin(req, hostname, port, NULL, dict,
1388
                                     &op_errno);
1389
    }
1390

1391
    need_free = _gf_false;
1392

1393
out:
1394
    free(cli_req.dict.dict_val);
1395

1396
    if (ret) {
1397
        ret = glusterd_xfer_cli_deprobe_resp(req, ret, op_errno, NULL, hostname,
1398
                                             dict);
1399
        if (need_free) {
1400
            dict_unref(dict);
1401
        }
1402
    }
1403

1404
    glusterd_friend_sm();
1405
    glusterd_op_sm();
1406

1407
    return ret;
1408
}
1409

1410
static int
1411
glusterd_handle_cli_deprobe(rpcsvc_request_t *req)
1412
{
1413
    return glusterd_big_locked_handler(req, __glusterd_handle_cli_deprobe);
1414
}
1415

1416
int
1417
__glusterd_handle_cli_list_friends(rpcsvc_request_t *req)
1418
{
1419
    int32_t ret = -1;
1420
    gf1_cli_peer_list_req cli_req = {
1421
        0,
1422
    };
1423
    dict_t *dict = NULL;
1424

1425
    GF_ASSERT(req);
1426

1427
    ret = xdr_to_generic(req->msg[0], &cli_req,
1428
                         (xdrproc_t)xdr_gf1_cli_peer_list_req);
1429
    if (ret < 0) {
1430
        // failed to decode msg;
1431
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
1432
               "Failed to decode "
1433
               "request received from cli");
1434
        req->rpc_err = GARBAGE_ARGS;
1435
        goto out;
1436
    }
1437

1438
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_CLI_REQ_RECVD,
1439
           "Received cli list req");
1440

1441
    if (cli_req.dict.dict_len) {
1442
        /* Unserialize the dictionary */
1443
        dict = dict_new();
1444

1445
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
1446
                               &dict);
1447
        if (ret < 0) {
1448
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
1449
                   "failed to "
1450
                   "unserialize req-buffer to dictionary");
1451
            goto out;
1452
        } else {
1453
            dict->extra_stdfree = cli_req.dict.dict_val;
1454
        }
1455
    }
1456

1457
    ret = glusterd_list_friends(req, dict, cli_req.flags);
1458

1459
out:
1460
    if (dict)
1461
        dict_unref(dict);
1462

1463
    glusterd_friend_sm();
1464
    glusterd_op_sm();
1465

1466
    return ret;
1467
}
1468

1469
static int
1470
glusterd_handle_cli_list_friends(rpcsvc_request_t *req)
1471
{
1472
    return glusterd_big_locked_handler(req, __glusterd_handle_cli_list_friends);
1473
}
1474

1475
static int
1476
__glusterd_handle_cli_get_volume(rpcsvc_request_t *req)
1477
{
1478
    int32_t ret = -1;
1479
    gf_cli_req cli_req = {{
1480
        0,
1481
    }};
1482
    int32_t flags = 0;
1483
    dict_t *dict = NULL;
1484
    xlator_t *this = THIS;
1485

1486
    GF_ASSERT(req);
1487

1488
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1489
    if (ret < 0) {
1490
        // failed to decode msg;
1491
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
1492
               "Failed to decode "
1493
               "request received from cli");
1494
        req->rpc_err = GARBAGE_ARGS;
1495
        goto out;
1496
    }
1497

1498
    gf_msg(this->name, GF_LOG_DEBUG, 0, GD_MSG_GET_VOL_REQ_RCVD,
1499
           "Received get vol req");
1500

1501
    if (cli_req.dict.dict_len) {
1502
        /* Unserialize the dictionary */
1503
        dict = dict_new();
1504

1505
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
1506
                               &dict);
1507
        if (ret < 0) {
1508
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
1509
                   "failed to "
1510
                   "unserialize req-buffer to dictionary");
1511
            goto out;
1512
        } else {
1513
            dict->extra_stdfree = cli_req.dict.dict_val;
1514
        }
1515
    }
1516

1517
    ret = dict_get_int32(dict, "flags", &flags);
1518
    if (ret) {
1519
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FLAGS_NOTFOUND_IN_DICT,
1520
               "failed to get flags");
1521
        goto out;
1522
    }
1523
    ret = glusterd_get_volumes(req, dict, flags);
1524

1525
out:
1526
    if (dict)
1527
        dict_unref(dict);
1528

1529
    glusterd_friend_sm();
1530
    glusterd_op_sm();
1531

1532
    return ret;
1533
}
1534

1535
static int
1536
glusterd_handle_cli_get_volume(rpcsvc_request_t *req)
1537
{
1538
    return glusterd_big_locked_handler(req, __glusterd_handle_cli_get_volume);
1539
}
1540

1541
int
1542
__glusterd_handle_cli_uuid_reset(rpcsvc_request_t *req)
1543
{
1544
    int ret = -1;
1545
    dict_t *dict = NULL;
1546
    xlator_t *this = THIS;
1547
    glusterd_conf_t *priv = NULL;
1548
    uuid_t uuid = {0};
1549
    gf_cli_rsp rsp = {
1550
        0,
1551
    };
1552
    gf_cli_req cli_req = {{
1553
        0,
1554
    }};
1555
    char msg_str[128] = {
1556
        0,
1557
    };
1558

1559
    GF_ASSERT(req);
1560

1561
    priv = this->private;
1562
    GF_ASSERT(priv);
1563

1564
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1565
    if (ret < 0) {
1566
        // failed to decode msg;
1567
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
1568
               "Failed to decode "
1569
               "request received from cli");
1570
        req->rpc_err = GARBAGE_ARGS;
1571
        goto out;
1572
    }
1573

1574
    gf_msg_debug("glusterd", 0, "Received uuid reset req");
1575

1576
    if (cli_req.dict.dict_len) {
1577
        /* Unserialize the dictionary */
1578
        dict = dict_new();
1579

1580
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
1581
                               &dict);
1582
        if (ret < 0) {
1583
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
1584
                   "failed to "
1585
                   "unserialize req-buffer to dictionary");
1586
            snprintf(msg_str, sizeof(msg_str),
1587
                     "Unable to decode "
1588
                     "the buffer");
1589
            goto out;
1590
        } else {
1591
            dict->extra_stdfree = cli_req.dict.dict_val;
1592
        }
1593
    }
1594

1595
    /* In the above section if dict_unserialize is successful, ret is set
1596
     * to zero.
1597
     */
1598
    ret = -1;
1599
    // Do not allow peer reset if there are any volumes in the cluster
1600
    if (!cds_list_empty(&priv->volumes)) {
1601
        snprintf(msg_str, sizeof(msg_str),
1602
                 "volumes are already "
1603
                 "present in the cluster. Resetting uuid is not "
1604
                 "allowed");
1605
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLS_ALREADY_PRESENT, "%s",
1606
               msg_str);
1607
        goto out;
1608
    }
1609

1610
    // Do not allow peer reset if trusted storage pool is already formed
1611
    if (!cds_list_empty(&priv->peers)) {
1612
        snprintf(msg_str, sizeof(msg_str),
1613
                 "trusted storage pool "
1614
                 "has been already formed. Please detach this peer "
1615
                 "from the pool and reset its uuid.");
1616
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_TSP_ALREADY_FORMED, "%s",
1617
               msg_str);
1618
        goto out;
1619
    }
1620

1621
    gf_uuid_copy(uuid, priv->uuid);
1622
    ret = glusterd_uuid_generate_save();
1623

1624
    if (!gf_uuid_compare(uuid, MY_UUID)) {
1625
        snprintf(msg_str, sizeof(msg_str),
1626
                 "old uuid and the new uuid"
1627
                 " are same. Try gluster peer reset again");
1628
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UUIDS_SAME_RETRY, "%s",
1629
               msg_str);
1630
        ret = -1;
1631
        goto out;
1632
    }
1633

1634
out:
1635
    if (ret) {
1636
        rsp.op_ret = -1;
1637
        if (msg_str[0] == '\0')
1638
            snprintf(msg_str, sizeof(msg_str),
1639
                     "Operation "
1640
                     "failed");
1641
        rsp.op_errstr = msg_str;
1642
        ret = 0;
1643
    } else {
1644
        rsp.op_errstr = "";
1645
    }
1646

1647
    glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);
1648

1649
    return ret;
1650
}
1651

1652
int
1653
glusterd_handle_cli_uuid_reset(rpcsvc_request_t *req)
1654
{
1655
    return glusterd_big_locked_handler(req, __glusterd_handle_cli_uuid_reset);
1656
}
1657

1658
int
1659
__glusterd_handle_cli_uuid_get(rpcsvc_request_t *req)
1660
{
1661
    int ret = -1;
1662
    dict_t *dict = NULL;
1663
    dict_t *rsp_dict = NULL;
1664
    xlator_t *this = THIS;
1665
    gf_cli_rsp rsp = {
1666
        0,
1667
    };
1668
    gf_cli_req cli_req = {{
1669
        0,
1670
    }};
1671
    char err_str[64] = {
1672
        0,
1673
    };
1674
    char uuid_str[64] = {
1675
        0,
1676
    };
1677

1678
    GF_ASSERT(req);
1679

1680
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1681
    if (ret < 0) {
1682
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
1683
               "Failed to decode "
1684
               "request received from cli");
1685
        req->rpc_err = GARBAGE_ARGS;
1686
        goto out;
1687
    }
1688

1689
    gf_msg_debug("glusterd", 0, "Received uuid get req");
1690

1691
    if (cli_req.dict.dict_len) {
1692
        dict = dict_new();
1693
        if (!dict) {
1694
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
1695
                    NULL);
1696
            ret = -1;
1697
            goto out;
1698
        }
1699

1700
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
1701
                               &dict);
1702
        if (ret < 0) {
1703
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
1704
                   "failed to "
1705
                   "unserialize req-buffer to dictionary");
1706
            snprintf(err_str, sizeof(err_str),
1707
                     "Unable to decode "
1708
                     "the buffer");
1709
            goto out;
1710

1711
        } else {
1712
            dict->extra_stdfree = cli_req.dict.dict_val;
1713
        }
1714
    }
1715

1716
    rsp_dict = dict_new();
1717
    if (!rsp_dict) {
1718
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
1719
        ret = -1;
1720
        goto out;
1721
    }
1722

1723
    uuid_utoa_r(MY_UUID, uuid_str);
1724
    ret = dict_set_str_sizen(rsp_dict, "uuid", uuid_str);
1725
    if (ret) {
1726
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
1727
               "Failed to set uuid in "
1728
               "dictionary.");
1729
        goto out;
1730
    }
1731

1732
    ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
1733
                                      &rsp.dict.dict_len);
1734
    if (ret) {
1735
        gf_smsg(this->name, GF_LOG_ERROR, errno,
1736
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
1737
        goto out;
1738
    }
1739
    ret = 0;
1740
out:
1741
    if (ret) {
1742
        rsp.op_ret = -1;
1743
        if (err_str[0] == '\0')
1744
            snprintf(err_str, sizeof(err_str),
1745
                     "Operation "
1746
                     "failed");
1747
        rsp.op_errstr = err_str;
1748

1749
    } else {
1750
        rsp.op_errstr = "";
1751
    }
1752

1753
    glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);
1754

1755
    if (rsp_dict)
1756
        dict_unref(rsp_dict);
1757
    GF_FREE(rsp.dict.dict_val);
1758

1759
    return 0;
1760
}
1761
int
1762
glusterd_handle_cli_uuid_get(rpcsvc_request_t *req)
1763
{
1764
    return glusterd_big_locked_handler(req, __glusterd_handle_cli_uuid_get);
1765
}
1766

1767
int
1768
__glusterd_handle_cli_list_volume(rpcsvc_request_t *req)
1769
{
1770
    int ret = -1;
1771
    dict_t *dict = NULL;
1772
    glusterd_conf_t *priv = NULL;
1773
    glusterd_volinfo_t *volinfo = NULL;
1774
    int count = 0;
1775
    char key[64] = {
1776
        0,
1777
    };
1778
    int keylen;
1779
    gf_cli_rsp rsp = {
1780
        0,
1781
    };
1782

1783
    GF_ASSERT(req);
1784

1785
    priv = THIS->private;
1786
    GF_ASSERT(priv);
1787

1788
    dict = dict_new();
1789
    if (!dict) {
1790
        gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
1791
        goto out;
1792
    }
1793

1794
    cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
1795
    {
1796
        keylen = snprintf(key, sizeof(key), "volume%d", count);
1797
        ret = dict_set_strn(dict, key, keylen, volinfo->volname);
1798
        if (ret)
1799
            goto out;
1800
        count++;
1801
    }
1802

1803
    ret = dict_set_int32_sizen(dict, "count", count);
1804
    if (ret) {
1805
        gf_smsg("glusterd", GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
1806
                "Key=count", NULL);
1807
        goto out;
1808
    }
1809

1810
    ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
1811
                                      &rsp.dict.dict_len);
1812
    if (ret)
1813
        goto out;
1814

1815
    ret = 0;
1816

1817
out:
1818
    rsp.op_ret = ret;
1819
    if (ret)
1820
        rsp.op_errstr = "Error listing volumes";
1821
    else
1822
        rsp.op_errstr = "";
1823

1824
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp);
1825
    ret = 0;
1826

1827
    if (dict)
1828
        dict_unref(dict);
1829

1830
    GF_FREE(rsp.dict.dict_val);
1831

1832
    glusterd_friend_sm();
1833
    glusterd_op_sm();
1834

1835
    return ret;
1836
}
1837

1838
static int
1839
glusterd_handle_cli_list_volume(rpcsvc_request_t *req)
1840
{
1841
    return glusterd_big_locked_handler(req, __glusterd_handle_cli_list_volume);
1842
}
1843

1844
int32_t
1845
glusterd_op_begin(rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
1846
                  char *err_str, size_t err_len)
1847
{
1848
    int ret = -1;
1849

1850
    ret = glusterd_op_txn_begin(req, op, ctx, err_str, err_len);
1851

1852
    return ret;
1853
}
1854

1855
int
1856
__glusterd_handle_ganesha_cmd(rpcsvc_request_t *req)
1857
{
1858
    int32_t ret = -1;
1859
    gf_cli_req cli_req = {{
1860
        0,
1861
    }};
1862
    dict_t *dict = NULL;
1863
    glusterd_op_t cli_op = GD_OP_GANESHA;
1864
    char *op_errstr = NULL;
1865
    char err_str[2048] = {
1866
        0,
1867
    };
1868
    xlator_t *this = THIS;
1869

1870
    GF_ASSERT(req);
1871

1872
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1873
    if (ret < 0) {
1874
        snprintf(err_str, sizeof(err_str),
1875
                 "Failed to decode "
1876
                 "request received from cli");
1877
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
1878
               err_str);
1879
        req->rpc_err = GARBAGE_ARGS;
1880
        goto out;
1881
    }
1882

1883
    if (cli_req.dict.dict_len) {
1884
        /* Unserialize the dictionary */
1885
        dict = dict_new();
1886
        if (!dict) {
1887
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
1888
                    NULL);
1889
            ret = -1;
1890
            goto out;
1891
        }
1892

1893
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
1894
                               &dict);
1895
        if (ret < 0) {
1896
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
1897
                   "failed to "
1898
                   "unserialize req-buffer to dictionary");
1899
            snprintf(err_str, sizeof(err_str),
1900
                     "Unable to decode "
1901
                     "the command");
1902
            goto out;
1903
        } else {
1904
            dict->extra_stdfree = cli_req.dict.dict_val;
1905
        }
1906
    }
1907

1908
    gf_msg_trace(this->name, 0, "Received global option request");
1909

1910
    ret = glusterd_op_begin_synctask(req, GD_OP_GANESHA, dict);
1911
out:
1912
    if (ret) {
1913
        if (err_str[0] == '\0')
1914
            snprintf(err_str, sizeof(err_str), "Operation failed");
1915
        ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
1916
    }
1917
    if (op_errstr)
1918
        GF_FREE(op_errstr);
1919
    if (dict)
1920
        dict_unref(dict);
1921

1922
    return ret;
1923
}
1924

1925
int
1926
glusterd_handle_ganesha_cmd(rpcsvc_request_t *req)
1927
{
1928
    return glusterd_big_locked_handler(req, __glusterd_handle_ganesha_cmd);
1929
}
1930

1931
static int
1932
__glusterd_handle_reset_volume(rpcsvc_request_t *req)
1933
{
1934
    int32_t ret = -1;
1935
    gf_cli_req cli_req = {{
1936
        0,
1937
    }};
1938
    dict_t *dict = NULL;
1939
    glusterd_op_t cli_op = GD_OP_RESET_VOLUME;
1940
    char *volname = NULL;
1941
    char err_str[64] = {
1942
        0,
1943
    };
1944
    xlator_t *this = THIS;
1945

1946
    GF_ASSERT(req);
1947

1948
    gf_msg(this->name, GF_LOG_INFO, 0, 0, "Received reset vol req");
1949

1950
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
1951
    if (ret < 0) {
1952
        snprintf(err_str, sizeof(err_str),
1953
                 "Failed to decode request "
1954
                 "received from cli");
1955
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
1956
               err_str);
1957
        req->rpc_err = GARBAGE_ARGS;
1958
        goto out;
1959
    }
1960

1961
    if (cli_req.dict.dict_len) {
1962
        /* Unserialize the dictionary */
1963
        dict = dict_new();
1964

1965
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
1966
                               &dict);
1967
        if (ret < 0) {
1968
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
1969
                   "failed to "
1970
                   "unserialize req-buffer to dictionary");
1971
            snprintf(err_str, sizeof(err_str),
1972
                     "Unable to decode "
1973
                     "the command");
1974
            goto out;
1975
        } else {
1976
            dict->extra_stdfree = cli_req.dict.dict_val;
1977
        }
1978
    }
1979

1980
    ret = dict_get_str(dict, "volname", &volname);
1981
    if (ret) {
1982
        snprintf(err_str, sizeof(err_str),
1983
                 "Failed to get volume "
1984
                 "name");
1985
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
1986
               "%s", err_str);
1987
        goto out;
1988
    }
1989
    gf_msg_debug(this->name, 0,
1990
                 "Received volume reset request for "
1991
                 "volume %s",
1992
                 volname);
1993

1994
    ret = glusterd_op_begin_synctask(req, GD_OP_RESET_VOLUME, dict);
1995

1996
out:
1997
    if (ret) {
1998
        if (err_str[0] == '\0')
1999
            snprintf(err_str, sizeof(err_str), "Operation failed");
2000
        ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
2001
    }
2002

2003
    return ret;
2004
}
2005

2006
static int
2007
glusterd_handle_reset_volume(rpcsvc_request_t *req)
2008
{
2009
    return glusterd_big_locked_handler(req, __glusterd_handle_reset_volume);
2010
}
2011

2012
int
2013
__glusterd_handle_set_volume(rpcsvc_request_t *req)
2014
{
2015
    int32_t ret = -1;
2016
    gf_cli_req cli_req = {{
2017
        0,
2018
    }};
2019
    dict_t *dict = NULL;
2020
    glusterd_op_t cli_op = GD_OP_SET_VOLUME;
2021
    char *key = NULL;
2022
    char *value = NULL;
2023
    char *volname = NULL;
2024
    char *op_errstr = NULL;
2025
    gf_boolean_t help = _gf_false;
2026
    char err_str[2048] = {
2027
        0,
2028
    };
2029
    xlator_t *this = THIS;
2030

2031
    GF_ASSERT(req);
2032

2033
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
2034
    if (ret < 0) {
2035
        snprintf(err_str, sizeof(err_str),
2036
                 "Failed to decode "
2037
                 "request received from cli");
2038
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
2039
               err_str);
2040
        req->rpc_err = GARBAGE_ARGS;
2041
        goto out;
2042
    }
2043

2044
    if (cli_req.dict.dict_len) {
2045
        /* Unserialize the dictionary */
2046
        dict = dict_new();
2047

2048
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
2049
                               &dict);
2050
        if (ret < 0) {
2051
            gf_msg(this->name, GF_LOG_ERROR, errno,
2052
                   GD_MSG_DICT_UNSERIALIZE_FAIL,
2053
                   "failed to "
2054
                   "unserialize req-buffer to dictionary");
2055
            snprintf(err_str, sizeof(err_str),
2056
                     "Unable to decode "
2057
                     "the command");
2058
            goto out;
2059
        } else {
2060
            dict->extra_stdfree = cli_req.dict.dict_val;
2061
        }
2062
    }
2063

2064
    ret = dict_get_str(dict, "volname", &volname);
2065
    if (ret) {
2066
        snprintf(err_str, sizeof(err_str),
2067
                 "Failed to get volume "
2068
                 "name while handling volume set command");
2069
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
2070
               err_str);
2071
        goto out;
2072
    }
2073

2074
    if (strcmp(volname, "help") == 0 || strcmp(volname, "help-xml") == 0) {
2075
        ret = glusterd_volset_help(dict, &op_errstr);
2076
        help = _gf_true;
2077
        goto out;
2078
    }
2079

2080
    ret = dict_get_str(dict, "key1", &key);
2081
    if (ret) {
2082
        snprintf(err_str, sizeof(err_str),
2083
                 "Failed to get key while"
2084
                 " handling volume set for %s",
2085
                 volname);
2086
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
2087
               err_str);
2088
        goto out;
2089
    }
2090

2091
    ret = dict_get_str(dict, "value1", &value);
2092
    if (ret) {
2093
        snprintf(err_str, sizeof(err_str),
2094
                 "Failed to get value while"
2095
                 " handling volume set for %s",
2096
                 volname);
2097
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
2098
               err_str);
2099
        goto out;
2100
    }
2101
    gf_msg_debug(this->name, 0,
2102
                 "Received volume set request for "
2103
                 "volume %s",
2104
                 volname);
2105

2106
    ret = glusterd_op_begin_synctask(req, GD_OP_SET_VOLUME, dict);
2107

2108
out:
2109
    if (help)
2110
        ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict,
2111
                                            (op_errstr) ? op_errstr : "");
2112
    else if (ret) {
2113
        if (err_str[0] == '\0')
2114
            snprintf(err_str, sizeof(err_str), "Operation failed");
2115
        ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
2116
    }
2117
    if (op_errstr)
2118
        GF_FREE(op_errstr);
2119

2120
    return ret;
2121
}
2122

2123
static int
2124
glusterd_handle_set_volume(rpcsvc_request_t *req)
2125
{
2126
    return glusterd_big_locked_handler(req, __glusterd_handle_set_volume);
2127
}
2128

2129
int
2130
__glusterd_handle_sync_volume(rpcsvc_request_t *req)
2131
{
2132
    int32_t ret = -1;
2133
    gf_cli_req cli_req = {{
2134
        0,
2135
    }};
2136
    dict_t *dict = NULL;
2137
    gf_cli_rsp cli_rsp = {0.};
2138
    char msg[2048] = {
2139
        0,
2140
    };
2141
    char *volname = NULL;
2142
    gf1_cli_sync_volume flags = 0;
2143
    char *hostname = NULL;
2144
    xlator_t *this = THIS;
2145

2146
    GF_ASSERT(req);
2147

2148
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
2149
    if (ret < 0) {
2150
        // failed to decode msg;
2151
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
2152
               "Failed to decode "
2153
               "request received from cli");
2154
        req->rpc_err = GARBAGE_ARGS;
2155
        goto out;
2156
    }
2157

2158
    if (cli_req.dict.dict_len) {
2159
        /* Unserialize the dictionary */
2160
        dict = dict_new();
2161

2162
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
2163
                               &dict);
2164
        if (ret < 0) {
2165
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
2166
                   "failed to "
2167
                   "unserialize req-buffer to dictionary");
2168
            snprintf(msg, sizeof(msg),
2169
                     "Unable to decode the "
2170
                     "command");
2171
            goto out;
2172
        } else {
2173
            dict->extra_stdfree = cli_req.dict.dict_val;
2174
        }
2175
    }
2176

2177
    ret = dict_get_str(dict, "hostname", &hostname);
2178
    if (ret) {
2179
        snprintf(msg, sizeof(msg), "Failed to get hostname");
2180
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_NOTFOUND_IN_DICT,
2181
               "%s", msg);
2182
        goto out;
2183
    }
2184

2185
    ret = dict_get_str(dict, "volname", &volname);
2186
    if (ret) {
2187
        ret = dict_get_int32(dict, "flags", (int32_t *)&flags);
2188
        if (ret) {
2189
            snprintf(msg, sizeof(msg), "Failed to get volume name or flags");
2190
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FLAGS_NOTFOUND_IN_DICT,
2191
                   "%s", msg);
2192
            goto out;
2193
        }
2194
    }
2195

2196
    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_VOL_SYNC_REQ_RCVD,
2197
           "Received volume sync req "
2198
           "for volume %s",
2199
           (flags & GF_CLI_SYNC_ALL) ? "all" : volname);
2200

2201
    if (glusterd_gf_is_local_addr(hostname)) {
2202
        ret = -1;
2203
        snprintf(msg, sizeof(msg),
2204
                 "sync from localhost"
2205
                 " not allowed");
2206
        gf_msg(this->name, GF_LOG_ERROR, 0,
2207
               GD_MSG_SYNC_FROM_LOCALHOST_UNALLOWED, "%s", msg);
2208
        goto out;
2209
    }
2210

2211
    ret = glusterd_op_begin_synctask(req, GD_OP_SYNC_VOLUME, dict);
2212

2213
out:
2214
    if (ret) {
2215
        cli_rsp.op_ret = -1;
2216
        cli_rsp.op_errstr = msg;
2217
        if (msg[0] == '\0')
2218
            snprintf(msg, sizeof(msg), "Operation failed");
2219
        glusterd_to_cli(req, &cli_rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp,
2220
                        dict);
2221

2222
        ret = 0;  // sent error to cli, prevent second reply
2223
    }
2224

2225
    return ret;
2226
}
2227

2228
static int
2229
glusterd_handle_sync_volume(rpcsvc_request_t *req)
2230
{
2231
    return glusterd_big_locked_handler(req, __glusterd_handle_sync_volume);
2232
}
2233

2234
int
2235
glusterd_fsm_log_send_resp(rpcsvc_request_t *req, int op_ret, char *op_errstr,
2236
                           dict_t *dict)
2237
{
2238
    int ret = -1;
2239
    gf1_cli_fsm_log_rsp rsp = {0};
2240

2241
    GF_ASSERT(req);
2242
    GF_ASSERT(op_errstr);
2243

2244
    rsp.op_ret = op_ret;
2245
    rsp.op_errstr = op_errstr;
2246
    if (rsp.op_ret == 0) {
2247
        ret = dict_allocate_and_serialize(dict, &rsp.fsm_log.fsm_log_val,
2248
                                          &rsp.fsm_log.fsm_log_len);
2249
        if (ret < 0) {
2250
            gf_smsg("glusterd", GF_LOG_ERROR, errno,
2251
                    GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
2252
            return ret;
2253
        }
2254
    }
2255

2256
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
2257
                                (xdrproc_t)xdr_gf1_cli_fsm_log_rsp);
2258
    GF_FREE(rsp.fsm_log.fsm_log_val);
2259

2260
    gf_msg_debug("glusterd", 0, "Responded, ret: %d", ret);
2261

2262
    return 0;
2263
}
2264

2265
static int
2266
glusterd_sm_tr_log_transition_add_to_dict(dict_t *dict,
2267
                                          glusterd_sm_tr_log_t *log, int i,
2268
                                          int count)
2269
{
2270
    int ret = -1;
2271
    char key[64] = "";
2272
    int keylen;
2273
    char timestr[GF_TIMESTR_SIZE] = "";
2274
    char *str = NULL;
2275

2276
    GF_ASSERT(dict);
2277
    GF_ASSERT(log);
2278

2279
    keylen = snprintf(key, sizeof(key), "log%d-old-state", count);
2280
    str = log->state_name_get(log->transitions[i].old_state);
2281
    ret = dict_set_strn(dict, key, keylen, str);
2282
    if (ret)
2283
        goto out;
2284

2285
    keylen = snprintf(key, sizeof(key), "log%d-event", count);
2286
    str = log->event_name_get(log->transitions[i].event);
2287
    ret = dict_set_strn(dict, key, keylen, str);
2288
    if (ret)
2289
        goto out;
2290

2291
    keylen = snprintf(key, sizeof(key), "log%d-new-state", count);
2292
    str = log->state_name_get(log->transitions[i].new_state);
2293
    ret = dict_set_strn(dict, key, keylen, str);
2294
    if (ret)
2295
        goto out;
2296

2297
    snprintf(key, sizeof(key), "log%d-time", count);
2298
    gf_time_fmt_FT(timestr, sizeof timestr, log->transitions[i].time);
2299
    ret = dict_set_dynstr_with_alloc(dict, key, timestr);
2300
    if (ret)
2301
        goto out;
2302

2303
out:
2304
    if (key[0] != '\0' && ret != 0)
2305
        gf_smsg("glusterd", GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
2306
                "Key=%s", key, NULL);
2307
    gf_msg_debug("glusterd", 0, "returning %d", ret);
2308
    return ret;
2309
}
2310

2311
static int
2312
glusterd_sm_tr_log_add_to_dict(dict_t *dict, glusterd_sm_tr_log_t *circular_log)
2313
{
2314
    int ret = -1;
2315
    int i = 0;
2316
    int start = 0;
2317
    int end = 0;
2318
    int index = 0;
2319
    char key[16] = {0};
2320
    glusterd_sm_tr_log_t *log = NULL;
2321
    int count = 0;
2322

2323
    GF_ASSERT(dict);
2324
    GF_ASSERT(circular_log);
2325

2326
    log = circular_log;
2327
    if (!log->count)
2328
        return 0;
2329

2330
    if (log->count == log->size)
2331
        start = log->current + 1;
2332

2333
    end = start + log->count;
2334
    for (i = start; i < end; i++, count++) {
2335
        index = i % log->count;
2336
        ret = glusterd_sm_tr_log_transition_add_to_dict(dict, log, index,
2337
                                                        count);
2338
        if (ret)
2339
            goto out;
2340
    }
2341

2342
    ret = snprintf(key, sizeof(key), "count");
2343
    ret = dict_set_int32n(dict, key, ret, log->count);
2344

2345
out:
2346
    gf_msg_debug("glusterd", 0, "returning %d", ret);
2347
    return ret;
2348
}
2349

2350
int
2351
__glusterd_handle_fsm_log(rpcsvc_request_t *req)
2352
{
2353
    int32_t ret = -1;
2354
    gf1_cli_fsm_log_req cli_req = {
2355
        0,
2356
    };
2357
    dict_t *dict = NULL;
2358
    xlator_t *this = THIS;
2359
    glusterd_conf_t *conf = NULL;
2360
    char msg[2048] = {0};
2361
    glusterd_peerinfo_t *peerinfo = NULL;
2362

2363
    GF_ASSERT(req);
2364

2365
    ret = xdr_to_generic(req->msg[0], &cli_req,
2366
                         (xdrproc_t)xdr_gf1_cli_fsm_log_req);
2367
    if (ret < 0) {
2368
        // failed to decode msg;
2369
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
2370
               "Failed to decode "
2371
               "request received from client.");
2372
        req->rpc_err = GARBAGE_ARGS;
2373
        snprintf(msg, sizeof(msg), "Garbage request");
2374
        goto out;
2375
    }
2376

2377
    dict = dict_new();
2378
    if (!dict) {
2379
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
2380
        ret = -1;
2381
        goto out;
2382
    }
2383

2384
    if (strcmp("", cli_req.name) == 0) {
2385
        conf = this->private;
2386
        ret = glusterd_sm_tr_log_add_to_dict(dict, &conf->op_sm_log);
2387
    } else {
2388
        RCU_READ_LOCK;
2389

2390
        peerinfo = glusterd_peerinfo_find_by_hostname(cli_req.name);
2391
        if (!peerinfo) {
2392
            RCU_READ_UNLOCK;
2393
            ret = -1;
2394
            snprintf(msg, sizeof(msg), "%s is not a peer", cli_req.name);
2395
        } else {
2396
            ret = glusterd_sm_tr_log_add_to_dict(dict, &peerinfo->sm_log);
2397
            RCU_READ_UNLOCK;
2398
        }
2399
    }
2400

2401
out:
2402
    (void)glusterd_fsm_log_send_resp(req, ret, msg, dict);
2403
    free(cli_req.name);  // malloced by xdr
2404
    if (dict)
2405
        dict_unref(dict);
2406

2407
    glusterd_friend_sm();
2408
    glusterd_op_sm();
2409

2410
    return 0;  // send 0 to avoid double reply
2411
}
2412

2413
static int
2414
glusterd_handle_fsm_log(rpcsvc_request_t *req)
2415
{
2416
    return glusterd_big_locked_handler(req, __glusterd_handle_fsm_log);
2417
}
2418

2419
int
2420
glusterd_op_lock_send_resp(rpcsvc_request_t *req, int32_t status)
2421
{
2422
    gd1_mgmt_cluster_lock_rsp rsp = {
2423
        {0},
2424
    };
2425
    int ret = -1;
2426

2427
    GF_ASSERT(req);
2428
    glusterd_get_uuid(&rsp.uuid);
2429
    rsp.op_ret = status;
2430

2431
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
2432
                                (xdrproc_t)xdr_gd1_mgmt_cluster_lock_rsp);
2433

2434
    gf_msg_debug(THIS->name, 0, "Responded to lock, ret: %d", ret);
2435

2436
    return 0;
2437
}
2438

2439
int
2440
glusterd_op_unlock_send_resp(rpcsvc_request_t *req, int32_t status)
2441
{
2442
    gd1_mgmt_cluster_unlock_rsp rsp = {
2443
        {0},
2444
    };
2445
    int ret = -1;
2446

2447
    GF_ASSERT(req);
2448
    rsp.op_ret = status;
2449
    glusterd_get_uuid(&rsp.uuid);
2450

2451
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
2452
                                (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_rsp);
2453

2454
    gf_msg_debug(THIS->name, 0, "Responded to unlock, ret: %d", ret);
2455

2456
    return ret;
2457
}
2458

2459
int
2460
glusterd_op_mgmt_v3_lock_send_resp(rpcsvc_request_t *req, uuid_t *txn_id,
2461
                                   int32_t status)
2462
{
2463
    gd1_mgmt_v3_lock_rsp rsp = {
2464
        {0},
2465
    };
2466
    int ret = -1;
2467

2468
    GF_ASSERT(req);
2469
    GF_ASSERT(txn_id);
2470
    glusterd_get_uuid(&rsp.uuid);
2471
    rsp.op_ret = status;
2472
    if (rsp.op_ret)
2473
        rsp.op_errno = errno;
2474
    gf_uuid_copy(rsp.txn_id, *txn_id);
2475

2476
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
2477
                                (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
2478

2479
    gf_msg_debug(THIS->name, 0, "Responded to mgmt_v3 lock, ret: %d", ret);
2480

2481
    return ret;
2482
}
2483

2484
int
2485
glusterd_op_mgmt_v3_unlock_send_resp(rpcsvc_request_t *req, uuid_t *txn_id,
2486
                                     int32_t status)
2487
{
2488
    gd1_mgmt_v3_unlock_rsp rsp = {
2489
        {0},
2490
    };
2491
    int ret = -1;
2492

2493
    GF_ASSERT(req);
2494
    GF_ASSERT(txn_id);
2495
    rsp.op_ret = status;
2496
    if (rsp.op_ret)
2497
        rsp.op_errno = errno;
2498
    glusterd_get_uuid(&rsp.uuid);
2499
    gf_uuid_copy(rsp.txn_id, *txn_id);
2500

2501
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
2502
                                (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
2503

2504
    gf_msg_debug(THIS->name, 0, "Responded to mgmt_v3 unlock, ret: %d", ret);
2505

2506
    return ret;
2507
}
2508

2509
int
2510
__glusterd_handle_cluster_unlock(rpcsvc_request_t *req)
2511
{
2512
    gd1_mgmt_cluster_unlock_req unlock_req = {
2513
        {0},
2514
    };
2515
    int32_t ret = -1;
2516
    glusterd_op_lock_ctx_t *ctx = NULL;
2517
    xlator_t *this = THIS;
2518
    uuid_t *txn_id = NULL;
2519
    glusterd_conf_t *priv = NULL;
2520

2521
    priv = this->private;
2522
    GF_ASSERT(priv);
2523
    GF_ASSERT(req);
2524

2525
    txn_id = &priv->global_txn_id;
2526

2527
    ret = xdr_to_generic(req->msg[0], &unlock_req,
2528
                         (xdrproc_t)xdr_gd1_mgmt_cluster_unlock_req);
2529
    if (ret < 0) {
2530
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
2531
               "Failed to decode unlock "
2532
               "request received from peer");
2533
        req->rpc_err = GARBAGE_ARGS;
2534
        goto out;
2535
    }
2536

2537
    gf_msg_debug(this->name, 0, "Received UNLOCK from uuid: %s",
2538
                 uuid_utoa(unlock_req.uuid));
2539

2540
    RCU_READ_LOCK;
2541
    ret = (glusterd_peerinfo_find_by_uuid(unlock_req.uuid) == NULL);
2542
    RCU_READ_LOCK;
2543
    if (ret) {
2544
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
2545
               "%s doesn't "
2546
               "belong to the cluster. Ignoring request.",
2547
               uuid_utoa(unlock_req.uuid));
2548
        ret = -1;
2549
        goto out;
2550
    }
2551

2552
    ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t);
2553

2554
    if (!ctx) {
2555
        // respond here
2556
        gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
2557
               "No memory.");
2558
        return -1;
2559
    }
2560
    gf_uuid_copy(ctx->uuid, unlock_req.uuid);
2561
    ctx->req = req;
2562
    ctx->dict = NULL;
2563

2564
    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_UNLOCK, txn_id, ctx);
2565

2566
out:
2567
    glusterd_friend_sm();
2568
    glusterd_op_sm();
2569

2570
    return ret;
2571
}
2572

2573
static int
2574
glusterd_handle_cluster_unlock(rpcsvc_request_t *req)
2575
{
2576
    return glusterd_big_locked_handler(req, __glusterd_handle_cluster_unlock);
2577
}
2578

2579
int
2580
glusterd_op_stage_send_resp(rpcsvc_request_t *req, int32_t op, int32_t status,
2581
                            char *op_errstr, dict_t *rsp_dict)
2582
{
2583
    gd1_mgmt_stage_op_rsp rsp = {
2584
        {0},
2585
    };
2586
    int ret = -1;
2587
    xlator_t *this = THIS;
2588

2589
    GF_ASSERT(req);
2590

2591
    rsp.op_ret = status;
2592
    glusterd_get_uuid(&rsp.uuid);
2593
    rsp.op = op;
2594
    if (op_errstr)
2595
        rsp.op_errstr = op_errstr;
2596
    else
2597
        rsp.op_errstr = "";
2598

2599
    ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
2600
                                      &rsp.dict.dict_len);
2601
    if (ret < 0) {
2602
        gf_smsg(this->name, GF_LOG_ERROR, errno,
2603
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
2604
        return ret;
2605
    }
2606

2607
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
2608
                                (xdrproc_t)xdr_gd1_mgmt_stage_op_rsp);
2609

2610
    gf_msg_debug(this->name, 0, "Responded to stage, ret: %d", ret);
2611
    GF_FREE(rsp.dict.dict_val);
2612

2613
    return ret;
2614
}
2615

2616
int
2617
glusterd_op_commit_send_resp(rpcsvc_request_t *req, int32_t op, int32_t status,
2618
                             char *op_errstr, dict_t *rsp_dict)
2619
{
2620
    gd1_mgmt_commit_op_rsp rsp = {
2621
        {0},
2622
    };
2623
    int ret = -1;
2624
    xlator_t *this = THIS;
2625

2626
    GF_ASSERT(req);
2627
    rsp.op_ret = status;
2628
    glusterd_get_uuid(&rsp.uuid);
2629
    rsp.op = op;
2630

2631
    if (op_errstr)
2632
        rsp.op_errstr = op_errstr;
2633
    else
2634
        rsp.op_errstr = "";
2635

2636
    if (rsp_dict) {
2637
        ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
2638
                                          &rsp.dict.dict_len);
2639
        if (ret < 0) {
2640
            gf_smsg(this->name, GF_LOG_ERROR, errno,
2641
                    GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
2642
            goto out;
2643
        }
2644
    }
2645

2646
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
2647
                                (xdrproc_t)xdr_gd1_mgmt_commit_op_rsp);
2648

2649
    gf_msg_debug(this->name, 0, "Responded to commit, ret: %d", ret);
2650

2651
out:
2652
    GF_FREE(rsp.dict.dict_val);
2653
    return ret;
2654
}
2655

2656
int
2657
__glusterd_handle_incoming_friend_req(rpcsvc_request_t *req)
2658
{
2659
    int32_t ret = -1;
2660
    gd1_mgmt_friend_req friend_req = {
2661
        {0},
2662
    };
2663
    gf_boolean_t run_fsm = _gf_true;
2664

2665
    GF_ASSERT(req);
2666
    ret = xdr_to_generic(req->msg[0], &friend_req,
2667
                         (xdrproc_t)xdr_gd1_mgmt_friend_req);
2668
    if (ret < 0) {
2669
        // failed to decode msg;
2670
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
2671
               "Failed to decode "
2672
               "request received from friend");
2673
        req->rpc_err = GARBAGE_ARGS;
2674
        goto out;
2675
    }
2676

2677
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PROBE_RCVD,
2678
           "Received probe from uuid: %s", uuid_utoa(friend_req.uuid));
2679
    ret = glusterd_handle_friend_req(req, friend_req.uuid, friend_req.hostname,
2680
                                     friend_req.port, &friend_req);
2681

2682
    if (ret == GLUSTERD_CONNECTION_AWAITED) {
2683
        // fsm should be run after connection establishes
2684
        run_fsm = _gf_false;
2685
        ret = 0;
2686
    }
2687

2688
out:
2689
    free(friend_req.hostname);  // malloced by xdr
2690

2691
    if (run_fsm) {
2692
        glusterd_friend_sm();
2693
        glusterd_op_sm();
2694
    }
2695

2696
    return ret;
2697
}
2698

2699
static int
2700
glusterd_handle_incoming_friend_req(rpcsvc_request_t *req)
2701
{
2702
    return glusterd_big_locked_handler(req,
2703
                                       __glusterd_handle_incoming_friend_req);
2704
}
2705

2706
int
2707
__glusterd_handle_incoming_unfriend_req(rpcsvc_request_t *req)
2708
{
2709
    int32_t ret = -1;
2710
    gd1_mgmt_friend_req friend_req = {
2711
        {0},
2712
    };
2713
    char remote_hostname[UNIX_PATH_MAX + 1] = {
2714
        0,
2715
    };
2716

2717
    GF_ASSERT(req);
2718
    ret = xdr_to_generic(req->msg[0], &friend_req,
2719
                         (xdrproc_t)xdr_gd1_mgmt_friend_req);
2720
    if (ret < 0) {
2721
        // failed to decode msg;
2722
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
2723
               "Failed to decode "
2724
               "request received.");
2725
        req->rpc_err = GARBAGE_ARGS;
2726
        goto out;
2727
    }
2728

2729
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_UNFRIEND_REQ_RCVD,
2730
           "Received unfriend from uuid: %s", uuid_utoa(friend_req.uuid));
2731

2732
    ret = glusterd_remote_hostname_get(req, remote_hostname,
2733
                                       sizeof(remote_hostname));
2734
    if (ret) {
2735
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL,
2736
               "Unable to get the remote hostname");
2737
        goto out;
2738
    }
2739
    ret = glusterd_handle_unfriend_req(req, friend_req.uuid, remote_hostname,
2740
                                       friend_req.port);
2741

2742
out:
2743
    free(friend_req.hostname);       // malloced by xdr
2744
    free(friend_req.vols.vols_val);  // malloced by xdr
2745

2746
    glusterd_friend_sm();
2747
    glusterd_op_sm();
2748

2749
    return ret;
2750
}
2751

2752
static int
2753
glusterd_handle_incoming_unfriend_req(rpcsvc_request_t *req)
2754
{
2755
    return glusterd_big_locked_handler(req,
2756
                                       __glusterd_handle_incoming_unfriend_req);
2757
}
2758

2759
int
2760
glusterd_handle_friend_update_delete(dict_t *dict)
2761
{
2762
    char *hostname = NULL;
2763
    int32_t ret = -1;
2764

2765
    GF_ASSERT(dict);
2766

2767
    ret = dict_get_str(dict, "hostname", &hostname);
2768
    if (ret)
2769
        goto out;
2770

2771
    ret = glusterd_friend_remove(NULL, hostname);
2772

2773
out:
2774
    gf_msg_debug("glusterd", 0, "Returning %d", ret);
2775
    return ret;
2776
}
2777

2778
int
2779
glusterd_peer_hostname_update(glusterd_peerinfo_t *peerinfo,
2780
                              const char *hostname, gf_boolean_t store_update)
2781
{
2782
    int ret = 0;
2783

2784
    GF_ASSERT(peerinfo);
2785
    GF_ASSERT(hostname);
2786

2787
    ret = gd_add_address_to_peer(peerinfo, hostname, _gf_true);
2788
    if (ret) {
2789
        gf_msg(THIS->name, GF_LOG_ERROR, 0,
2790
               GD_MSG_HOSTNAME_ADD_TO_PEERLIST_FAIL,
2791
               "Couldn't add address to the peer info");
2792
        goto out;
2793
    }
2794

2795
    if (store_update)
2796
        ret = glusterd_store_peerinfo(peerinfo);
2797

2798
    if (peerinfo->hostname != NULL) {
2799
        GF_FREE(peerinfo->hostname);
2800
    }
2801
    peerinfo->hostname = gf_strdup(hostname);
2802
    if (peerinfo->hostname == NULL) {
2803
        ret = -1;
2804
        goto out;
2805
    }
2806

2807
    if (peerinfo->rpc == NULL)
2808
        goto out;
2809

2810
    char *remote_hostname = NULL;
2811
    remote_hostname = gf_strdup(hostname);
2812
    if (remote_hostname == NULL) {
2813
        ret = -1;
2814
        goto out;
2815
    }
2816
    ret = dict_set_dynstr_sizen(peerinfo->rpc->conn.trans->options,
2817
                                "remote-host", remote_hostname);
2818
    if (ret) {
2819
        gf_msg_debug(THIS->name, 0, "failed to set remote-host with %s",
2820
                     remote_hostname);
2821
        GF_FREE(remote_hostname);
2822
    }
2823
out:
2824
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);
2825
    return ret;
2826
}
2827

2828
int
2829
__glusterd_handle_friend_update(rpcsvc_request_t *req)
2830
{
2831
    int32_t ret = -1;
2832
    gd1_mgmt_friend_update friend_req = {
2833
        {0},
2834
    };
2835
    glusterd_peerinfo_t *peerinfo = NULL;
2836
    xlator_t *this = THIS;
2837
    gd1_mgmt_friend_update_rsp rsp = {
2838
        {0},
2839
    };
2840
    dict_t *dict = NULL;
2841
    char key[32] = {
2842
        0,
2843
    };
2844
    int keylen;
2845
    char *uuid_buf = NULL;
2846
    int i = 1;
2847
    int count = 0;
2848
    uuid_t uuid = {
2849
        0,
2850
    };
2851
    glusterd_peerctx_args_t args = {0};
2852
    int32_t op = 0;
2853

2854
    GF_ASSERT(req);
2855

2856
    ret = xdr_to_generic(req->msg[0], &friend_req,
2857
                         (xdrproc_t)xdr_gd1_mgmt_friend_update);
2858
    if (ret < 0) {
2859
        // failed to decode msg;
2860
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
2861
               "Failed to decode "
2862
               "request received");
2863
        req->rpc_err = GARBAGE_ARGS;
2864
        goto out;
2865
    }
2866

2867
    ret = 0;
2868
    RCU_READ_LOCK;
2869
    if (glusterd_peerinfo_find(friend_req.uuid, NULL) == NULL) {
2870
        ret = -1;
2871
    }
2872
    RCU_READ_UNLOCK;
2873
    if (ret) {
2874
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_REQ_FROM_UNKNOWN_PEER,
2875
               "Received friend update request "
2876
               "from unknown peer %s",
2877
               uuid_utoa(friend_req.uuid));
2878
        gf_event(EVENT_UNKNOWN_PEER, "peer=%s", uuid_utoa(friend_req.uuid));
2879
        goto out;
2880
    }
2881

2882
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_FRIEND_UPDATE_RCVD,
2883
           "Received friend update from uuid: %s", uuid_utoa(friend_req.uuid));
2884

2885
    if (friend_req.friends.friends_len) {
2886
        /* Unserialize the dictionary */
2887
        dict = dict_new();
2888

2889
        ret = dict_unserialize(friend_req.friends.friends_val,
2890
                               friend_req.friends.friends_len, &dict);
2891
        if (ret < 0) {
2892
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
2893
                   "failed to "
2894
                   "unserialize req-buffer to dictionary");
2895
            goto out;
2896
        } else {
2897
            dict->extra_stdfree = friend_req.friends.friends_val;
2898
        }
2899
    }
2900

2901
    ret = dict_get_int32(dict, "count", &count);
2902
    if (ret) {
2903
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
2904
                "Key=count", NULL);
2905
        goto out;
2906
    }
2907

2908
    ret = dict_get_int32(dict, "op", &op);
2909
    if (ret) {
2910
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
2911
                "Key=op", NULL);
2912
        goto out;
2913
    }
2914

2915
    if (GD_FRIEND_UPDATE_DEL == op) {
2916
        (void)glusterd_handle_friend_update_delete(dict);
2917
        goto out;
2918
    }
2919

2920
    args.mode = GD_MODE_ON;
2921
    while (i <= count) {
2922
        keylen = snprintf(key, sizeof(key), "friend%d.uuid", i);
2923
        ret = dict_get_strn(dict, key, keylen, &uuid_buf);
2924
        if (ret)
2925
            goto out;
2926
        gf_uuid_parse(uuid_buf, uuid);
2927

2928
        if (!gf_uuid_compare(uuid, MY_UUID)) {
2929
            gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_UUID_RECEIVED,
2930
                   "Received my uuid as Friend");
2931
            i++;
2932
            continue;
2933
        }
2934

2935
        snprintf(key, sizeof(key), "friend%d", i);
2936

2937
        RCU_READ_LOCK;
2938
        peerinfo = glusterd_peerinfo_find(uuid, NULL);
2939
        if (peerinfo == NULL) {
2940
            /* Create a new peer and add it to the list as there is
2941
             * no existing peer with the uuid
2942
             */
2943
            peerinfo = gd_peerinfo_from_dict(dict, key);
2944
            if (peerinfo == NULL) {
2945
                ret = -1;
2946
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
2947
                       "Could not create peerinfo from dict "
2948
                       "for prefix %s",
2949
                       key);
2950
                goto unlock;
2951
            }
2952

2953
            /* As this is a new peer, it should be added as a
2954
             * friend.  The friend state machine will take care of
2955
             * correcting the state as required
2956
             */
2957
            peerinfo->state = GD_FRIEND_STATE_BEFRIENDED;
2958

2959
            ret = glusterd_friend_add_from_peerinfo(peerinfo, 0, &args);
2960
        } else {
2961
            /* As an existing peer was found, update it with the new
2962
             * information
2963
             */
2964
            ret = gd_update_peerinfo_from_dict(peerinfo, dict, key);
2965
            if (ret) {
2966
                gf_msg(this->name, GF_LOG_ERROR, 0,
2967
                       GD_MSG_PEER_INFO_UPDATE_FAIL,
2968
                       "Failed to "
2969
                       "update peer %s",
2970
                       peerinfo->hostname);
2971
                goto unlock;
2972
            }
2973
            ret = glusterd_store_peerinfo(peerinfo);
2974
            if (ret) {
2975
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
2976
                       "Failed to store peerinfo");
2977
                gf_event(EVENT_PEER_STORE_FAILURE, "peer=%s",
2978
                         peerinfo->hostname);
2979
            }
2980
        }
2981
    unlock:
2982
        RCU_READ_UNLOCK;
2983
        if (ret)
2984
            break;
2985

2986
        peerinfo = NULL;
2987
        i++;
2988
    }
2989

2990
out:
2991
    gf_uuid_copy(rsp.uuid, MY_UUID);
2992
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
2993
                                (xdrproc_t)xdr_gd1_mgmt_friend_update_rsp);
2994
    if (dict) {
2995
        if (!dict->extra_stdfree && friend_req.friends.friends_val)
2996
            free(friend_req.friends.friends_val);  // malloced by xdr
2997
        dict_unref(dict);
2998
    } else {
2999
        free(friend_req.friends.friends_val);  // malloced by xdr
3000
    }
3001

3002
    if (peerinfo)
3003
        glusterd_peerinfo_cleanup(peerinfo);
3004

3005
    glusterd_friend_sm();
3006
    glusterd_op_sm();
3007

3008
    return ret;
3009
}
3010

3011
static int
3012
glusterd_handle_friend_update(rpcsvc_request_t *req)
3013
{
3014
    return glusterd_big_locked_handler(req, __glusterd_handle_friend_update);
3015
}
3016

3017
int
3018
__glusterd_handle_probe_query(rpcsvc_request_t *req)
3019
{
3020
    int32_t ret = -1;
3021
    xlator_t *this = THIS;
3022
    glusterd_conf_t *conf = NULL;
3023
    gd1_mgmt_probe_req probe_req = {
3024
        {0},
3025
    };
3026
    gd1_mgmt_probe_rsp rsp = {
3027
        {0},
3028
    };
3029
    glusterd_peerinfo_t *peerinfo = NULL;
3030
    glusterd_peerctx_args_t args = {0};
3031
    int port = 0;
3032
    char remote_hostname[UNIX_PATH_MAX + 1] = {
3033
        0,
3034
    };
3035

3036
    GF_ASSERT(req);
3037

3038
    ret = xdr_to_generic(req->msg[0], &probe_req,
3039
                         (xdrproc_t)xdr_gd1_mgmt_probe_req);
3040
    if (ret < 0) {
3041
        // failed to decode msg;
3042
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
3043
               "Failed to decode probe "
3044
               "request");
3045
        req->rpc_err = GARBAGE_ARGS;
3046
        goto out;
3047
    }
3048

3049
    conf = this->private;
3050
    if (probe_req.port)
3051
        port = probe_req.port;
3052
    else
3053
        port = GF_DEFAULT_BASE_PORT;
3054

3055
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PROBE_RCVD,
3056
           "Received probe from uuid: %s", uuid_utoa(probe_req.uuid));
3057

3058
    /* Check for uuid collision and handle it in a user friendly way by
3059
     * sending the error.
3060
     */
3061
    if (!gf_uuid_compare(probe_req.uuid, MY_UUID)) {
3062
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_UUIDS_SAME_RETRY,
3063
               "Peer uuid %s is same as "
3064
               "local uuid. Please check the uuid of both the peers "
3065
               "from %s/%s",
3066
               uuid_utoa(probe_req.uuid), GLUSTERD_DEFAULT_WORKDIR,
3067
               GLUSTERD_INFO_FILE);
3068
        rsp.op_ret = -1;
3069
        rsp.op_errno = GF_PROBE_SAME_UUID;
3070
        rsp.port = port;
3071
        goto respond;
3072
    }
3073

3074
    ret = glusterd_remote_hostname_get(req, remote_hostname,
3075
                                       sizeof(remote_hostname));
3076
    if (ret) {
3077
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_HOSTNAME_RESOLVE_FAIL,
3078
               "Unable to get the remote hostname");
3079
        goto out;
3080
    }
3081

3082
    RCU_READ_LOCK;
3083
    peerinfo = glusterd_peerinfo_find(probe_req.uuid, remote_hostname);
3084
    if ((peerinfo == NULL) && (!cds_list_empty(&conf->peers))) {
3085
        rsp.op_ret = -1;
3086
        rsp.op_errno = GF_PROBE_ANOTHER_CLUSTER;
3087
    } else if (peerinfo == NULL) {
3088
        gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PEER_NOT_FOUND,
3089
               "Unable to find peerinfo"
3090
               " for host: %s (%d)",
3091
               remote_hostname, port);
3092
        args.mode = GD_MODE_ON;
3093
        ret = glusterd_friend_add(remote_hostname, port,
3094
                                  GD_FRIEND_STATE_PROBE_RCVD, NULL, &peerinfo,
3095
                                  0, &args);
3096
        if (ret) {
3097
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_PEER_ADD_FAIL,
3098
                   "Failed to add peer %s", remote_hostname);
3099
            rsp.op_errno = GF_PROBE_ADD_FAILED;
3100
        }
3101
    }
3102
    RCU_READ_UNLOCK;
3103

3104
respond:
3105
    gf_uuid_copy(rsp.uuid, MY_UUID);
3106

3107
    rsp.hostname = probe_req.hostname;
3108
    rsp.op_errstr = "";
3109

3110
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
3111
                          (xdrproc_t)xdr_gd1_mgmt_probe_rsp);
3112
    ret = 0;
3113

3114
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
3115
           "Responded to %s, op_ret: %d, "
3116
           "op_errno: %d, ret: %d",
3117
           remote_hostname, rsp.op_ret, rsp.op_errno, ret);
3118

3119
out:
3120
    free(probe_req.hostname);  // malloced by xdr
3121

3122
    glusterd_friend_sm();
3123
    glusterd_op_sm();
3124

3125
    return ret;
3126
}
3127

3128
static int
3129
glusterd_handle_probe_query(rpcsvc_request_t *req)
3130
{
3131
    return glusterd_big_locked_handler(req, __glusterd_handle_probe_query);
3132
}
3133

3134
int
3135
__glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
3136
{
3137
    int32_t ret = -1;
3138
    gf_cli_req cli_req = {{
3139
        0,
3140
    }};
3141
    dict_t *dict = NULL;
3142
    glusterd_op_t cli_op = GD_OP_PROFILE_VOLUME;
3143
    char *volname = NULL;
3144
    int32_t op = 0;
3145
    char err_str[64] = {
3146
        0,
3147
    };
3148
    xlator_t *this = THIS;
3149
    glusterd_conf_t *conf = NULL;
3150

3151
    GF_ASSERT(req);
3152
    conf = this->private;
3153
    GF_VALIDATE_OR_GOTO(this->name, conf, out);
3154

3155
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
3156
    if (ret < 0) {
3157
        // failed to decode msg;
3158
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
3159
               "Failed to decode "
3160
               "request received from cli");
3161
        req->rpc_err = GARBAGE_ARGS;
3162
        goto out;
3163
    }
3164

3165
    if (cli_req.dict.dict_len > 0) {
3166
        dict = dict_new();
3167
        if (!dict) {
3168
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
3169
                    NULL);
3170
            goto out;
3171
        }
3172
        dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, &dict);
3173
    }
3174

3175
    ret = dict_get_str(dict, "volname", &volname);
3176
    if (ret) {
3177
        snprintf(err_str, sizeof(err_str),
3178
                 "Unable to get volume "
3179
                 "name");
3180
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
3181
               "%s", err_str);
3182
        goto out;
3183
    }
3184

3185
    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_VOL_PROFILE_REQ_RCVD,
3186
           "Received volume profile req "
3187
           "for volume %s",
3188
           volname);
3189
    ret = dict_get_int32(dict, "op", &op);
3190
    if (ret) {
3191
        snprintf(err_str, sizeof(err_str), "Unable to get operation");
3192
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
3193
               err_str);
3194
        goto out;
3195
    }
3196

3197
    if (conf->op_version < GD_OP_VERSION_6_0) {
3198
        gf_msg_debug(this->name, 0,
3199
                     "The cluster is operating at "
3200
                     "version less than %d. Falling back "
3201
                     "to op-sm framework.",
3202
                     GD_OP_VERSION_6_0);
3203
        ret = glusterd_op_begin(req, cli_op, dict, err_str, sizeof(err_str));
3204
        glusterd_friend_sm();
3205
        glusterd_op_sm();
3206
    } else {
3207
        ret = glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(
3208
            req, cli_op, dict);
3209
    }
3210

3211
out:
3212
    free(cli_req.dict.dict_val);
3213

3214
    if (ret) {
3215
        if (err_str[0] == '\0')
3216
            snprintf(err_str, sizeof(err_str), "Operation failed");
3217
        ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
3218
    }
3219

3220
    gf_msg_debug(this->name, 0, "Returning %d", ret);
3221
    return ret;
3222
}
3223

3224
static int
3225
glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
3226
{
3227
    return glusterd_big_locked_handler(req,
3228
                                       __glusterd_handle_cli_profile_volume);
3229
}
3230

3231
int
3232
__glusterd_handle_getwd(rpcsvc_request_t *req)
3233
{
3234
    int32_t ret = -1;
3235
    gf1_cli_getwd_rsp rsp = {
3236
        0,
3237
    };
3238
    glusterd_conf_t *priv = NULL;
3239

3240
    GF_ASSERT(req);
3241

3242
    priv = THIS->private;
3243
    GF_ASSERT(priv);
3244

3245
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_GETWD_REQ_RCVD,
3246
           "Received getwd req");
3247

3248
    rsp.wd = priv->workdir;
3249

3250
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
3251
                          (xdrproc_t)xdr_gf1_cli_getwd_rsp);
3252
    ret = 0;
3253

3254
    glusterd_friend_sm();
3255
    glusterd_op_sm();
3256

3257
    return ret;
3258
}
3259

3260
static int
3261
glusterd_handle_getwd(rpcsvc_request_t *req)
3262
{
3263
    return glusterd_big_locked_handler(req, __glusterd_handle_getwd);
3264
}
3265

3266
int
3267
__glusterd_handle_mount(rpcsvc_request_t *req)
3268
{
3269
    gf1_cli_mount_req mnt_req = {
3270
        0,
3271
    };
3272
    gf1_cli_mount_rsp rsp = {
3273
        0,
3274
    };
3275
    dict_t *dict = NULL;
3276
    int ret = 0;
3277
    glusterd_conf_t *priv = NULL;
3278

3279
    GF_ASSERT(req);
3280
    priv = THIS->private;
3281

3282
    ret = xdr_to_generic(req->msg[0], &mnt_req,
3283
                         (xdrproc_t)xdr_gf1_cli_mount_req);
3284
    if (ret < 0) {
3285
        // failed to decode msg;
3286
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
3287
               "Failed to decode mount "
3288
               "request received");
3289
        req->rpc_err = GARBAGE_ARGS;
3290
        rsp.op_ret = -1;
3291
        rsp.op_errno = EINVAL;
3292
        goto out;
3293
    }
3294

3295
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_MOUNT_REQ_RCVD,
3296
           "Received mount req");
3297

3298
    if (mnt_req.dict.dict_len) {
3299
        /* Unserialize the dictionary */
3300
        dict = dict_new();
3301

3302
        ret = dict_unserialize(mnt_req.dict.dict_val, mnt_req.dict.dict_len,
3303
                               &dict);
3304
        if (ret < 0) {
3305
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
3306
                   "failed to "
3307
                   "unserialize req-buffer to dictionary");
3308
            rsp.op_ret = -1;
3309
            rsp.op_errno = -EINVAL;
3310
            goto out;
3311
        } else {
3312
            dict->extra_stdfree = mnt_req.dict.dict_val;
3313
        }
3314
    }
3315

3316
    synclock_unlock(&priv->big_lock);
3317
    rsp.op_ret = glusterd_do_mount(mnt_req.label, dict, &rsp.path,
3318
                                   &rsp.op_errno);
3319
    synclock_lock(&priv->big_lock);
3320

3321
out:
3322
    if (!rsp.path)
3323
        rsp.path = gf_strdup("");
3324

3325
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
3326
                          (xdrproc_t)xdr_gf1_cli_mount_rsp);
3327
    ret = 0;
3328

3329
    if (dict)
3330
        dict_unref(dict);
3331

3332
    GF_FREE(rsp.path);
3333

3334
    glusterd_friend_sm();
3335
    glusterd_op_sm();
3336

3337
    return ret;
3338
}
3339

3340
int
3341
glusterd_handle_mount(rpcsvc_request_t *req)
3342
{
3343
    return glusterd_big_locked_handler(req, __glusterd_handle_mount);
3344
}
3345

3346
int
3347
__glusterd_handle_umount(rpcsvc_request_t *req)
3348
{
3349
    gf1_cli_umount_req umnt_req = {
3350
        0,
3351
    };
3352
    gf1_cli_umount_rsp rsp = {
3353
        0,
3354
    };
3355
    char *mountbroker_root = NULL;
3356
    char mntp[PATH_MAX] = {
3357
        0,
3358
    };
3359
    char *path = NULL;
3360
    runner_t runner = {
3361
        0,
3362
    };
3363
    int ret = 0;
3364
    xlator_t *this = THIS;
3365
    gf_boolean_t dir_ok = _gf_false;
3366
    char *pdir = NULL;
3367
    char *t = NULL;
3368
    glusterd_conf_t *priv = NULL;
3369

3370
    GF_ASSERT(req);
3371
    priv = this->private;
3372

3373
    ret = xdr_to_generic(req->msg[0], &umnt_req,
3374
                         (xdrproc_t)xdr_gf1_cli_umount_req);
3375
    if (ret < 0) {
3376
        // failed to decode msg;
3377
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
3378
               "Failed to decode umount"
3379
               "request");
3380
        req->rpc_err = GARBAGE_ARGS;
3381
        rsp.op_ret = -1;
3382
        goto out;
3383
    }
3384

3385
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_UMOUNT_REQ_RCVD,
3386
           "Received umount req");
3387

3388
    if (dict_get_str(this->options, "mountbroker-root", &mountbroker_root) !=
3389
        0) {
3390
        rsp.op_errno = ENOENT;
3391
        goto out;
3392
    }
3393

3394
    /* check if it is allowed to umount path */
3395
    path = gf_strdup(umnt_req.path);
3396
    if (!path) {
3397
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, NULL);
3398
        rsp.op_errno = ENOMEM;
3399
        goto out;
3400
    }
3401
    dir_ok = _gf_false;
3402
    pdir = dirname(path);
3403
    t = strtail(pdir, mountbroker_root);
3404
    if (t && *t == '/') {
3405
        t = strtail(++t, MB_HIVE);
3406
        if (t && !*t)
3407
            dir_ok = _gf_true;
3408
    }
3409
    GF_FREE(path);
3410
    if (!dir_ok) {
3411
        rsp.op_errno = EACCES;
3412
        goto out;
3413
    }
3414

3415
    synclock_unlock(&priv->big_lock);
3416

3417
    if (umnt_req.lazy) {
3418
        rsp.op_ret = gf_umount_lazy(this->name, umnt_req.path, 0);
3419
    } else {
3420
        runinit(&runner);
3421
        runner_add_args(&runner, _PATH_UMOUNT, umnt_req.path, NULL);
3422
        rsp.op_ret = runner_run(&runner);
3423
    }
3424

3425
    synclock_lock(&priv->big_lock);
3426
    if (rsp.op_ret == 0) {
3427
        if (realpath(umnt_req.path, mntp))
3428
            sys_rmdir(mntp);
3429
        else {
3430
            rsp.op_ret = -1;
3431
            rsp.op_errno = errno;
3432
        }
3433
        if (sys_unlink(umnt_req.path) != 0) {
3434
            rsp.op_ret = -1;
3435
            rsp.op_errno = errno;
3436
        }
3437
    }
3438

3439
out:
3440
    if (rsp.op_errno)
3441
        rsp.op_ret = -1;
3442

3443
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
3444
                          (xdrproc_t)xdr_gf1_cli_umount_rsp);
3445
    ret = 0;
3446

3447
    glusterd_friend_sm();
3448
    glusterd_op_sm();
3449

3450
    return ret;
3451
}
3452

3453
int
3454
glusterd_handle_umount(rpcsvc_request_t *req)
3455
{
3456
    return glusterd_big_locked_handler(req, __glusterd_handle_umount);
3457
}
3458

3459
int
3460
glusterd_friend_remove(uuid_t uuid, char *hostname)
3461
{
3462
    int ret = -1;
3463
    glusterd_peerinfo_t *peerinfo = NULL;
3464

3465
    RCU_READ_LOCK;
3466

3467
    peerinfo = glusterd_peerinfo_find(uuid, hostname);
3468
    if (peerinfo == NULL) {
3469
        RCU_READ_UNLOCK;
3470
        goto out;
3471
    }
3472

3473
    ret = glusterd_friend_remove_cleanup_vols(peerinfo->uuid);
3474
    RCU_READ_UNLOCK;
3475
    if (ret)
3476
        gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_CLEANUP_FAIL,
3477
               "Volumes cleanup failed");
3478
    /* Giving up the critical section here as glusterd_peerinfo_cleanup must
3479
     * be called from outside a critical section
3480
     */
3481
    ret = glusterd_peerinfo_cleanup(peerinfo);
3482
out:
3483
    gf_msg_debug(THIS->name, 0, "returning %d", ret);
3484
    /* coverity[LOCK] */
3485
    return ret;
3486
}
3487

3488
int
3489
glusterd_rpc_create(struct rpc_clnt **rpc, dict_t *options,
3490
                    rpc_clnt_notify_t notify_fn, void *notify_data,
3491
                    gf_boolean_t force)
3492
{
3493
    struct rpc_clnt *new_rpc = NULL;
3494
    int ret = -1;
3495
    xlator_t *this = THIS;
3496

3497
    GF_ASSERT(options);
3498
    GF_VALIDATE_OR_GOTO(this->name, rpc, out);
3499

3500
    if (force && rpc && *rpc) {
3501
        (void)rpc_clnt_unref(*rpc);
3502
        *rpc = NULL;
3503
    }
3504

3505
    /* TODO: is 32 enough? or more ? */
3506
    new_rpc = rpc_clnt_new(options, this, this->name, 16);
3507
    if (!new_rpc)
3508
        goto out;
3509

3510
    ret = rpc_clnt_register_notify(new_rpc, notify_fn, notify_data);
3511
    if (ret)
3512
        goto out;
3513
    ret = rpc_clnt_start(new_rpc);
3514
out:
3515
    if (ret) {
3516
        if (new_rpc) {
3517
            (void)rpc_clnt_unref(new_rpc);
3518
        }
3519
    } else {
3520
        *rpc = new_rpc;
3521
    }
3522

3523
    gf_msg_debug(this->name, 0, "returning %d", ret);
3524
    return ret;
3525
}
3526

3527
int
3528
glusterd_transport_inet_options_build(dict_t *dict, const char *hostname,
3529
                                      int port, char *af)
3530
{
3531
    xlator_t *this = THIS;
3532
    int32_t interval = -1;
3533
    int32_t time = -1;
3534
    int32_t timeout = -1;
3535
    int ret = 0;
3536

3537
    GF_ASSERT(dict);
3538
    GF_ASSERT(hostname);
3539

3540
    if (!port)
3541
        port = GLUSTERD_DEFAULT_PORT;
3542

3543
    /* Build default transport options */
3544
    ret = rpc_transport_inet_options_build(dict, hostname, port, af);
3545
    if (ret)
3546
        goto out;
3547

3548
    /* Set frame-timeout to 10mins. Default timeout of 30 mins is too long
3549
     * when compared to 2 mins for cli timeout. This ensures users don't
3550
     * wait too long after cli timesout before being able to resume normal
3551
     * operations
3552
     */
3553
    ret = dict_set_int32_sizen(dict, "frame-timeout", 600);
3554
    if (ret) {
3555
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3556
               "Failed to set frame-timeout");
3557
        goto out;
3558
    }
3559

3560
    /* Set keepalive options */
3561
    ret = dict_get_int32(this->options, "transport.socket.keepalive-interval",
3562
                         &interval);
3563
    if (ret) {
3564
        gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
3565
               "Failed to get socket keepalive-interval");
3566
    }
3567
    ret = dict_get_int32(this->options, "transport.socket.keepalive-time",
3568
                         &time);
3569
    if (ret) {
3570
        gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
3571
               "Failed to get socket keepalive-time");
3572
    }
3573
    ret = dict_get_int32(this->options, "transport.tcp-user-timeout", &timeout);
3574
    if (ret) {
3575
        gf_msg("glusterd", GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
3576
               "Failed to get tcp-user-timeout");
3577
    }
3578

3579
    if ((interval > 0) || (time > 0))
3580
        ret = rpc_transport_keepalive_options_set(dict, interval, time,
3581
                                                  timeout);
3582
out:
3583
    gf_msg_debug("glusterd", 0, "Returning %d", ret);
3584
    return ret;
3585
}
3586

3587
int
3588
glusterd_friend_rpc_create(xlator_t *this, glusterd_peerinfo_t *peerinfo,
3589
                           glusterd_peerctx_args_t *args)
3590
{
3591
    dict_t *options = NULL;
3592
    int ret = -1;
3593
    glusterd_peerctx_t *peerctx = NULL;
3594
    data_t *data = NULL;
3595
    char *af = NULL;
3596

3597
    peerctx = GF_CALLOC(1, sizeof(*peerctx), gf_gld_mt_peerctx_t);
3598
    if (!peerctx) {
3599
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
3600
        goto out;
3601
    }
3602

3603
    options = dict_new();
3604
    if (!options) {
3605
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
3606
        goto out;
3607
    }
3608

3609
    if (args)
3610
        peerctx->args = *args;
3611

3612
    gf_uuid_copy(peerctx->peerid, peerinfo->uuid);
3613
    peerctx->peername = gf_strdup(peerinfo->hostname);
3614
    peerctx->peerinfo_gen = peerinfo->generation; /* A peerinfos generation
3615
                                                     number can be used to
3616
                                                     uniquely identify a
3617
                                                     peerinfo */
3618

3619
    ret = dict_get_str(this->options, "transport.address-family", &af);
3620
    if (ret)
3621
        gf_log(this->name, GF_LOG_TRACE,
3622
               "option transport.address-family is not set in xlator options");
3623
    ret = glusterd_transport_inet_options_build(options, peerinfo->hostname,
3624
                                                peerinfo->port, af);
3625
    if (ret)
3626
        goto out;
3627

3628
    /*
3629
     * For simulated multi-node testing, we need to make sure that we
3630
     * create our RPC endpoint with the same address that the peer would
3631
     * use to reach us.
3632
     */
3633

3634
    if (this->options) {
3635
        data = dict_get_sizen(this->options, "transport.socket.bind-address");
3636
        if (data) {
3637
            ret = dict_set_sizen(options, "transport.socket.source-addr", data);
3638
        }
3639
        data = dict_get_sizen(this->options, "ping-timeout");
3640
        if (data) {
3641
            ret = dict_set_sizen(options, "ping-timeout", data);
3642
        }
3643
    }
3644

3645
    /* Enable encryption for the client connection if management encryption
3646
     * is enabled
3647
     */
3648
    if (this->ctx->secure_mgmt) {
3649
        ret = dict_set_sizen_str_sizen(options, "transport.socket.ssl-enabled",
3650
                                       "on");
3651
        if (ret) {
3652
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
3653
                   "failed to set ssl-enabled in dict");
3654
            goto out;
3655
        }
3656

3657
        this->ctx->ssl_cert_depth = glusterfs_read_secure_access_file();
3658
    }
3659

3660
    ret = glusterd_rpc_create(&peerinfo->rpc, options, glusterd_peer_rpc_notify,
3661
                              peerctx, _gf_false);
3662
    if (ret) {
3663
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_CREATE_FAIL,
3664
               "failed to create rpc for"
3665
               " peer %s",
3666
               peerinfo->hostname);
3667
        gf_event(EVENT_PEER_RPC_CREATE_FAILED, "peer=%s", peerinfo->hostname);
3668
        goto out;
3669
    }
3670
    peerctx = NULL;
3671
    ret = 0;
3672
out:
3673
    if (options)
3674
        dict_unref(options);
3675

3676
    GF_FREE(peerctx);
3677
    return ret;
3678
}
3679

3680
static int
3681
glusterd_friend_add(const char *hoststr, int port,
3682
                    glusterd_friend_sm_state_t state, uuid_t *uuid,
3683
                    glusterd_peerinfo_t **friend, gf_boolean_t restore,
3684
                    glusterd_peerctx_args_t *args)
3685
{
3686
    int ret = 0;
3687
    xlator_t *this = THIS;
3688
    glusterd_conf_t *conf = NULL;
3689

3690
    conf = this->private;
3691
    GF_ASSERT(conf);
3692
    GF_ASSERT(hoststr);
3693
    GF_ASSERT(friend);
3694

3695
    *friend = glusterd_peerinfo_new(state, uuid, hoststr, port);
3696
    if (*friend == NULL) {
3697
        ret = -1;
3698
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_ADD_FAIL, NULL);
3699
        goto out;
3700
    }
3701

3702
    /*
3703
     * We can't add to the list after calling glusterd_friend_rpc_create,
3704
     * even if it succeeds, because by then the callback to take it back
3705
     * off and free might have happened already (notably in the case of an
3706
     * invalid peer name).  That would mean we're adding something that had
3707
     * just been free, and we're likely to crash later.
3708
     */
3709
    cds_list_add_tail_rcu(&(*friend)->uuid_list, &conf->peers);
3710

3711
    // restore needs to first create the list of peers, then create rpcs
3712
    // to keep track of quorum in race-free manner. In restore for each peer
3713
    // rpc-create calls rpc_notify when the friend-list is partially
3714
    // constructed, leading to wrong quorum calculations.
3715
    if (!restore) {
3716
        ret = glusterd_store_peerinfo(*friend);
3717
        if (ret == 0) {
3718
            ret = glusterd_friend_rpc_create(this, *friend, args);
3719
        } else {
3720
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
3721
                   "Failed to store peerinfo");
3722
            gf_event(EVENT_PEER_STORE_FAILURE, "peer=%s", (*friend)->hostname);
3723
        }
3724
    }
3725

3726
    if (ret) {
3727
        (void)glusterd_peerinfo_cleanup(*friend);
3728
        *friend = NULL;
3729
    }
3730

3731
out:
3732
    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONNECT_RETURNED,
3733
           "connect returned %d", ret);
3734
    return ret;
3735
}
3736

3737
/* glusterd_friend_add_from_peerinfo() adds a new peer into the local friends
3738
 * list from a pre created @peerinfo object. It otherwise works similarly to
3739
 * glusterd_friend_add()
3740
 */
3741
int
3742
glusterd_friend_add_from_peerinfo(glusterd_peerinfo_t *friend,
3743
                                  gf_boolean_t restore,
3744
                                  glusterd_peerctx_args_t *args)
3745
{
3746
    int ret = 0;
3747
    xlator_t *this = THIS;
3748
    glusterd_conf_t *conf = NULL;
3749

3750
    conf = this->private;
3751
    GF_ASSERT(conf);
3752

3753
    GF_VALIDATE_OR_GOTO(this->name, (friend != NULL), out);
3754

3755
    /*
3756
     * We can't add to the list after calling glusterd_friend_rpc_create,
3757
     * even if it succeeds, because by then the callback to take it back
3758
     * off and free might have happened already (notably in the case of an
3759
     * invalid peer name).  That would mean we're adding something that had
3760
     * just been free, and we're likely to crash later.
3761
     */
3762
    cds_list_add_tail_rcu(&friend->uuid_list, &conf->peers);
3763

3764
    // restore needs to first create the list of peers, then create rpcs
3765
    // to keep track of quorum in race-free manner. In restore for each peer
3766
    // rpc-create calls rpc_notify when the friend-list is partially
3767
    // constructed, leading to wrong quorum calculations.
3768
    if (!restore) {
3769
        ret = glusterd_store_peerinfo(friend);
3770
        if (ret == 0) {
3771
            ret = glusterd_friend_rpc_create(this, friend, args);
3772
        } else {
3773
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEERINFO_CREATE_FAIL,
3774
                   "Failed to store peerinfo");
3775
            gf_event(EVENT_PEER_STORE_FAILURE, "peer=%s", friend->hostname);
3776
        }
3777
    }
3778

3779
out:
3780
    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CONNECT_RETURNED,
3781
           "connect returned %d", ret);
3782
    return ret;
3783
}
3784

3785
static int
3786
glusterd_probe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
3787
                     dict_t *dict, int *op_errno)
3788
{
3789
    int ret = -1;
3790
    glusterd_peerinfo_t *peerinfo = NULL;
3791
    glusterd_peerctx_args_t args = {0};
3792
    glusterd_friend_sm_event_t *event = NULL;
3793

3794
    GF_ASSERT(hoststr);
3795

3796
    RCU_READ_LOCK;
3797
    peerinfo = glusterd_peerinfo_find(NULL, hoststr);
3798

3799
    if (peerinfo == NULL) {
3800
        gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PEER_NOT_FOUND,
3801
               "Unable to find peerinfo"
3802
               " for host: %s (%d)",
3803
               hoststr, port);
3804
        args.mode = GD_MODE_ON;
3805
        args.req = req;
3806
        args.dict = dict;
3807
        ret = glusterd_friend_add(hoststr, port, GD_FRIEND_STATE_DEFAULT, NULL,
3808
                                  &peerinfo, 0, &args);
3809
        if ((!ret) && (!peerinfo->connected)) {
3810
            ret = GLUSTERD_CONNECTION_AWAITED;
3811
        }
3812

3813
    } else if (peerinfo->connected &&
3814
               (GD_FRIEND_STATE_BEFRIENDED == peerinfo->state)) {
3815
        if (peerinfo->detaching) {
3816
            ret = -1;
3817
            if (op_errno)
3818
                *op_errno = GF_PROBE_FRIEND_DETACHING;
3819
            goto out;
3820
        }
3821
        ret = glusterd_peer_hostname_update(peerinfo, hoststr, _gf_false);
3822
        if (ret)
3823
            goto out;
3824
        // Injecting a NEW_NAME event to update cluster
3825
        ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_NEW_NAME, &event);
3826
        if (!ret) {
3827
            event->peername = gf_strdup(peerinfo->hostname);
3828
            gf_uuid_copy(event->peerid, peerinfo->uuid);
3829

3830
            ret = glusterd_friend_sm_inject_event(event);
3831
            glusterd_xfer_cli_probe_resp(req, 0, GF_PROBE_SUCCESS, NULL,
3832
                                         (char *)hoststr, port, dict);
3833
        }
3834
    } else {
3835
        glusterd_xfer_cli_probe_resp(req, 0, GF_PROBE_FRIEND, NULL,
3836
                                     (char *)hoststr, port, dict);
3837
        ret = 0;
3838
    }
3839

3840
out:
3841
    RCU_READ_UNLOCK;
3842
    gf_msg_debug("glusterd", 0, "returning %d", ret);
3843
    return ret;
3844
}
3845

3846
int
3847
glusterd_deprobe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
3848
                       uuid_t uuid, dict_t *dict, int *op_errno)
3849
{
3850
    int ret = -1;
3851
    glusterd_peerinfo_t *peerinfo = NULL;
3852
    glusterd_friend_sm_event_t *event = NULL;
3853
    glusterd_probe_ctx_t *ctx = NULL;
3854

3855
    GF_ASSERT(hoststr);
3856
    GF_ASSERT(req);
3857

3858
    RCU_READ_LOCK;
3859

3860
    peerinfo = glusterd_peerinfo_find(uuid, hoststr);
3861
    if (peerinfo == NULL) {
3862
        ret = -1;
3863
        gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_PEER_NOT_FOUND,
3864
               "Unable to find peerinfo"
3865
               " for host: %s %d",
3866
               hoststr, port);
3867
        goto out;
3868
    }
3869

3870
    if (!peerinfo->rpc) {
3871
        // handle this case
3872
        goto out;
3873
    }
3874

3875
    if (peerinfo->detaching) {
3876
        ret = -1;
3877
        if (op_errno)
3878
            *op_errno = GF_DEPROBE_FRIEND_DETACHING;
3879
        goto out;
3880
    }
3881

3882
    ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_INIT_REMOVE_FRIEND,
3883
                                       &event);
3884

3885
    if (ret) {
3886
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
3887
               "Unable to get new event");
3888
        goto out;
3889
    }
3890

3891
    ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_probe_ctx_t);
3892

3893
    if (!ctx) {
3894
        goto out;
3895
    }
3896

3897
    ctx->hostname = gf_strdup(hoststr);
3898
    ctx->port = port;
3899
    ctx->req = req;
3900
    ctx->dict = dict;
3901

3902
    event->ctx = ctx;
3903

3904
    event->peername = gf_strdup(hoststr);
3905
    gf_uuid_copy(event->peerid, uuid);
3906

3907
    ret = glusterd_friend_sm_inject_event(event);
3908

3909
    if (ret) {
3910
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
3911
               "Unable to inject event %d, "
3912
               "ret = %d",
3913
               event->event, ret);
3914
        goto out;
3915
    }
3916
    peerinfo->detaching = _gf_true;
3917

3918
out:
3919
    RCU_READ_UNLOCK;
3920
    return ret;
3921
}
3922

3923
int
3924
glusterd_xfer_friend_remove_resp(rpcsvc_request_t *req, char *hostname,
3925
                                 int port)
3926
{
3927
    gd1_mgmt_friend_rsp rsp = {
3928
        {0},
3929
    };
3930
    int32_t ret = -1;
3931

3932
    GF_ASSERT(hostname);
3933

3934
    rsp.op_ret = 0;
3935

3936
    gf_uuid_copy(rsp.uuid, MY_UUID);
3937
    rsp.hostname = hostname;
3938
    rsp.port = port;
3939
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
3940
                                (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
3941

3942
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
3943
           "Responded to %s (%d), ret: %d", hostname, port, ret);
3944
    return ret;
3945
}
3946

3947
int
3948
glusterd_xfer_friend_add_resp(rpcsvc_request_t *req, char *myhostname,
3949
                              char *remote_hostname, int port, int32_t op_ret,
3950
                              int32_t op_errno)
3951
{
3952
    gd1_mgmt_friend_rsp rsp = {
3953
        {0},
3954
    };
3955
    int32_t ret = -1;
3956

3957
    GF_ASSERT(myhostname);
3958

3959
    gf_uuid_copy(rsp.uuid, MY_UUID);
3960
    rsp.op_ret = op_ret;
3961
    rsp.op_errno = op_errno;
3962
    rsp.hostname = gf_strdup(myhostname);
3963
    rsp.port = port;
3964

3965
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
3966
                                (xdrproc_t)xdr_gd1_mgmt_friend_rsp);
3967

3968
    gf_msg("glusterd", GF_LOG_INFO, 0, GD_MSG_RESPONSE_INFO,
3969
           "Responded to %s (%d), ret: %d, op_ret: %d", remote_hostname, port,
3970
           ret, op_ret);
3971
    GF_FREE(rsp.hostname);
3972
    return ret;
3973
}
3974

3975
static void
3976
set_probe_error_str(int op_ret, int op_errno, char *op_errstr, char *errstr,
3977
                    size_t len, char *hostname, int port)
3978
{
3979
    if ((op_errstr) && (strcmp(op_errstr, ""))) {
3980
        snprintf(errstr, len, "%s", op_errstr);
3981
        return;
3982
    }
3983

3984
    if (!op_ret) {
3985
        switch (op_errno) {
3986
            case GF_PROBE_LOCALHOST:
3987
                snprintf(errstr, len,
3988
                         "Probe on localhost not "
3989
                         "needed");
3990
                break;
3991

3992
            case GF_PROBE_FRIEND:
3993
                snprintf(errstr, len,
3994
                         "Host %s port %d already"
3995
                         " in peer list",
3996
                         hostname, port);
3997
                break;
3998

3999
            case GF_PROBE_FRIEND_DETACHING:
4000
                snprintf(errstr, len,
4001
                         "Peer is already being "
4002
                         "detached from cluster.\n"
4003
                         "Check peer status by running "
4004
                         "gluster peer status");
4005
                break;
4006
            default:
4007
                if (op_errno != 0)
4008
                    snprintf(errstr, len,
4009
                             "Probe returned "
4010
                             "with %s",
4011
                             strerror(op_errno));
4012
                break;
4013
        }
4014
    } else {
4015
        switch (op_errno) {
4016
            case GF_PROBE_ANOTHER_CLUSTER:
4017
                snprintf(errstr, len,
4018
                         "%s is either already "
4019
                         "part of another cluster or having "
4020
                         "volumes configured",
4021
                         hostname);
4022
                break;
4023

4024
            case GF_PROBE_VOLUME_CONFLICT:
4025
                snprintf(errstr, len,
4026
                         "At least one volume on "
4027
                         "%s conflicts with existing volumes "
4028
                         "in the cluster",
4029
                         hostname);
4030
                break;
4031

4032
            case GF_PROBE_UNKNOWN_PEER:
4033
                snprintf(errstr, len,
4034
                         "%s responded with "
4035
                         "'unknown peer' error, this could "
4036
                         "happen if %s doesn't have localhost "
4037
                         "in its peer database",
4038
                         hostname, hostname);
4039
                break;
4040

4041
            case GF_PROBE_ADD_FAILED:
4042
                snprintf(errstr, len,
4043
                         "Failed to add peer "
4044
                         "information on %s",
4045
                         hostname);
4046
                break;
4047

4048
            case GF_PROBE_SAME_UUID:
4049
                snprintf(errstr, len,
4050
                         "Peer uuid (host %s) is "
4051
                         "same as local uuid",
4052
                         hostname);
4053
                break;
4054

4055
            case GF_PROBE_QUORUM_NOT_MET:
4056
                snprintf(errstr, len,
4057
                         "Cluster quorum is not "
4058
                         "met. Changing peers is not allowed "
4059
                         "in this state");
4060
                break;
4061

4062
            case GF_PROBE_MISSED_SNAP_CONFLICT:
4063
                snprintf(errstr, len,
4064
                         "Failed to update "
4065
                         "list of missed snapshots from "
4066
                         "peer %s",
4067
                         hostname);
4068
                break;
4069

4070
            case GF_PROBE_SNAP_CONFLICT:
4071
                snprintf(errstr, len,
4072
                         "Conflict in comparing "
4073
                         "list of snapshots from "
4074
                         "peer %s",
4075
                         hostname);
4076
                break;
4077

4078
            default:
4079
                snprintf(errstr, len,
4080
                         "Probe returned with "
4081
                         "%s",
4082
                         strerror(op_errno));
4083
                break;
4084
        }
4085
    }
4086
}
4087

4088
int
4089
glusterd_xfer_cli_probe_resp(rpcsvc_request_t *req, int32_t op_ret,
4090
                             int32_t op_errno, char *op_errstr, char *hostname,
4091
                             int port, dict_t *dict)
4092
{
4093
    gf_cli_rsp rsp = {
4094
        0,
4095
    };
4096
    int32_t ret = -1;
4097
    char errstr[2048] = {
4098
        0,
4099
    };
4100
    char *cmd_str = NULL;
4101
    xlator_t *this = THIS;
4102

4103
    GF_ASSERT(req);
4104

4105
    (void)set_probe_error_str(op_ret, op_errno, op_errstr, errstr,
4106
                              sizeof(errstr), hostname, port);
4107

4108
    if (dict) {
4109
        ret = dict_get_str(dict, "cmd-str", &cmd_str);
4110
        if (ret)
4111
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CMDSTR_NOTFOUND_IN_DICT,
4112
                   "Failed to get "
4113
                   "command string");
4114
    }
4115

4116
    rsp.op_ret = op_ret;
4117
    rsp.op_errno = op_errno;
4118
    rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
4119

4120
    gf_cmd_log("", "%s : %s %s %s", cmd_str, (op_ret) ? "FAILED" : "SUCCESS",
4121
               (errstr[0] != '\0') ? ":" : " ",
4122
               (errstr[0] != '\0') ? errstr : " ");
4123

4124
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
4125
                                (xdrproc_t)xdr_gf_cli_rsp);
4126

4127
    if (dict)
4128
        dict_unref(dict);
4129
    gf_msg_debug(this->name, 0, "Responded to CLI, ret: %d", ret);
4130

4131
    return ret;
4132
}
4133

4134
static void
4135
set_deprobe_error_str(int op_ret, int op_errno, char *op_errstr, char *errstr,
4136
                      size_t len, char *hostname)
4137
{
4138
    if ((op_errstr) && (strcmp(op_errstr, ""))) {
4139
        snprintf(errstr, len, "%s", op_errstr);
4140
        return;
4141
    }
4142

4143
    if (op_ret) {
4144
        switch (op_errno) {
4145
            case GF_DEPROBE_LOCALHOST:
4146
                snprintf(errstr, len, "%s is localhost", hostname);
4147
                break;
4148

4149
            case GF_DEPROBE_NOT_FRIEND:
4150
                snprintf(errstr, len,
4151
                         "%s is not part of "
4152
                         "cluster",
4153
                         hostname);
4154
                break;
4155

4156
            case GF_DEPROBE_BRICK_EXIST:
4157
                snprintf(errstr, len,
4158
                         "Peer %s hosts one or more bricks. If the peer is in "
4159
                         "not recoverable state then use either replace-brick "
4160
                         "or remove-brick command with force to remove all "
4161
                         "bricks from the peer and attempt the peer detach "
4162
                         "again.",
4163
                         hostname);
4164
                break;
4165

4166
            case GF_DEPROBE_SNAP_BRICK_EXIST:
4167
                snprintf(errstr, len,
4168
                         "%s is part of existing "
4169
                         "snapshot. Remove those snapshots "
4170
                         "before proceeding ",
4171
                         hostname);
4172
                break;
4173

4174
            case GF_DEPROBE_FRIEND_DOWN:
4175
                snprintf(errstr, len,
4176
                         "One of the peers is "
4177
                         "probably down. Check with "
4178
                         "'peer status'");
4179
                break;
4180

4181
            case GF_DEPROBE_QUORUM_NOT_MET:
4182
                snprintf(errstr, len,
4183
                         "Cluster quorum is not "
4184
                         "met. Changing peers is not allowed "
4185
                         "in this state");
4186
                break;
4187

4188
            case GF_DEPROBE_FRIEND_DETACHING:
4189
                snprintf(errstr, len,
4190
                         "Peer is already being "
4191
                         "detached from cluster.\n"
4192
                         "Check peer status by running "
4193
                         "gluster peer status");
4194
                break;
4195
            default:
4196
                snprintf(errstr, len,
4197
                         "Detach returned with "
4198
                         "%s",
4199
                         strerror(op_errno));
4200
                break;
4201
        }
4202
    }
4203
}
4204

4205
int
4206
glusterd_xfer_cli_deprobe_resp(rpcsvc_request_t *req, int32_t op_ret,
4207
                               int32_t op_errno, char *op_errstr,
4208
                               char *hostname, dict_t *dict)
4209
{
4210
    gf_cli_rsp rsp = {
4211
        0,
4212
    };
4213
    int32_t ret = -1;
4214
    char *cmd_str = NULL;
4215
    char errstr[2048] = {
4216
        0,
4217
    };
4218

4219
    GF_ASSERT(req);
4220

4221
    (void)set_deprobe_error_str(op_ret, op_errno, op_errstr, errstr,
4222
                                sizeof(errstr), hostname);
4223

4224
    if (dict) {
4225
        ret = dict_get_str(dict, "cmd-str", &cmd_str);
4226
        if (ret)
4227
            gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_CMDSTR_NOTFOUND_IN_DICT,
4228
                   "Failed to get "
4229
                   "command string");
4230
    }
4231

4232
    rsp.op_ret = op_ret;
4233
    rsp.op_errno = op_errno;
4234
    rsp.op_errstr = (errstr[0] != '\0') ? errstr : "";
4235

4236
    gf_cmd_log("", "%s : %s %s %s", cmd_str, (op_ret) ? "FAILED" : "SUCCESS",
4237
               (errstr[0] != '\0') ? ":" : " ",
4238
               (errstr[0] != '\0') ? errstr : " ");
4239

4240
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
4241
                                (xdrproc_t)xdr_gf_cli_rsp);
4242

4243
    gf_msg_debug(THIS->name, 0, "Responded to CLI, ret: %d", ret);
4244

4245
    return ret;
4246
}
4247

4248
int32_t
4249
glusterd_list_friends(rpcsvc_request_t *req, dict_t *dict, int32_t flags)
4250
{
4251
    int32_t ret = -1;
4252
    glusterd_conf_t *priv = NULL;
4253
    glusterd_peerinfo_t *entry = NULL;
4254
    int32_t count = 0;
4255
    dict_t *friends = NULL;
4256
    gf1_cli_peer_list_rsp rsp = {
4257
        0,
4258
    };
4259
    char my_uuid_str[64] = {
4260
        0,
4261
    };
4262
    char key[64] = {
4263
        0,
4264
    };
4265
    int keylen;
4266

4267
    xlator_t *this = THIS;
4268

4269
    priv = this->private;
4270
    GF_ASSERT(priv);
4271

4272
    friends = dict_new();
4273
    if (!friends) {
4274
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
4275
        goto out;
4276
    }
4277

4278
    /* Reset ret to 0, needed to prevent failure in case no peers exist */
4279
    ret = 0;
4280
    RCU_READ_LOCK;
4281
    if (!cds_list_empty(&priv->peers)) {
4282
        cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
4283
        {
4284
            count++;
4285
            ret = gd_add_peer_detail_to_dict(entry, friends, count);
4286
            if (ret)
4287
                goto unlock;
4288
        }
4289
    }
4290
unlock:
4291
    RCU_READ_UNLOCK;
4292
    if (ret)
4293
        goto out;
4294

4295
    if (flags == GF_CLI_LIST_POOL_NODES) {
4296
        count++;
4297
        keylen = snprintf(key, sizeof(key), "friend%d.uuid", count);
4298
        uuid_utoa_r(MY_UUID, my_uuid_str);
4299
        ret = dict_set_strn(friends, key, keylen, my_uuid_str);
4300
        if (ret) {
4301
            gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
4302
                    "Key=%s", key, NULL);
4303
            goto out;
4304
        }
4305

4306
        keylen = snprintf(key, sizeof(key), "friend%d.hostname", count);
4307
        ret = dict_set_nstrn(friends, key, keylen, "localhost",
4308
                             SLEN("localhost"));
4309
        if (ret) {
4310
            gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
4311
                    "Key=%s", key, NULL);
4312
            goto out;
4313
        }
4314

4315
        keylen = snprintf(key, sizeof(key), "friend%d.connected", count);
4316
        ret = dict_set_int32n(friends, key, keylen, 1);
4317
        if (ret) {
4318
            gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
4319
                    "Key=%s", key, NULL);
4320
            goto out;
4321
        }
4322
    }
4323

4324
    ret = dict_set_int32_sizen(friends, "count", count);
4325
    if (ret) {
4326
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
4327
                "Key=count", NULL);
4328
        goto out;
4329
    }
4330

4331
    ret = dict_allocate_and_serialize(friends, &rsp.friends.friends_val,
4332
                                      &rsp.friends.friends_len);
4333

4334
    if (ret)
4335
        goto out;
4336

4337
    ret = 0;
4338
out:
4339

4340
    if (friends)
4341
        dict_unref(friends);
4342

4343
    rsp.op_ret = ret;
4344

4345
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
4346
                          (xdrproc_t)xdr_gf1_cli_peer_list_rsp);
4347
    ret = 0;
4348
    GF_FREE(rsp.friends.friends_val);
4349

4350
    return ret;
4351
}
4352

4353
int32_t
4354
glusterd_get_volumes(rpcsvc_request_t *req, dict_t *dict, int32_t flags)
4355
{
4356
    int32_t ret = -1;
4357
    int32_t ret_bkp = 0;
4358
    glusterd_conf_t *priv = NULL;
4359
    glusterd_volinfo_t *entry = NULL;
4360
    int32_t count = 0;
4361
    dict_t *volumes = NULL;
4362
    gf_cli_rsp rsp = {
4363
        0,
4364
    };
4365
    char *volname = NULL;
4366

4367
    priv = THIS->private;
4368
    GF_ASSERT(priv);
4369
    volumes = dict_new();
4370
    if (!volumes) {
4371
        gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
4372
               "Out of Memory");
4373
        goto out;
4374
    }
4375

4376
    if (cds_list_empty(&priv->volumes)) {
4377
        if (flags == GF_CLI_GET_VOLUME)
4378
            ret_bkp = -1;
4379
        ret = 0;
4380
        goto respond;
4381
    }
4382
    if (flags == GF_CLI_GET_NEXT_VOLUME) {
4383
        ret = dict_get_str(dict, "volname", &volname);
4384

4385
        if (ret) {
4386
            if (priv->volumes.next) {
4387
                entry = cds_list_entry(priv->volumes.next, typeof(*entry),
4388
                                       vol_list);
4389
            }
4390
        } else {
4391
            ret = glusterd_volinfo_find(volname, &entry);
4392
            if (ret)
4393
                goto respond;
4394
            entry = cds_list_entry(entry->vol_list.next, typeof(*entry),
4395
                                   vol_list);
4396
        }
4397

4398
        if (&entry->vol_list == &priv->volumes) {
4399
            goto respond;
4400
        } else {
4401
            ret = glusterd_add_volume_detail_to_dict(entry, volumes, count);
4402
            if (ret)
4403
                goto respond;
4404

4405
            count++;
4406
        }
4407
    } else if (flags == GF_CLI_GET_VOLUME) {
4408
        ret = dict_get_str(dict, "volname", &volname);
4409

4410
        if (ret)
4411
            goto respond;
4412

4413
        ret = glusterd_volinfo_find(volname, &entry);
4414
        if (ret) {
4415
            ret_bkp = ret;
4416
            goto respond;
4417
        }
4418

4419
        ret = glusterd_add_volume_detail_to_dict(entry, volumes, count);
4420
        if (ret)
4421
            goto respond;
4422

4423
        count++;
4424
    }
4425

4426
respond:
4427
    ret = dict_set_int32_sizen(volumes, "count", count);
4428
    if (ret)
4429
        goto out;
4430
    ret = dict_allocate_and_serialize(volumes, &rsp.dict.dict_val,
4431
                                      &rsp.dict.dict_len);
4432

4433
    if (ret)
4434
        goto out;
4435

4436
    ret = 0;
4437
out:
4438
    if (ret_bkp == -1) {
4439
        rsp.op_ret = ret_bkp;
4440
        rsp.op_errstr = "Volume does not exist";
4441
        rsp.op_errno = EG_NOVOL;
4442
    } else {
4443
        rsp.op_ret = ret;
4444
        rsp.op_errstr = "";
4445
    }
4446
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp);
4447
    ret = 0;
4448

4449
    if (volumes)
4450
        dict_unref(volumes);
4451

4452
    GF_FREE(rsp.dict.dict_val);
4453
    return ret;
4454
}
4455

4456
int
4457
__glusterd_handle_status_volume(rpcsvc_request_t *req)
4458
{
4459
    int32_t ret = -1;
4460
    uint32_t cmd = 0;
4461
    dict_t *dict = NULL;
4462
    char *volname = 0;
4463
    gf_cli_req cli_req = {{
4464
        0,
4465
    }};
4466
    glusterd_op_t cli_op = GD_OP_STATUS_VOLUME;
4467
    char err_str[256] = {
4468
        0,
4469
    };
4470
    xlator_t *this = THIS;
4471
    glusterd_conf_t *conf = NULL;
4472

4473
    GF_ASSERT(req);
4474
    conf = this->private;
4475
    GF_ASSERT(conf);
4476

4477
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
4478
    if (ret < 0) {
4479
        // failed to decode msg;
4480
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
4481
               "Failed to decode "
4482
               "request received from cli");
4483
        req->rpc_err = GARBAGE_ARGS;
4484
        goto out;
4485
    }
4486

4487
    if (cli_req.dict.dict_len > 0) {
4488
        dict = dict_new();
4489
        if (!dict) {
4490
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
4491
                    NULL);
4492
            goto out;
4493
        }
4494
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
4495
                               &dict);
4496
        if (ret < 0) {
4497
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
4498
                   "failed to "
4499
                   "unserialize buffer");
4500
            snprintf(err_str, sizeof(err_str),
4501
                     "Unable to decode "
4502
                     "the command");
4503
            goto out;
4504
        }
4505
    }
4506

4507
    ret = dict_get_uint32(dict, "cmd", &cmd);
4508
    if (ret)
4509
        goto out;
4510

4511
    if (!(cmd & GF_CLI_STATUS_ALL)) {
4512
        ret = dict_get_str(dict, "volname", &volname);
4513
        if (ret) {
4514
            snprintf(err_str, sizeof(err_str),
4515
                     "Unable to get "
4516
                     "volume name");
4517
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s",
4518
                   err_str);
4519
            goto out;
4520
        }
4521
        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_STATUS_VOL_REQ_RCVD,
4522
               "Received status volume req for volume %s", volname);
4523
    }
4524

4525
    if ((cmd & GF_CLI_STATUS_QUOTAD) &&
4526
        (conf->op_version == GD_OP_VERSION_MIN)) {
4527
        snprintf(err_str, sizeof(err_str),
4528
                 "The cluster is operating "
4529
                 "at version 1. Getting the status of quotad is not "
4530
                 "allowed in this state.");
4531
        ret = -1;
4532
        goto out;
4533
    }
4534

4535
    ret = glusterd_op_begin_synctask(req, GD_OP_STATUS_VOLUME, dict);
4536

4537
out:
4538

4539
    if (ret) {
4540
        if (err_str[0] == '\0')
4541
            snprintf(err_str, sizeof(err_str), "Operation failed");
4542
        ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
4543
    }
4544
    free(cli_req.dict.dict_val);
4545

4546
    return ret;
4547
}
4548

4549
int
4550
glusterd_handle_status_volume(rpcsvc_request_t *req)
4551
{
4552
    return glusterd_big_locked_handler(req, __glusterd_handle_status_volume);
4553
}
4554

4555
int
4556
__glusterd_handle_cli_clearlocks_volume(rpcsvc_request_t *req)
4557
{
4558
    int32_t ret = -1;
4559
    gf_cli_req cli_req = {{
4560
        0,
4561
    }};
4562
    glusterd_op_t cli_op = GD_OP_CLEARLOCKS_VOLUME;
4563
    char *volname = NULL;
4564
    dict_t *dict = NULL;
4565
    char err_str[64] = {
4566
        0,
4567
    };
4568
    xlator_t *this = THIS;
4569

4570
    GF_ASSERT(req);
4571

4572
    ret = -1;
4573
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
4574
    if (ret < 0) {
4575
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
4576
               "Failed to decode "
4577
               "request received from cli");
4578
        req->rpc_err = GARBAGE_ARGS;
4579
        goto out;
4580
    }
4581

4582
    if (cli_req.dict.dict_len) {
4583
        dict = dict_new();
4584

4585
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
4586
                               &dict);
4587
        if (ret < 0) {
4588
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
4589
                   "failed to unserialize req-buffer to"
4590
                   " dictionary");
4591
            snprintf(err_str, sizeof(err_str),
4592
                     "unable to decode "
4593
                     "the command");
4594
            goto out;
4595
        }
4596

4597
    } else {
4598
        ret = -1;
4599
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_CLI_REQ_EMPTY,
4600
               "Empty cli request.");
4601
        goto out;
4602
    }
4603

4604
    ret = dict_get_str(dict, "volname", &volname);
4605
    if (ret) {
4606
        snprintf(err_str, sizeof(err_str),
4607
                 "Unable to get volume "
4608
                 "name");
4609
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
4610
               "%s", err_str);
4611
        goto out;
4612
    }
4613

4614
    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_CLRCLK_VOL_REQ_RCVD,
4615
           "Received clear-locks volume req "
4616
           "for volume %s",
4617
           volname);
4618

4619
    ret = glusterd_op_begin_synctask(req, GD_OP_CLEARLOCKS_VOLUME, dict);
4620

4621
out:
4622
    if (ret) {
4623
        if (err_str[0] == '\0')
4624
            snprintf(err_str, sizeof(err_str), "Operation failed");
4625
        ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
4626
    }
4627
    free(cli_req.dict.dict_val);
4628

4629
    return ret;
4630
}
4631

4632
static int
4633
glusterd_handle_cli_clearlocks_volume(rpcsvc_request_t *req)
4634
{
4635
    return glusterd_big_locked_handler(req,
4636
                                       __glusterd_handle_cli_clearlocks_volume);
4637
}
4638

4639
static int
4640
glusterd_volinfo_find_by_volume_id(uuid_t volume_id,
4641
                                   glusterd_volinfo_t **volinfo)
4642
{
4643
    int32_t ret = -1;
4644
    xlator_t *this = THIS;
4645
    glusterd_volinfo_t *voliter = NULL;
4646
    glusterd_conf_t *priv = NULL;
4647

4648
    if (!volume_id) {
4649
        gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
4650
        return -1;
4651
    }
4652

4653
    priv = this->private;
4654

4655
    cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
4656
    {
4657
        if (gf_uuid_compare(volume_id, voliter->volume_id))
4658
            continue;
4659
        *volinfo = voliter;
4660
        ret = 0;
4661
        gf_msg_debug(this->name, 0, "Volume %s found", voliter->volname);
4662
        break;
4663
    }
4664
    return ret;
4665
}
4666

4667
static int
4668
get_volinfo_from_brickid(char *brickid, glusterd_volinfo_t **volinfo)
4669
{
4670
    int ret = -1;
4671
    char *volid_str = NULL;
4672
    char *brick = NULL;
4673
    char *brickid_dup = NULL;
4674
    uuid_t volid = {0};
4675
    xlator_t *this = THIS;
4676

4677
    GF_ASSERT(brickid);
4678

4679
    brickid_dup = gf_strdup(brickid);
4680
    if (!brickid_dup)
4681
        goto out;
4682

4683
    volid_str = brickid_dup;
4684
    brick = strchr(brickid_dup, ':');
4685
    if (!brick) {
4686
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_NOT_FOUND,
4687
               "Invalid brickid");
4688
        goto out;
4689
    }
4690

4691
    *brick = '\0';
4692
    brick++;
4693
    gf_uuid_parse(volid_str, volid);
4694
    ret = glusterd_volinfo_find_by_volume_id(volid, volinfo);
4695
    if (ret) {
4696
        /* Check if it is a snapshot volume */
4697
        ret = glusterd_snap_volinfo_find_by_volume_id(volid, volinfo);
4698
        if (ret) {
4699
            gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_VOLINFO_GET_FAIL,
4700
                   "Failed to find volinfo");
4701
            goto out;
4702
        }
4703
    }
4704

4705
    ret = 0;
4706
out:
4707
    GF_FREE(brickid_dup);
4708
    return ret;
4709
}
4710

4711
static int
4712
__glusterd_handle_barrier(rpcsvc_request_t *req)
4713
{
4714
    int ret = -1;
4715
    xlator_t *this = THIS;
4716
    gf_cli_req cli_req = {{
4717
        0,
4718
    }};
4719
    dict_t *dict = NULL;
4720
    char *volname = NULL;
4721

4722
    GF_ASSERT(req);
4723

4724
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
4725
    if (ret < 0) {
4726
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
4727
               "Failed to decode "
4728
               "request received from cli");
4729
        req->rpc_err = GARBAGE_ARGS;
4730
        goto out;
4731
    }
4732

4733
    if (!cli_req.dict.dict_len) {
4734
        ret = -1;
4735
        goto out;
4736
    }
4737

4738
    dict = dict_new();
4739
    if (!dict) {
4740
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
4741
        ret = -1;
4742
        goto out;
4743
    }
4744
    ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, &dict);
4745
    if (ret < 0) {
4746
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
4747
               "Failed to unserialize "
4748
               "request dictionary.");
4749
        goto out;
4750
    }
4751

4752
    ret = dict_get_str(dict, "volname", &volname);
4753
    if (ret) {
4754
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
4755
               "Volname not present in "
4756
               "dict");
4757
        goto out;
4758
    }
4759
    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BARRIER_VOL_REQ_RCVD,
4760
           "Received barrier volume request for "
4761
           "volume %s",
4762
           volname);
4763

4764
    ret = glusterd_op_begin_synctask(req, GD_OP_BARRIER, dict);
4765

4766
out:
4767
    if (ret) {
4768
        ret = glusterd_op_send_cli_response(GD_OP_BARRIER, ret, 0, req, dict,
4769
                                            "Operation failed");
4770
    }
4771
    free(cli_req.dict.dict_val);
4772
    return ret;
4773
}
4774

4775
int
4776
glusterd_handle_barrier(rpcsvc_request_t *req)
4777
{
4778
    return glusterd_big_locked_handler(req, __glusterd_handle_barrier);
4779
}
4780

4781
static gf_boolean_t
4782
gd_is_global_option(char *opt_key)
4783
{
4784
    GF_VALIDATE_OR_GOTO(THIS->name, opt_key, out);
4785

4786
    return (strcmp(opt_key, GLUSTERD_SHARED_STORAGE_KEY) == 0 ||
4787
            strcmp(opt_key, GLUSTERD_QUORUM_RATIO_KEY) == 0 ||
4788
            strcmp(opt_key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0 ||
4789
            strcmp(opt_key, GLUSTERD_BRICK_MULTIPLEX_KEY) == 0 ||
4790
            strcmp(opt_key, GLUSTERD_LOCALTIME_LOGGING_KEY) == 0 ||
4791
            strcmp(opt_key, GLUSTERD_DAEMON_LOG_LEVEL_KEY) == 0 ||
4792
            strcmp(opt_key, GLUSTERD_MAX_OP_VERSION_KEY) == 0 ||
4793
            strcmp(opt_key, GLUSTER_BRICK_GRACEFUL_CLEANUP) == 0);
4794

4795
out:
4796
    return _gf_false;
4797
}
4798

4799
int32_t
4800
glusterd_get_volume_opts(rpcsvc_request_t *req, dict_t *dict)
4801
{
4802
    int32_t ret = -1;
4803
    int32_t count = 1;
4804
    int exists = 0;
4805
    char *key = NULL;
4806
    char *orig_key = NULL;
4807
    char *key_fixed = NULL;
4808
    char *volname = NULL;
4809
    char *value = NULL;
4810
    char err_str[2048] = {
4811
        0,
4812
    };
4813
    char dict_key[50] = {
4814
        0,
4815
    };
4816
    int keylen;
4817
    xlator_t *this = THIS;
4818
    glusterd_conf_t *priv = NULL;
4819
    glusterd_volinfo_t *volinfo = NULL;
4820
    gf_cli_rsp rsp = {
4821
        0,
4822
    };
4823
    char op_version_buff[10] = {
4824
        0,
4825
    };
4826

4827
    priv = this->private;
4828
    GF_ASSERT(priv);
4829

4830
    GF_ASSERT(req);
4831
    GF_ASSERT(dict);
4832

4833
    ret = dict_get_str(dict, "volname", &volname);
4834
    if (ret) {
4835
        snprintf(err_str, sizeof(err_str),
4836
                 "Failed to get volume "
4837
                 "name while handling get volume option command");
4838
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAME_NOTFOUND_IN_DICT,
4839
               "%s", err_str);
4840
        goto out;
4841
    }
4842

4843
    if (strcasecmp(volname, "all") == 0) {
4844
        ret = glusterd_get_global_options_for_all_vols(req, dict,
4845
                                                       &rsp.op_errstr);
4846
        goto out;
4847
    }
4848

4849
    ret = dict_get_str(dict, "key", &key);
4850
    if (ret) {
4851
        snprintf(err_str, sizeof(err_str),
4852
                 "Failed to get key "
4853
                 "while handling get volume option for %s",
4854
                 volname);
4855
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
4856
               err_str);
4857
        goto out;
4858
    }
4859
    gf_msg_debug(this->name, 0,
4860
                 "Received get volume opt request for "
4861
                 "volume %s",
4862
                 volname);
4863

4864
    ret = glusterd_volinfo_find(volname, &volinfo);
4865
    if (ret) {
4866
        snprintf(err_str, sizeof(err_str), FMTSTR_CHECK_VOL_EXISTS, volname);
4867
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
4868
               FMTSTR_CHECK_VOL_EXISTS, volname);
4869
        goto out;
4870
    }
4871
    if (strcmp(key, "all")) {
4872
        if (fnmatch(GD_HOOKS_SPECIFIC_KEY, key, FNM_NOESCAPE) == 0) {
4873
            keylen = sprintf(dict_key, "key%d", count);
4874
            ret = dict_set_strn(dict, dict_key, keylen, key);
4875
            if (ret) {
4876
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
4877
                       "Failed to "
4878
                       "set %s in dictionary",
4879
                       key);
4880
                goto out;
4881
            }
4882
            ret = dict_get_str(volinfo->dict, key, &value);
4883
            if (ret) {
4884
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
4885
                       "Failed to "
4886
                       "get %s in dictionary",
4887
                       key);
4888
                goto out;
4889
            }
4890
            keylen = sprintf(dict_key, "value%d", count);
4891
            ret = dict_set_strn(dict, dict_key, keylen, value);
4892
            if (ret) {
4893
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
4894
                       "Failed to "
4895
                       "set %s in dictionary",
4896
                       key);
4897
                goto out;
4898
            }
4899
        } else {
4900
            exists = glusterd_check_option_exists(key, &key_fixed);
4901
            if (!exists) {
4902
                snprintf(err_str, sizeof(err_str),
4903
                         "Option "
4904
                         "with name: %s does not exist",
4905
                         key);
4906
                gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_UNKNOWN_KEY,
4907
                       "%s", err_str);
4908
                if (key_fixed)
4909
                    snprintf(err_str + ret, sizeof(err_str) - ret,
4910
                             "Did you mean %s?", key_fixed);
4911
                ret = -1;
4912
                goto out;
4913
            }
4914
            if (key_fixed) {
4915
                orig_key = key;
4916
                key = key_fixed;
4917
            }
4918

4919
            if (gd_is_global_option(key)) {
4920
                char warn_str[] =
4921
                    "Warning: support to get \
4922
                                        global option value using volume get \
4923
                                        <volname>` will be deprecated from \
4924
                                        next release. Consider using `volume \
4925
                                        get all` instead for global options";
4926

4927
                ret = dict_set_str_sizen(dict, "warning", warn_str);
4928
                if (ret) {
4929
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
4930
                           "Failed to set warning "
4931
                           "message in dictionary");
4932
                    goto out;
4933
                }
4934
            }
4935

4936
            if (strcmp(key, GLUSTERD_MAX_OP_VERSION_KEY) == 0) {
4937
                ret = glusterd_get_global_max_op_version(req, dict, 1);
4938
                if (ret)
4939
                    goto out;
4940
            } else if (strcmp(key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0) {
4941
                keylen = sprintf(dict_key, "key%d", count);
4942
                ret = dict_set_strn(dict, dict_key, keylen, key);
4943
                if (ret) {
4944
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
4945
                           "Failed"
4946
                           "to set %s in dictionary",
4947
                           key);
4948
                    goto out;
4949
                }
4950
                keylen = sprintf(dict_key, "value%d", count);
4951
                sprintf(op_version_buff, "%d", priv->op_version);
4952
                ret = dict_set_strn(dict, dict_key, keylen, op_version_buff);
4953
                if (ret) {
4954
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
4955
                           "Failed"
4956
                           " to set value for key %s in "
4957
                           "dictionary",
4958
                           key);
4959
                    goto out;
4960
                }
4961
            } else if (strcmp(key, "config.memory-accounting") == 0) {
4962
                keylen = sprintf(dict_key, "key%d", count);
4963
                ret = dict_set_strn(dict, dict_key, keylen, key);
4964
                if (ret) {
4965
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
4966
                           "Failed"
4967
                           " to set %s in dictionary",
4968
                           key);
4969
                    goto out;
4970
                }
4971
                keylen = sprintf(dict_key, "value%d", count);
4972

4973
                if (volinfo->memory_accounting)
4974
                    ret = dict_set_nstrn(dict, dict_key, keylen, "Enabled",
4975
                                         SLEN("Enabled"));
4976
                else
4977
                    ret = dict_set_nstrn(dict, dict_key, keylen, "Disabled",
4978
                                         SLEN("Disabled"));
4979
                if (ret) {
4980
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
4981
                           "Failed"
4982
                           " to set value for key %s in "
4983
                           "dictionary",
4984
                           key);
4985
                    goto out;
4986
                }
4987
            } else if (strcmp(key, "config.transport") == 0) {
4988
                keylen = sprintf(dict_key, "key%d", count);
4989
                ret = dict_set_strn(dict, dict_key, keylen, key);
4990
                if (ret) {
4991
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
4992
                           "Failed to set %s in "
4993
                           "dictionary",
4994
                           key);
4995
                    goto out;
4996
                }
4997
                keylen = sprintf(dict_key, "value%d", count);
4998

4999
                if (volinfo->transport_type == GF_TRANSPORT_RDMA)
5000
                    ret = dict_set_nstrn(dict, dict_key, keylen, "rdma",
5001
                                         SLEN("rdma"));
5002
                else if (volinfo->transport_type == GF_TRANSPORT_TCP)
5003
                    ret = dict_set_nstrn(dict, dict_key, keylen, "tcp",
5004
                                         SLEN("tcp"));
5005
                else if (volinfo->transport_type == GF_TRANSPORT_BOTH_TCP_RDMA)
5006
                    ret = dict_set_nstrn(dict, dict_key, keylen, "tcp,rdma",
5007
                                         SLEN("tcp,rdma"));
5008
                else
5009
                    ret = dict_set_nstrn(dict, dict_key, keylen, "none",
5010
                                         SLEN("none"));
5011

5012
                if (ret) {
5013
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
5014
                           "Failed to set value for key "
5015
                           "%s in dictionary",
5016
                           key);
5017
                    goto out;
5018
                }
5019
            } else {
5020
                keylen = sprintf(dict_key, "key%d", count);
5021
                ret = dict_set_strn(dict, dict_key, keylen, key);
5022
                if (ret) {
5023
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
5024
                           "Failed to set %s in "
5025
                           "dictionary",
5026
                           key);
5027
                    goto out;
5028
                }
5029
                keylen = sprintf(dict_key, "value%d", count);
5030
                ret = dict_get_str(priv->opts, key, &value);
5031
                if (!ret) {
5032
                    ret = dict_set_strn(dict, dict_key, keylen, value);
5033
                    if (ret) {
5034
                        gf_msg(this->name, GF_LOG_ERROR, 0,
5035
                               GD_MSG_DICT_SET_FAILED,
5036
                               "Failed to set %s in "
5037
                               " dictionary",
5038
                               key);
5039
                        goto out;
5040
                    }
5041
                } else {
5042
                    ret = glusterd_get_default_val_for_volopt(
5043
                        dict, _gf_false, key, orig_key, volinfo,
5044
                        &rsp.op_errstr);
5045
                    if (ret && !rsp.op_errstr) {
5046
                        snprintf(err_str, sizeof(err_str),
5047
                                 "Failed to fetch the "
5048
                                 "value of %s, check "
5049
                                 "log file for more"
5050
                                 " details",
5051
                                 key);
5052
                    }
5053
                }
5054
            }
5055
        }
5056
        /* Request is for a single option, explicitly set count to 1
5057
         * in the dictionary.
5058
         */
5059
        ret = dict_set_int32_sizen(dict, "count", 1);
5060
        if (ret) {
5061
            gf_msg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
5062
                   "Failed to set count "
5063
                   "value in the dictionary");
5064
            goto out;
5065
        }
5066
    } else {
5067
        /* Handle the "all" volume option request */
5068
        ret = glusterd_get_default_val_for_volopt(dict, _gf_true, NULL, NULL,
5069
                                                  volinfo, &rsp.op_errstr);
5070
        if (ret && !rsp.op_errstr) {
5071
            snprintf(err_str, sizeof(err_str),
5072
                     "Failed to fetch the value of all volume "
5073
                     "options, check log file for more details");
5074
        }
5075
    }
5076

5077
out:
5078
    if (ret) {
5079
        if (!rsp.op_errstr)
5080
            rsp.op_errstr = err_str;
5081
        rsp.op_ret = ret;
5082
    } else {
5083
        rsp.op_errstr = "";
5084
        rsp.op_ret = 0;
5085
    }
5086

5087
    ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
5088
                                      &rsp.dict.dict_len);
5089

5090
    glusterd_submit_reply(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp);
5091
    GF_FREE(rsp.dict.dict_val);
5092
    GF_FREE(key_fixed);
5093
    return ret;
5094
}
5095

5096
int
5097
__glusterd_handle_get_vol_opt(rpcsvc_request_t *req)
5098
{
5099
    int32_t ret = -1;
5100
    gf_cli_req cli_req = {{
5101
        0,
5102
    }};
5103
    dict_t *dict = NULL;
5104
    char err_str[64] = {
5105
        0,
5106
    };
5107
    xlator_t *this = THIS;
5108

5109
    GF_ASSERT(req);
5110

5111
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
5112
    if (ret < 0) {
5113
        snprintf(err_str, sizeof(err_str),
5114
                 "Failed to decode "
5115
                 "request received from cli");
5116
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
5117
               err_str);
5118
        req->rpc_err = GARBAGE_ARGS;
5119
        goto out;
5120
    }
5121

5122
    if (cli_req.dict.dict_len) {
5123
        /* Unserialize the dictionary */
5124
        dict = dict_new();
5125

5126
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
5127
                               &dict);
5128
        if (ret < 0) {
5129
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
5130
                   "failed to "
5131
                   "unserialize req-buffer to dictionary");
5132
            snprintf(err_str, sizeof(err_str),
5133
                     "Unable to decode "
5134
                     "the command");
5135
            goto out;
5136
        } else {
5137
            dict->extra_stdfree = cli_req.dict.dict_val;
5138
        }
5139
    }
5140
    ret = glusterd_get_volume_opts(req, dict);
5141

5142
out:
5143
    if (dict)
5144
        dict_unref(dict);
5145

5146
    return ret;
5147
}
5148

5149
int
5150
glusterd_handle_get_vol_opt(rpcsvc_request_t *req)
5151
{
5152
    return glusterd_big_locked_handler(req, __glusterd_handle_get_vol_opt);
5153
}
5154

5155
extern struct rpc_clnt_program gd_brick_prog;
5156

5157
static int
5158
glusterd_print_global_options(dict_t *opts, char *key, data_t *val, void *data)
5159
{
5160
    FILE *fp = NULL;
5161

5162
    GF_VALIDATE_OR_GOTO(THIS->name, key, out);
5163
    GF_VALIDATE_OR_GOTO(THIS->name, val, out);
5164
    GF_VALIDATE_OR_GOTO(THIS->name, data, out);
5165

5166
    if (strcmp(key, GLUSTERD_GLOBAL_OPT_VERSION) == 0)
5167
        goto out;
5168

5169
    fp = (FILE *)data;
5170
    fprintf(fp, "%s: %s\n", key, val->data);
5171
out:
5172
    return 0;
5173
}
5174

5175
static int
5176
glusterd_print_volume_options(dict_t *opts, char *key, data_t *val, void *data)
5177
{
5178
    FILE *fp = NULL;
5179

5180
    GF_VALIDATE_OR_GOTO(THIS->name, key, out);
5181
    GF_VALIDATE_OR_GOTO(THIS->name, val, out);
5182
    GF_VALIDATE_OR_GOTO(THIS->name, data, out);
5183

5184
    fp = (FILE *)data;
5185
    fprintf(fp, "Volume%d.options.%s: %s\n", volcount, key, val->data);
5186
out:
5187
    return 0;
5188
}
5189

5190
static int
5191
glusterd_print_gsync_status(FILE *fp, dict_t *gsync_dict)
5192
{
5193
    int ret = -1;
5194
    int gsync_count = 0;
5195
    int i = 0;
5196
    gf_gsync_status_t *status_vals = NULL;
5197
    char status_val_name[PATH_MAX] = {
5198
        0,
5199
    };
5200

5201
    GF_VALIDATE_OR_GOTO(THIS->name, fp, out);
5202
    GF_VALIDATE_OR_GOTO(THIS->name, gsync_dict, out);
5203

5204
    ret = dict_get_int32(gsync_dict, "gsync-count", &gsync_count);
5205
    fprintf(fp, "Volume%d.gsync_count: %d\n", volcount, gsync_count);
5206

5207
    if (gsync_count == 0) {
5208
        ret = 0;
5209
        goto out;
5210
    }
5211

5212
    for (i = 0; i < gsync_count; i++) {
5213
        snprintf(status_val_name, sizeof(status_val_name), "status_value%d", i);
5214

5215
        ret = dict_get_bin(gsync_dict, status_val_name,
5216
                           (void **)&(status_vals));
5217
        if (ret)
5218
            goto out;
5219

5220
        fprintf(fp, "Volume%d.pair%d.session_secondary: %s\n", volcount, i + 1,
5221
                get_struct_variable(21, status_vals));
5222
        fprintf(fp, "Volume%d.pair%d.primary_node: %s\n", volcount, i + 1,
5223
                get_struct_variable(0, status_vals));
5224
        fprintf(fp, "Volume%d.pair%d.primary_volume: %s\n", volcount, i + 1,
5225
                get_struct_variable(1, status_vals));
5226
        fprintf(fp, "Volume%d.pair%d.primary_brick: %s\n", volcount, i + 1,
5227
                get_struct_variable(2, status_vals));
5228
        fprintf(fp, "Volume%d.pair%d.secondary_user: %s\n", volcount, i + 1,
5229
                get_struct_variable(3, status_vals));
5230
        fprintf(fp, "Volume%d.pair%d.secondary: %s\n", volcount, i + 1,
5231
                get_struct_variable(4, status_vals));
5232
        fprintf(fp, "Volume%d.pair%d.secondary_node: %s\n", volcount, i + 1,
5233
                get_struct_variable(5, status_vals));
5234
        fprintf(fp, "Volume%d.pair%d.status: %s\n", volcount, i + 1,
5235
                get_struct_variable(6, status_vals));
5236
        fprintf(fp, "Volume%d.pair%d.crawl_status: %s\n", volcount, i + 1,
5237
                get_struct_variable(7, status_vals));
5238
        fprintf(fp, "Volume%d.pair%d.last_synced: %s\n", volcount, i + 1,
5239
                get_struct_variable(8, status_vals));
5240
        fprintf(fp, "Volume%d.pair%d.entry: %s\n", volcount, i + 1,
5241
                get_struct_variable(9, status_vals));
5242
        fprintf(fp, "Volume%d.pair%d.data: %s\n", volcount, i + 1,
5243
                get_struct_variable(10, status_vals));
5244
        fprintf(fp, "Volume%d.pair%d.meta: %s\n", volcount, i + 1,
5245
                get_struct_variable(11, status_vals));
5246
        fprintf(fp, "Volume%d.pair%d.failures: %s\n", volcount, i + 1,
5247
                get_struct_variable(12, status_vals));
5248
        fprintf(fp, "Volume%d.pair%d.checkpoint_time: %s\n", volcount, i + 1,
5249
                get_struct_variable(13, status_vals));
5250
        fprintf(fp, "Volume%d.pair%d.checkpoint_completed: %s\n", volcount,
5251
                i + 1, get_struct_variable(14, status_vals));
5252
        fprintf(fp, "Volume%d.pair%d.checkpoint_completion_time: %s\n",
5253
                volcount, i + 1, get_struct_variable(15, status_vals));
5254
    }
5255
out:
5256
    return ret;
5257
}
5258

5259
static int
5260
glusterd_print_gsync_status_by_vol(FILE *fp, glusterd_volinfo_t *volinfo)
5261
{
5262
    int ret = -1;
5263
    dict_t *gsync_rsp_dict = NULL;
5264
    xlator_t *this = THIS;
5265

5266
    GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
5267
    GF_VALIDATE_OR_GOTO(this->name, fp, out);
5268

5269
    gsync_rsp_dict = dict_new();
5270
    if (!gsync_rsp_dict) {
5271
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
5272
        goto out;
5273
    }
5274

5275
    ret = glusterd_get_gsync_status_mst(volinfo, gsync_rsp_dict,
5276
                                        gf_gethostname());
5277
    /* Ignoring ret as above function always returns ret = 0 */
5278

5279
    ret = glusterd_print_gsync_status(fp, gsync_rsp_dict);
5280
out:
5281
    if (gsync_rsp_dict)
5282
        dict_unref(gsync_rsp_dict);
5283
    return ret;
5284
}
5285

5286
static int
5287
glusterd_print_snapinfo_by_vol(FILE *fp, glusterd_volinfo_t *volinfo,
5288
                               int volcount)
5289
{
5290
    int ret = -1;
5291
    glusterd_volinfo_t *snap_vol = NULL;
5292
    glusterd_volinfo_t *tmp_vol = NULL;
5293
    glusterd_snap_t *snapinfo = NULL;
5294
    int snapcount = 0;
5295
    char timestr[GF_TIMESTR_SIZE] = {
5296
        0,
5297
    };
5298
    char snap_status_str[STATUS_STRLEN] = {
5299
        0,
5300
    };
5301

5302
    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
5303
    GF_VALIDATE_OR_GOTO(THIS->name, fp, out);
5304

5305
    cds_list_for_each_entry_safe(snap_vol, tmp_vol, &volinfo->snap_volumes,
5306
                                 snapvol_list)
5307
    {
5308
        snapcount++;
5309
        snapinfo = snap_vol->snapshot;
5310

5311
        ret = glusterd_get_snap_status_str(snapinfo, snap_status_str);
5312
        if (ret) {
5313
            gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
5314
                   "Failed to get status for snapshot: %s", snapinfo->snapname);
5315

5316
            goto out;
5317
        }
5318
        gf_time_fmt_FT(timestr, sizeof timestr, snapinfo->time_stamp);
5319

5320
        fprintf(fp, "Volume%d.snapshot%d.name: %s\n", volcount, snapcount,
5321
                snapinfo->snapname);
5322
        fprintf(fp, "Volume%d.snapshot%d.id: %s\n", volcount, snapcount,
5323
                uuid_utoa(snapinfo->snap_id));
5324
        fprintf(fp, "Volume%d.snapshot%d.time: %s\n", volcount, snapcount,
5325
                timestr);
5326

5327
        if (snapinfo->description)
5328
            fprintf(fp, "Volume%d.snapshot%d.description: %s\n", volcount,
5329
                    snapcount, snapinfo->description);
5330
        fprintf(fp, "Volume%d.snapshot%d.status: %s\n", volcount, snapcount,
5331
                snap_status_str);
5332
    }
5333

5334
    ret = 0;
5335
out:
5336
    return ret;
5337
}
5338

5339
static int
5340
glusterd_print_client_details(FILE *fp, dict_t *dict,
5341
                              glusterd_volinfo_t *volinfo, int volcount,
5342
                              glusterd_brickinfo_t *brickinfo, int brickcount)
5343
{
5344
    int ret = -1;
5345
    xlator_t *this = THIS;
5346
    int brick_index = -1;
5347
    int client_count = 0;
5348
    char key[64] = {
5349
        0,
5350
    };
5351
    int keylen;
5352
    char *clientname = NULL;
5353
    uint64_t bytesread = 0;
5354
    uint64_t byteswrite = 0;
5355
    uint32_t opversion = 0;
5356

5357
    glusterd_pending_node_t *pending_node = NULL;
5358
    rpc_clnt_t *rpc = NULL;
5359
    struct syncargs args = {
5360
        0,
5361
    };
5362
    gd1_mgmt_brick_op_req *brick_req = NULL;
5363

5364
    GF_VALIDATE_OR_GOTO(this->name, dict, out);
5365

5366
    if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
5367
        !glusterd_is_brick_started(brickinfo)) {
5368
        ret = 0;
5369
        goto out;
5370
    }
5371

5372
    brick_index++;
5373
    pending_node = GF_CALLOC(1, sizeof(*pending_node),
5374
                             gf_gld_mt_pending_node_t);
5375
    if (!pending_node) {
5376
        ret = -1;
5377
        gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
5378
               "Unable to allocate memory");
5379
        goto out;
5380
    }
5381

5382
    pending_node->node = brickinfo;
5383
    pending_node->type = GD_NODE_BRICK;
5384
    pending_node->index = brick_index;
5385

5386
    rpc = glusterd_pending_node_get_rpc(pending_node);
5387
    if (!rpc) {
5388
        ret = -1;
5389
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RPC_FAILURE,
5390
               "Failed to retrieve rpc object");
5391
        goto out;
5392
    }
5393

5394
    brick_req = GF_CALLOC(1, sizeof(*brick_req), gf_gld_mt_mop_brick_req_t);
5395
    if (!brick_req) {
5396
        ret = -1;
5397
        gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
5398
               "Unable to allocate memory");
5399
        goto out;
5400
    }
5401

5402
    brick_req->op = GLUSTERD_BRICK_STATUS;
5403
    brick_req->name = "";
5404
    brick_req->dict.dict_val = NULL;
5405
    brick_req->dict.dict_len = 0;
5406

5407
    ret = dict_set_str_sizen(dict, "brick-name", brickinfo->path);
5408
    if (ret) {
5409
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
5410
                "Key=brick-name", NULL);
5411
        goto out;
5412
    }
5413

5414
    ret = dict_set_int32_sizen(dict, "cmd", GF_CLI_STATUS_CLIENTS);
5415
    if (ret) {
5416
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
5417
                "Key=cmd", NULL);
5418
        goto out;
5419
    }
5420

5421
    ret = dict_set_str_sizen(dict, "volname", volinfo->volname);
5422
    if (ret) {
5423
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
5424
                "Key=volname", NULL);
5425
        goto out;
5426
    }
5427

5428
    ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
5429
                                      &brick_req->input.input_len);
5430
    if (ret)
5431
        goto out;
5432

5433
    GD_SYNCOP(rpc, (&args), NULL, gd_syncop_brick_op_cbk, brick_req,
5434
              &gd_brick_prog, brick_req->op, xdr_gd1_mgmt_brick_op_req);
5435

5436
    if (args.op_ret)
5437
        goto out;
5438

5439
    ret = dict_get_int32(args.dict, "clientcount", &client_count);
5440
    if (ret) {
5441
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
5442
               "Couldn't get client count");
5443
        goto out;
5444
    }
5445

5446
    fprintf(fp, "Volume%d.Brick%d.client_count: %d\n", volcount, brickcount,
5447
            client_count);
5448

5449
    if (client_count == 0) {
5450
        ret = 0;
5451
        goto out;
5452
    }
5453

5454
    int i;
5455
    for (i = 1; i <= client_count; i++) {
5456
        keylen = snprintf(key, sizeof(key), "client%d.hostname", i - 1);
5457
        ret = dict_get_strn(args.dict, key, keylen, &clientname);
5458
        if (ret) {
5459
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
5460
                   "Failed to get client hostname");
5461
            goto out;
5462
        }
5463

5464
        snprintf(key, sizeof(key), "Client%d.hostname", i);
5465
        fprintf(fp, "Volume%d.Brick%d.%s: %s\n", volcount, brickcount, key,
5466
                clientname);
5467

5468
        snprintf(key, sizeof(key), "client%d.bytesread", i - 1);
5469
        ret = dict_get_uint64(args.dict, key, &bytesread);
5470
        if (ret) {
5471
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
5472
                   "Failed to get bytesread from client");
5473
            goto out;
5474
        }
5475

5476
        snprintf(key, sizeof(key), "Client%d.bytesread", i);
5477
        fprintf(fp, "Volume%d.Brick%d.%s: %" PRIu64 "\n", volcount, brickcount,
5478
                key, bytesread);
5479

5480
        snprintf(key, sizeof(key), "client%d.byteswrite", i - 1);
5481
        ret = dict_get_uint64(args.dict, key, &byteswrite);
5482
        if (ret) {
5483
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
5484
                   "Failed to get byteswrite from client");
5485
            goto out;
5486
        }
5487

5488
        snprintf(key, sizeof(key), "Client%d.byteswrite", i);
5489
        fprintf(fp, "Volume%d.Brick%d.%s: %" PRIu64 "\n", volcount, brickcount,
5490
                key, byteswrite);
5491

5492
        snprintf(key, sizeof(key), "client%d.opversion", i - 1);
5493
        ret = dict_get_uint32(args.dict, key, &opversion);
5494
        if (ret) {
5495
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
5496
                   "Failed to get client opversion");
5497
            goto out;
5498
        }
5499

5500
        snprintf(key, sizeof(key), "Client%d.opversion", i);
5501
        fprintf(fp, "Volume%d.Brick%d.%s: %" PRIu32 "\n", volcount, brickcount,
5502
                key, opversion);
5503
    }
5504

5505
out:
5506
    if (pending_node)
5507
        GF_FREE(pending_node);
5508

5509
    if (brick_req) {
5510
        if (brick_req->input.input_val)
5511
            GF_FREE(brick_req->input.input_val);
5512
        GF_FREE(brick_req);
5513
    }
5514
    if (args.dict)
5515
        dict_unref(args.dict);
5516
    if (args.errstr)
5517
        GF_FREE(args.errstr);
5518

5519
    return ret;
5520
}
5521

5522
static int
5523
glusterd_volume_get_type_str(glusterd_volinfo_t *volinfo, char **voltype_str)
5524
{
5525
    int ret = -1;
5526
    int type = 0;
5527

5528
    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
5529

5530
    type = get_vol_type(volinfo->type, volinfo->dist_leaf_count,
5531
                        volinfo->brick_count);
5532

5533
    *voltype_str = vol_type_str[type];
5534

5535
    ret = 0;
5536
out:
5537
    return ret;
5538
}
5539

5540
static int
5541
glusterd_volume_get_status_str(glusterd_volinfo_t *volinfo, char *status_str)
5542
{
5543
    int ret = -1;
5544

5545
    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
5546
    GF_VALIDATE_OR_GOTO(THIS->name, status_str, out);
5547

5548
    switch (volinfo->status) {
5549
        case GLUSTERD_STATUS_NONE:
5550
            sprintf(status_str, "%s", "Created");
5551
            break;
5552
        case GLUSTERD_STATUS_STARTED:
5553
            sprintf(status_str, "%s", "Started");
5554
            break;
5555
        case GLUSTERD_STATUS_STOPPED:
5556
            sprintf(status_str, "%s", "Stopped");
5557
            break;
5558
        default:
5559
            goto out;
5560
    }
5561
    ret = 0;
5562
out:
5563
    return ret;
5564
}
5565

5566
static void
5567
glusterd_brick_get_status_str(glusterd_brickinfo_t *brickinfo, char *status_str)
5568
{
5569
    GF_VALIDATE_OR_GOTO(THIS->name, brickinfo, out);
5570
    GF_VALIDATE_OR_GOTO(THIS->name, status_str, out);
5571

5572
    switch (brickinfo->status) {
5573
        case GF_BRICK_STOPPED:
5574
            sprintf(status_str, "%s", "Stopped");
5575
            break;
5576
        case GF_BRICK_STARTED:
5577
            sprintf(status_str, "%s", "Started");
5578
            break;
5579
        case GF_BRICK_STARTING:
5580
            sprintf(status_str, "%s", "Starting");
5581
            break;
5582
        case GF_BRICK_STOPPING:
5583
            sprintf(status_str, "%s", "Stopping");
5584
            break;
5585
        default:
5586
            sprintf(status_str, "%s", "None");
5587
            break;
5588
    }
5589

5590
out:
5591
    return;
5592
}
5593

5594
static int
5595
glusterd_volume_get_transport_type_str(glusterd_volinfo_t *volinfo,
5596
                                       char *transport_type_str)
5597
{
5598
    int ret = -1;
5599

5600
    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
5601
    GF_VALIDATE_OR_GOTO(THIS->name, transport_type_str, out);
5602

5603
    switch (volinfo->transport_type) {
5604
        case GF_TRANSPORT_TCP:
5605
            sprintf(transport_type_str, "%s", "tcp");
5606
            break;
5607
        case GF_TRANSPORT_RDMA:
5608
            sprintf(transport_type_str, "%s", "rdma");
5609
            break;
5610
        case GF_TRANSPORT_BOTH_TCP_RDMA:
5611
            sprintf(transport_type_str, "%s", "tcp_rdma_both");
5612
            break;
5613
        default:
5614
            goto out;
5615
    }
5616
    ret = 0;
5617
out:
5618
    return ret;
5619
}
5620

5621
static int
5622
glusterd_volume_get_quorum_status_str(glusterd_volinfo_t *volinfo,
5623
                                      char *quorum_status_str)
5624
{
5625
    int ret = -1;
5626

5627
    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
5628
    GF_VALIDATE_OR_GOTO(THIS->name, quorum_status_str, out);
5629

5630
    switch (volinfo->quorum_status) {
5631
        case NOT_APPLICABLE_QUORUM:
5632
            sprintf(quorum_status_str, "%s", "not_applicable");
5633
            break;
5634
        case MEETS_QUORUM:
5635
            sprintf(quorum_status_str, "%s", "meets");
5636
            break;
5637
        case DOESNT_MEET_QUORUM:
5638
            sprintf(quorum_status_str, "%s", "does_not_meet");
5639
            break;
5640
        default:
5641
            goto out;
5642
    }
5643
    ret = 0;
5644
out:
5645
    return ret;
5646
}
5647

5648
static int
5649
glusterd_volume_get_rebalance_status_str(glusterd_volinfo_t *volinfo,
5650
                                         char *rebal_status_str)
5651
{
5652
    int ret = -1;
5653

5654
    GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
5655
    GF_VALIDATE_OR_GOTO(THIS->name, rebal_status_str, out);
5656

5657
    switch (volinfo->rebal.defrag_status) {
5658
        case GF_DEFRAG_STATUS_NOT_STARTED:
5659
            sprintf(rebal_status_str, "%s", "not_started");
5660
            break;
5661
        case GF_DEFRAG_STATUS_STARTED:
5662
            sprintf(rebal_status_str, "%s", "started");
5663
            break;
5664
        case GF_DEFRAG_STATUS_STOPPED:
5665
            sprintf(rebal_status_str, "%s", "stopped");
5666
            break;
5667
        case GF_DEFRAG_STATUS_COMPLETE:
5668
            sprintf(rebal_status_str, "%s", "completed");
5669
            break;
5670
        case GF_DEFRAG_STATUS_FAILED:
5671
            sprintf(rebal_status_str, "%s", "failed");
5672
            break;
5673
        case GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED:
5674
            sprintf(rebal_status_str, "%s", "layout_fix_started");
5675
            break;
5676
        case GF_DEFRAG_STATUS_LAYOUT_FIX_STOPPED:
5677
            sprintf(rebal_status_str, "%s", "layout_fix_stopped");
5678
            break;
5679
        case GF_DEFRAG_STATUS_LAYOUT_FIX_COMPLETE:
5680
            sprintf(rebal_status_str, "%s", "layout_fix_complete");
5681
            break;
5682
        case GF_DEFRAG_STATUS_LAYOUT_FIX_FAILED:
5683
            sprintf(rebal_status_str, "%s", "layout_fix_failed");
5684
            break;
5685
        default:
5686
            goto out;
5687
    }
5688
    ret = 0;
5689
out:
5690
    return ret;
5691
}
5692

5693
static int
5694
glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
5695
{
5696
    int32_t ret = -1;
5697
    gf_cli_rsp rsp = {
5698
        0,
5699
    };
5700
    FILE *fp = NULL;
5701
    DIR *dp = NULL;
5702
    char err_str[2048] = {
5703
        0,
5704
    };
5705
    glusterd_conf_t *priv = NULL;
5706
    glusterd_peerinfo_t *peerinfo = NULL;
5707
    glusterd_peer_hostname_t *peer_hostname_info = NULL;
5708
    glusterd_volinfo_t *volinfo = NULL;
5709
    glusterd_brickinfo_t *brickinfo = NULL;
5710
    xlator_t *this = THIS;
5711
    dict_t *vol_all_opts = NULL;
5712
    struct statvfs brickstat = {0};
5713
    char *odir = NULL;
5714
    char *filename = NULL;
5715
    char *ofilepath = NULL;
5716
    char *tmp_str = NULL;
5717
    int count = 0;
5718
    int count_bkp = 0;
5719
    int odirlen = 0;
5720
    time_t now = 0;
5721
    char timestamp[16] = {
5722
        0,
5723
    };
5724
    uint32_t get_state_cmd = 0;
5725
    uint64_t memtotal = 0;
5726
    uint64_t memfree = 0;
5727
    char id_str[64] = {
5728
        0,
5729
    };
5730

5731
    char *vol_type_str = NULL;
5732

5733
    char transport_type_str[STATUS_STRLEN] = {
5734
        0,
5735
    };
5736
    char quorum_status_str[STATUS_STRLEN] = {
5737
        0,
5738
    };
5739
    char rebal_status_str[STATUS_STRLEN] = {
5740
        0,
5741
    };
5742
    char vol_status_str[STATUS_STRLEN] = {
5743
        0,
5744
    };
5745
    char brick_status_str[STATUS_STRLEN] = {
5746
        0,
5747
    };
5748

5749
    priv = THIS->private;
5750
    GF_VALIDATE_OR_GOTO(this->name, priv, out);
5751

5752
    GF_VALIDATE_OR_GOTO(this->name, dict, out);
5753

5754
    ret = dict_get_str(dict, "odir", &tmp_str);
5755
    if (ret) {
5756
        odirlen = gf_asprintf(&odir, "%s", "/var/run/gluster/");
5757
        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
5758
               "Default output directory: %s", odir);
5759
    } else {
5760
        odirlen = gf_asprintf(&odir, "%s", tmp_str);
5761
    }
5762

5763
    dp = sys_opendir(odir);
5764
    if (dp) {
5765
        sys_closedir(dp);
5766
    } else {
5767
        if (errno == ENOENT) {
5768
            snprintf(err_str, sizeof(err_str),
5769
                     "Output directory %s does not exist.", odir);
5770
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
5771
                   err_str);
5772
        } else if (errno == ENOTDIR) {
5773
            snprintf(err_str, sizeof(err_str),
5774
                     "Output directory "
5775
                     "does not exist. %s points to a file.",
5776
                     odir);
5777
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
5778
                   err_str);
5779
        }
5780

5781
        GF_FREE(odir);
5782
        ret = -1;
5783
        goto out;
5784
    }
5785

5786
    ret = dict_get_str(dict, "filename", &tmp_str);
5787
    if (ret) {
5788
        now = gf_time();
5789
        strftime(timestamp, sizeof(timestamp), "%Y%m%d_%H%M%S",
5790
                 localtime(&now));
5791
        gf_asprintf(&filename, "%s_%s", "glusterd_state", timestamp);
5792

5793
        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
5794
               "Default filename: %s", filename);
5795
    } else {
5796
        gf_asprintf(&filename, "%s", tmp_str);
5797
    }
5798

5799
    ret = gf_asprintf(&ofilepath, "%s%s%s", odir,
5800
                      ((odir[odirlen - 1] != '/') ? "/" : ""), filename);
5801

5802
    if (ret < 0) {
5803
        GF_FREE(odir);
5804
        GF_FREE(filename);
5805
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
5806
               "Unable to get the output path");
5807
        ret = -1;
5808
        goto out;
5809
    }
5810
    GF_FREE(odir);
5811
    GF_FREE(filename);
5812

5813
    ret = dict_set_dynstr_sizen(dict, "ofilepath", ofilepath);
5814
    if (ret) {
5815
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
5816
               "Unable to set output path");
5817
        goto out;
5818
    }
5819

5820
    fp = fopen(ofilepath, "w");
5821
    if (!fp) {
5822
        snprintf(err_str, sizeof(err_str), "Failed to open file at %s",
5823
                 ofilepath);
5824
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED, "%s",
5825
               err_str);
5826
        ret = -1;
5827
        goto out;
5828
    }
5829

5830
    ret = dict_get_uint32(dict, "getstate-cmd", &get_state_cmd);
5831
    if (ret) {
5832
        gf_msg_debug(this->name, 0, "get-state command type not set");
5833
        ret = 0;
5834
    }
5835

5836
    if (get_state_cmd == GF_CLI_GET_STATE_VOLOPTS) {
5837
        fprintf(fp, "[Volume Options]\n");
5838
        cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
5839
        {
5840
            fprintf(fp, "Volume%d.name: %s\n", ++count, volinfo->volname);
5841

5842
            volcount = count;
5843
            vol_all_opts = dict_new();
5844

5845
            ret = glusterd_get_default_val_for_volopt(
5846
                vol_all_opts, _gf_true, NULL, NULL, volinfo, &rsp.op_errstr);
5847
            if (ret) {
5848
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_OPTS_IMPORT_FAIL,
5849
                       "Failed to "
5850
                       "fetch the value of all volume options "
5851
                       "for volume %s",
5852
                       volinfo->volname);
5853
                if (vol_all_opts)
5854
                    dict_unref(vol_all_opts);
5855
                continue;
5856
            }
5857

5858
            dict_foreach(vol_all_opts, glusterd_print_volume_options, fp);
5859

5860
            if (vol_all_opts)
5861
                dict_unref(vol_all_opts);
5862
        }
5863
        ret = 0;
5864
        goto out;
5865
    }
5866

5867
    fprintf(fp, "[Global]\n");
5868

5869
    uuid_utoa_r(priv->uuid, id_str);
5870
    fprintf(fp, "MYUUID: %s\n", id_str);
5871

5872
    fprintf(fp, "op-version: %d\n", priv->op_version);
5873

5874
    fprintf(fp, "\n[Global options]\n");
5875

5876
    if (priv->opts)
5877
        dict_foreach(priv->opts, glusterd_print_global_options, fp);
5878

5879
    fprintf(fp, "\n[Peers]\n");
5880
    RCU_READ_LOCK;
5881

5882
    cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
5883
    {
5884
        fprintf(fp, "Peer%d.primary_hostname: %s\n", ++count,
5885
                peerinfo->hostname);
5886
        fprintf(fp, "Peer%d.uuid: %s\n", count, gd_peer_uuid_str(peerinfo));
5887
        fprintf(fp, "Peer%d.state: %s\n", count,
5888
                glusterd_friend_sm_state_name_get(peerinfo->state));
5889
        fprintf(fp, "Peer%d.connected: %s\n", count,
5890
                peerinfo->connected ? "Connected" : "Disconnected");
5891

5892
        fprintf(fp, "Peer%d.othernames: ", count);
5893
        count_bkp = 0;
5894
        cds_list_for_each_entry(peer_hostname_info, &peerinfo->hostnames,
5895
                                hostname_list)
5896
        {
5897
            if (strcmp(peerinfo->hostname, peer_hostname_info->hostname) == 0)
5898
                continue;
5899

5900
            if (count_bkp > 0)
5901
                fprintf(fp, ",");
5902

5903
            fprintf(fp, "%s", peer_hostname_info->hostname);
5904
            count_bkp++;
5905
        }
5906
        count_bkp = 0;
5907
        fprintf(fp, "\n");
5908
    }
5909
    RCU_READ_UNLOCK;
5910

5911
    count = 0;
5912
    fprintf(fp, "\n[Volumes]\n");
5913

5914
    cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
5915
    {
5916
        ret = glusterd_volume_get_type_str(volinfo, &vol_type_str);
5917
        if (ret) {
5918
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
5919
                   "Failed to get type for volume: %s", volinfo->volname);
5920
            goto out;
5921
        }
5922

5923
        ret = glusterd_volume_get_status_str(volinfo, vol_status_str);
5924
        if (ret) {
5925
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
5926
                   "Failed to get status for volume: %s", volinfo->volname);
5927
            goto out;
5928
        }
5929

5930
        ret = glusterd_volume_get_transport_type_str(volinfo,
5931
                                                     transport_type_str);
5932
        if (ret) {
5933
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
5934
                   "Failed to get transport type for volume: %s",
5935
                   volinfo->volname);
5936
            goto out;
5937
        }
5938

5939
        ret = glusterd_volume_get_quorum_status_str(volinfo, quorum_status_str);
5940
        if (ret) {
5941
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
5942
                   "Failed to get quorum status for volume: %s",
5943
                   volinfo->volname);
5944
            goto out;
5945
        }
5946

5947
        ret = glusterd_volume_get_rebalance_status_str(volinfo,
5948
                                                       rebal_status_str);
5949
        if (ret) {
5950
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
5951
                   "Failed to get rebalance status for volume: %s",
5952
                   volinfo->volname);
5953
            goto out;
5954
        }
5955

5956
        fprintf(fp, "Volume%d.name: %s\n", ++count, volinfo->volname);
5957

5958
        uuid_utoa_r(volinfo->volume_id, id_str);
5959
        fprintf(fp, "Volume%d.id: %s\n", count, id_str);
5960

5961
        fprintf(fp, "Volume%d.type: %s\n", count, vol_type_str);
5962
        fprintf(fp, "Volume%d.transport_type: %s\n", count, transport_type_str);
5963
        fprintf(fp, "Volume%d.status: %s\n", count, vol_status_str);
5964
        fprintf(fp, "Volume%d.profile_enabled: %d\n", count,
5965
                glusterd_is_profile_on(volinfo));
5966
        fprintf(fp, "Volume%d.brickcount: %d\n", count, volinfo->brick_count);
5967

5968
        count_bkp = count;
5969
        count = 0;
5970
        cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
5971
        {
5972
            fprintf(fp, "Volume%d.Brick%d.path: %s:%s\n", count_bkp, ++count,
5973
                    brickinfo->hostname, brickinfo->path);
5974
            fprintf(fp, "Volume%d.Brick%d.hostname: %s\n", count_bkp, count,
5975
                    brickinfo->hostname);
5976
            /* Determine which one is the arbiter brick */
5977
            if (volinfo->arbiter_count == 1) {
5978
                if (count % volinfo->replica_count == 0) {
5979
                    fprintf(fp,
5980
                            "Volume%d.Brick%d."
5981
                            "is_arbiter: 1\n",
5982
                            count_bkp, count);
5983
                }
5984
            }
5985
            /* Add following information only for bricks
5986
             *  local to current node */
5987
            if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
5988
                continue;
5989
            fprintf(fp, "Volume%d.Brick%d.port: %d\n", count_bkp, count,
5990
                    brickinfo->port);
5991
            fprintf(fp, "Volume%d.Brick%d.rdma_port: %d\n", count_bkp, count,
5992
                    brickinfo->rdma_port);
5993
            fprintf(fp, "Volume%d.Brick%d.port_registered: %d\n", count_bkp,
5994
                    count, brickinfo->port_registered);
5995
            glusterd_brick_get_status_str(brickinfo, brick_status_str);
5996
            fprintf(fp, "Volume%d.Brick%d.status: %s\n", count_bkp, count,
5997
                    brick_status_str);
5998

5999
            ret = sys_statvfs(brickinfo->path, &brickstat);
6000
            if (ret) {
6001
                gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
6002
                       "statfs error: %s ", strerror(errno));
6003
                memfree = 0;
6004
                memtotal = 0;
6005
            } else {
6006
                memfree = brickstat.f_bfree * brickstat.f_bsize;
6007
                memtotal = brickstat.f_blocks * brickstat.f_bsize;
6008
            }
6009

6010
            fprintf(fp, "Volume%d.Brick%d.spacefree: %" PRIu64 "Bytes\n",
6011
                    count_bkp, count, memfree);
6012
            fprintf(fp, "Volume%d.Brick%d.spacetotal: %" PRIu64 "Bytes\n",
6013
                    count_bkp, count, memtotal);
6014

6015
            if (get_state_cmd != GF_CLI_GET_STATE_DETAIL)
6016
                continue;
6017

6018
            ret = glusterd_print_client_details(fp, dict, volinfo, count_bkp,
6019
                                                brickinfo, count);
6020
            if (ret) {
6021
                gf_msg(this->name, GF_LOG_ERROR, 0,
6022
                       GD_MSG_CLIENTS_GET_STATE_FAILED,
6023
                       "Failed to get client details");
6024
                goto out;
6025
            }
6026
        }
6027

6028
        count = count_bkp;
6029

6030
        ret = glusterd_print_snapinfo_by_vol(fp, volinfo, count);
6031
        if (ret)
6032
            goto out;
6033

6034
        fprintf(fp, "Volume%d.snap_count: %" PRIu64 "\n", count,
6035
                volinfo->snap_count);
6036
        fprintf(fp, "Volume%d.stripe_count: %d\n", count, STRIPE_COUNT);
6037
        fprintf(fp, "Volume%d.replica_count: %d\n", count,
6038
                volinfo->replica_count);
6039
        fprintf(fp, "Volume%d.subvol_count: %d\n", count,
6040
                volinfo->subvol_count);
6041
        fprintf(fp, "Volume%d.arbiter_count: %d\n", count,
6042
                volinfo->arbiter_count);
6043
        fprintf(fp, "Volume%d.disperse_count: %d\n", count,
6044
                volinfo->disperse_count);
6045
        fprintf(fp, "Volume%d.redundancy_count: %d\n", count,
6046
                volinfo->redundancy_count);
6047
        fprintf(fp, "Volume%d.quorum_status: %s\n", count, quorum_status_str);
6048

6049
        fprintf(fp, "Volume%d.snapd_svc.online_status: %s\n", count,
6050
                volinfo->snapd.svc.online ? "Online" : "Offline");
6051
        fprintf(fp, "Volume%d.snapd_svc.inited: %s\n", count,
6052
                volinfo->snapd.svc.inited ? "True" : "False");
6053

6054
        uuid_utoa_r(volinfo->rebal.rebalance_id, id_str);
6055
        char *rebal_data = gf_uint64_2human_readable(
6056
            volinfo->rebal.rebalance_data);
6057

6058
        fprintf(fp, "Volume%d.rebalance.id: %s\n", count, id_str);
6059
        fprintf(fp, "Volume%d.rebalance.status: %s\n", count, rebal_status_str);
6060
        fprintf(fp, "Volume%d.rebalance.failures: %" PRIu64 "\n", count,
6061
                volinfo->rebal.rebalance_failures);
6062
        fprintf(fp, "Volume%d.rebalance.skipped: %" PRIu64 "\n", count,
6063
                volinfo->rebal.skipped_files);
6064
        fprintf(fp, "Volume%d.rebalance.lookedup: %" PRIu64 "\n", count,
6065
                volinfo->rebal.lookedup_files);
6066
        fprintf(fp, "Volume%d.rebalance.files: %" PRIu64 "\n", count,
6067
                volinfo->rebal.rebalance_files);
6068
        fprintf(fp, "Volume%d.rebalance.data: %s\n", count, rebal_data);
6069
        fprintf(fp, "Volume%d.time_left: %ld\n", count,
6070
                volinfo->rebal.time_left);
6071

6072
        GF_FREE(rebal_data);
6073

6074
        fprintf(fp, "Volume%d.shd_svc.online_status: %s\n", count,
6075
                volinfo->shd.svc.online ? "Online" : "Offline");
6076
        fprintf(fp, "Volume%d.shd_svc.inited: %s\n", count,
6077
                volinfo->shd.svc.inited ? "True" : "False");
6078

6079
        if (volinfo->rep_brick.src_brick && volinfo->rep_brick.dst_brick) {
6080
            fprintf(fp, "Volume%d.replace_brick.src: %s:%s\n", count,
6081
                    volinfo->rep_brick.src_brick->hostname,
6082
                    volinfo->rep_brick.src_brick->path);
6083
            fprintf(fp, "Volume%d.replace_brick.dest: %s:%s\n", count,
6084
                    volinfo->rep_brick.dst_brick->hostname,
6085
                    volinfo->rep_brick.dst_brick->path);
6086
        }
6087

6088
        volcount = count;
6089
        ret = glusterd_print_gsync_status_by_vol(fp, volinfo);
6090
        if (ret)
6091
            goto out;
6092

6093
        if (volinfo->dict)
6094
            dict_foreach(volinfo->dict, glusterd_print_volume_options, fp);
6095

6096
        fprintf(fp, "\n");
6097
    }
6098

6099
    count = 0;
6100

6101
    fprintf(fp, "\n[Services]\n");
6102
#ifdef BUILD_GNFS
6103
    if (priv->nfs_svc.inited) {
6104
        fprintf(fp, "svc%d.name: %s\n", ++count, priv->nfs_svc.name);
6105
        fprintf(fp, "svc%d.online_status: %s\n\n", count,
6106
                priv->nfs_svc.online ? "Online" : "Offline");
6107
    }
6108
#endif
6109
    if (priv->bitd_svc.inited) {
6110
        fprintf(fp, "svc%d.name: %s\n", ++count, priv->bitd_svc.name);
6111
        fprintf(fp, "svc%d.online_status: %s\n\n", count,
6112
                priv->bitd_svc.online ? "Online" : "Offline");
6113
    }
6114

6115
    if (priv->scrub_svc.inited) {
6116
        fprintf(fp, "svc%d.name: %s\n", ++count, priv->scrub_svc.name);
6117
        fprintf(fp, "svc%d.online_status: %s\n\n", count,
6118
                priv->scrub_svc.online ? "Online" : "Offline");
6119
    }
6120

6121
    if (priv->quotad_svc.inited) {
6122
        fprintf(fp, "svc%d.name: %s\n", ++count, priv->quotad_svc.name);
6123
        fprintf(fp, "svc%d.online_status: %s\n\n", count,
6124
                priv->quotad_svc.online ? "Online" : "Offline");
6125
    }
6126

6127
    fprintf(fp, "\n[Misc]\n");
6128
    if (priv->pmap) {
6129
        fprintf(fp, "Base port: %d\n", priv->pmap->base_port);
6130
    }
6131
out:
6132

6133
    if (fp)
6134
        fclose(fp);
6135

6136
    rsp.op_ret = ret;
6137
    if (rsp.op_errstr == NULL)
6138
        rsp.op_errstr = err_str;
6139

6140
    ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
6141
                                      &rsp.dict.dict_len);
6142
    glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);
6143
    GF_FREE(rsp.dict.dict_val);
6144

6145
    return ret;
6146
}
6147

6148
static int
6149
__glusterd_handle_get_state(rpcsvc_request_t *req)
6150
{
6151
    int32_t ret = -1;
6152
    gf_cli_req cli_req = {
6153
        {
6154
            0,
6155
        },
6156
    };
6157
    dict_t *dict = NULL;
6158
    char err_str[64] = {
6159
        0,
6160
    };
6161
    xlator_t *this = THIS;
6162

6163
    GF_VALIDATE_OR_GOTO(this->name, req, out);
6164

6165
    gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DAEMON_STATE_REQ_RCVD,
6166
           "Received request to get state for glusterd");
6167

6168
    ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
6169
    if (ret < 0) {
6170
        snprintf(err_str, sizeof(err_str),
6171
                 "Failed to decode "
6172
                 "request received from cli");
6173
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
6174
               err_str);
6175
        req->rpc_err = GARBAGE_ARGS;
6176
        goto out;
6177
    }
6178

6179
    if (cli_req.dict.dict_len) {
6180
        /* Unserialize the dictionary */
6181
        dict = dict_new();
6182

6183
        ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
6184
                               &dict);
6185
        if (ret < 0) {
6186
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
6187
                   "failed to "
6188
                   "unserialize req-buffer to dictionary");
6189
            snprintf(err_str, sizeof(err_str),
6190
                     "Unable to decode"
6191
                     " the command");
6192
            goto out;
6193
        } else {
6194
            dict->extra_stdfree = cli_req.dict.dict_val;
6195
        }
6196
    }
6197

6198
    ret = glusterd_get_state(req, dict);
6199

6200
out:
6201
    if (dict && ret) {
6202
        /*
6203
         * When glusterd_to_cli (called from glusterd_get_state)
6204
         * succeeds, it frees the dict for us, so this would be a
6205
         * double free, but in other cases it's our responsibility.
6206
         */
6207
        dict_unref(dict);
6208
    }
6209
    return ret;
6210
}
6211

6212
int
6213
glusterd_handle_get_state(rpcsvc_request_t *req)
6214
{
6215
    return glusterd_big_locked_handler(req, __glusterd_handle_get_state);
6216
}
6217

6218
static int
6219
get_brickinfo_from_brickid(char *brickid, glusterd_brickinfo_t **brickinfo)
6220
{
6221
    glusterd_volinfo_t *volinfo = NULL;
6222
    char *volid_str = NULL;
6223
    char *brick = NULL;
6224
    char *brickid_dup = NULL;
6225
    uuid_t volid = {0};
6226
    int ret = -1;
6227

6228
    xlator_t *this = THIS;
6229

6230
    brickid_dup = gf_strdup(brickid);
6231
    if (!brickid_dup) {
6232
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
6233
                "brick_id=%s", brickid, NULL);
6234
        goto out;
6235
    }
6236

6237
    volid_str = brickid_dup;
6238
    brick = strchr(brickid_dup, ':');
6239
    if (!volid_str) {
6240
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
6241
        goto out;
6242
    }
6243

6244
    if (!brick) {
6245
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
6246
        goto out;
6247
    }
6248

6249
    *brick = '\0';
6250
    brick++;
6251
    gf_uuid_parse(volid_str, volid);
6252
    ret = glusterd_volinfo_find_by_volume_id(volid, &volinfo);
6253
    if (ret) {
6254
        /* Check if it a snapshot volume */
6255
        ret = glusterd_snap_volinfo_find_by_volume_id(volid, &volinfo);
6256
        if (ret)
6257
            goto out;
6258
    }
6259

6260
    ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, brickinfo,
6261
                                                 _gf_false);
6262
    if (ret)
6263
        goto out;
6264

6265
    ret = 0;
6266
out:
6267
    GF_FREE(brickid_dup);
6268
    return ret;
6269
}
6270

6271
static int gd_stale_rpc_disconnect_log;
6272

6273
int
6274
__glusterd_brick_rpc_notify(struct rpc_clnt *rpc, void *mydata,
6275
                            rpc_clnt_event_t event, void *data)
6276
{
6277
    char *brickid = NULL;
6278
    int ret = 0;
6279
    glusterd_conf_t *conf = NULL;
6280
    glusterd_brickinfo_t *brickinfo = NULL;
6281
    glusterd_volinfo_t *volinfo = NULL;
6282
    xlator_t *this = THIS;
6283
    int32_t pid = -1;
6284
    glusterd_brickinfo_t *brickinfo_tmp = NULL;
6285
    glusterd_brick_proc_t *brick_proc = NULL;
6286
    char pidfile[PATH_MAX] = {0};
6287
    char *brickpath = NULL;
6288
    gf_boolean_t is_service_running = _gf_true;
6289

6290
    brickid = mydata;
6291
    if (!brickid)
6292
        return 0;
6293

6294
    ret = get_brickinfo_from_brickid(brickid, &brickinfo);
6295
    if (ret)
6296
        return 0;
6297

6298
    conf = this->private;
6299
    GF_ASSERT(conf);
6300

6301
    switch (event) {
6302
        case RPC_CLNT_CONNECT:
6303
            ret = get_volinfo_from_brickid(brickid, &volinfo);
6304
            if (ret) {
6305
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
6306
                       "Failed to get volinfo from "
6307
                       "brickid(%s)",
6308
                       brickid);
6309
                goto out;
6310
            }
6311
            /* If a node on coming back up, already starts a brick
6312
             * before the handshake, and the notification comes after
6313
             * the handshake is done, then we need to check if this
6314
             * is a restored brick with a snapshot pending. If so, we
6315
             * need to stop the brick
6316
             */
6317
            if (brickinfo->snap_status == -1) {
6318
                gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_SNAPSHOT_PENDING,
6319
                       "Snapshot is pending on %s:%s. "
6320
                       "Hence not starting the brick",
6321
                       brickinfo->hostname, brickinfo->path);
6322
                ret = glusterd_brick_stop(volinfo, brickinfo, _gf_false);
6323
                if (ret) {
6324
                    gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_STOP_FAIL,
6325
                           "Unable to stop %s:%s", brickinfo->hostname,
6326
                           brickinfo->path);
6327
                    goto out;
6328
                }
6329

6330
                break;
6331
            }
6332
            gf_msg_debug(this->name, 0, "Connected to %s:%s",
6333
                         brickinfo->hostname, brickinfo->path);
6334

6335
            glusterd_set_brick_status(brickinfo, GF_BRICK_STARTED);
6336

6337
            gf_event(EVENT_BRICK_CONNECTED, "peer=%s;volume=%s;brick=%s",
6338
                     brickinfo->hostname, volinfo->volname, brickinfo->path);
6339

6340
            ret = default_notify(this, GF_EVENT_CHILD_UP, NULL);
6341

6342
            break;
6343

6344
        case RPC_CLNT_DISCONNECT:
6345
            if (rpc != brickinfo->rpc) {
6346
                /*
6347
                 * There used to be a bunch of races in the volume
6348
                 * start/stop code that could result in us getting here
6349
                 * and setting the brick status incorrectly.  Many of
6350
                 * those have been fixed or avoided, but just in case
6351
                 * any are still left it doesn't hurt to keep the extra
6352
                 * check and avoid further damage.
6353
                 */
6354
                GF_LOG_OCCASIONALLY(gd_stale_rpc_disconnect_log, this->name,
6355
                                    GF_LOG_WARNING,
6356
                                    "got disconnect from stale rpc on "
6357
                                    "%s",
6358
                                    brickinfo->path);
6359
                break;
6360
            }
6361
            if (glusterd_is_brick_started(brickinfo)) {
6362
                gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_BRICK_DISCONNECTED,
6363
                       "Brick %s:%s has disconnected from glusterd.",
6364
                       brickinfo->hostname, brickinfo->path);
6365

6366
                ret = get_volinfo_from_brickid(brickid, &volinfo);
6367
                if (ret) {
6368
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL,
6369
                           "Failed to get volinfo from "
6370
                           "brickid(%s)",
6371
                           brickid);
6372
                    goto out;
6373
                }
6374
                gf_event(EVENT_BRICK_DISCONNECTED, "peer=%s;volume=%s;brick=%s",
6375
                         brickinfo->hostname, volinfo->volname,
6376
                         brickinfo->path);
6377
                /* In case of an abrupt shutdown of a brick PMAP_SIGNOUT
6378
                 * event is not received by glusterd which can lead to a
6379
                 * stale port entry in glusterd, so forcibly clean up
6380
                 * the same if the process is not running sometime
6381
                 * gf_is_service_running true so to ensure about brick instance
6382
                 * call search_brick_path_from_proc
6383
                 */
6384
                GLUSTERD_GET_BRICK_PIDFILE(pidfile, volinfo, brickinfo, conf);
6385
                is_service_running = gf_is_service_running(pidfile, &pid);
6386
                if (pid > 0)
6387
                    brickpath = search_brick_path_from_proc(pid,
6388
                                                            brickinfo->path);
6389
                if (!is_service_running || !brickpath) {
6390
                    ret = pmap_port_remove(this, brickinfo->port,
6391
                                           brickinfo->path, NULL, _gf_true);
6392
                    if (ret) {
6393
                        gf_msg(this->name, GF_LOG_WARNING,
6394
                               GD_MSG_PMAP_REGISTRY_REMOVE_FAIL, 0,
6395
                               "Failed to remove pmap "
6396
                               "registry for port %d for "
6397
                               "brick %s",
6398
                               brickinfo->port, brickinfo->path);
6399
                        ret = 0;
6400
                    }
6401
                }
6402
            }
6403

6404
            if (brickpath)
6405
                GF_FREE(brickpath);
6406

6407
            if (is_brick_mx_enabled() && glusterd_is_brick_started(brickinfo)) {
6408
                brick_proc = brickinfo->brick_proc;
6409
                if (!brick_proc)
6410
                    break;
6411
                cds_list_for_each_entry(brickinfo_tmp, &brick_proc->bricks,
6412
                                        mux_bricks)
6413
                {
6414
                    glusterd_set_brick_status(brickinfo_tmp, GF_BRICK_STOPPED);
6415
                    brickinfo_tmp->start_triggered = _gf_false;
6416
                    /* When bricks are stopped, ports also need to
6417
                     * be cleaned up
6418
                     */
6419
                    pmap_port_remove(this, brickinfo_tmp->port,
6420
                                     brickinfo_tmp->path, NULL, _gf_true);
6421
                }
6422
            } else {
6423
                glusterd_set_brick_status(brickinfo, GF_BRICK_STOPPED);
6424
                brickinfo->start_triggered = _gf_false;
6425
            }
6426
            break;
6427

6428
        case RPC_CLNT_DESTROY:
6429
            GF_FREE(mydata);
6430
            mydata = NULL;
6431
            break;
6432
        default:
6433
            gf_msg_trace(this->name, 0, "got some other RPC event %d", event);
6434
            break;
6435
    }
6436

6437
out:
6438
    return ret;
6439
}
6440

6441
int
6442
glusterd_brick_rpc_notify(struct rpc_clnt *rpc, void *mydata,
6443
                          rpc_clnt_event_t event, void *data)
6444
{
6445
    return glusterd_big_locked_notify(rpc, mydata, event, data,
6446
                                      __glusterd_brick_rpc_notify);
6447
}
6448

6449
int
6450
glusterd_friend_remove_notify(glusterd_peerctx_t *peerctx, int32_t op_errno)
6451
{
6452
    int ret = -1;
6453
    glusterd_friend_sm_event_t *new_event = NULL;
6454
    glusterd_peerinfo_t *peerinfo = NULL;
6455
    rpcsvc_request_t *req = NULL;
6456
    char *errstr = NULL;
6457
    dict_t *dict = NULL;
6458

6459
    GF_ASSERT(peerctx);
6460

6461
    RCU_READ_LOCK;
6462
    peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
6463
    if (!peerinfo) {
6464
        gf_msg_debug(THIS->name, 0,
6465
                     "Could not find peer %s(%s). "
6466
                     "Peer could have been deleted.",
6467
                     peerctx->peername, uuid_utoa(peerctx->peerid));
6468
        ret = 0;
6469
        goto out;
6470
    }
6471

6472
    req = peerctx->args.req;
6473
    dict = peerctx->args.dict;
6474
    errstr = peerctx->errstr;
6475

6476
    ret = glusterd_friend_sm_new_event(GD_FRIEND_EVENT_REMOVE_FRIEND,
6477
                                       &new_event);
6478
    if (!ret) {
6479
        if (!req) {
6480
            gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_EVENT_NEW_GET_FAIL,
6481
                   "Unable to find the request for responding "
6482
                   "to User (%s)",
6483
                   peerinfo->hostname);
6484
            goto out;
6485
        }
6486

6487
        glusterd_xfer_cli_probe_resp(req, -1, op_errno, errstr,
6488
                                     peerinfo->hostname, peerinfo->port, dict);
6489

6490
        new_event->peername = gf_strdup(peerinfo->hostname);
6491
        gf_uuid_copy(new_event->peerid, peerinfo->uuid);
6492
        ret = glusterd_friend_sm_inject_event(new_event);
6493

6494
    } else {
6495
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
6496
               "Unable to create event for removing peer %s",
6497
               peerinfo->hostname);
6498
    }
6499

6500
out:
6501
    RCU_READ_UNLOCK;
6502
    return ret;
6503
}
6504

6505
static int
6506
__glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
6507
                           rpc_clnt_event_t event, void *data)
6508
{
6509
    xlator_t *this = NULL;
6510
    glusterd_conf_t *conf = NULL;
6511
    int ret = 0;
6512
    int32_t op_errno = ENOTCONN;
6513
    glusterd_peerinfo_t *peerinfo = NULL;
6514
    glusterd_peerctx_t *peerctx = NULL;
6515
    gf_boolean_t quorum_action = _gf_false;
6516
    glusterd_volinfo_t *volinfo = NULL;
6517
    glusterfs_ctx_t *ctx = NULL;
6518

6519
    peerctx = mydata;
6520
    if (!peerctx)
6521
        return 0;
6522

6523
    switch (event) {
6524
        case RPC_CLNT_DESTROY:
6525
            GF_FREE(peerctx->errstr);
6526
            GF_FREE(peerctx->peername);
6527
            GF_FREE(peerctx);
6528
            return 0;
6529
        case RPC_CLNT_PING:
6530
            return 0;
6531
        default:
6532
            break;
6533
    }
6534

6535
    this = THIS;
6536
    conf = this->private;
6537
    ctx = this->ctx;
6538
    GF_VALIDATE_OR_GOTO(this->name, ctx, out);
6539
    if (ctx->cleanup_started) {
6540
        gf_log(this->name, GF_LOG_INFO,
6541
               "glusterd already received a SIGTERM, "
6542
               "dropping the event %d for peer %s",
6543
               event, peerctx->peername);
6544
        return 0;
6545
    }
6546
    RCU_READ_LOCK;
6547

6548
    peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
6549
    if (!peerinfo) {
6550
        /* Peerinfo should be available at this point if its a connect
6551
         * event. Not finding it means that something terrible has
6552
         * happened. For non-connect event we might end up having a null
6553
         * peerinfo, so log at debug level.
6554
         */
6555
        gf_msg(THIS->name,
6556
               (RPC_CLNT_CONNECT == event) ? GF_LOG_CRITICAL : GF_LOG_DEBUG,
6557
               ENOENT, GD_MSG_PEER_NOT_FOUND,
6558
               "Could not find peer "
6559
               "%s(%s)",
6560
               peerctx->peername, uuid_utoa(peerctx->peerid));
6561

6562
        if (RPC_CLNT_CONNECT == event) {
6563
            gf_event(EVENT_PEER_NOT_FOUND, "peer=%s;uuid=%s", peerctx->peername,
6564
                     uuid_utoa(peerctx->peerid));
6565
        }
6566
        ret = -1;
6567
        goto out;
6568
    }
6569

6570
    switch (event) {
6571
        case RPC_CLNT_CONNECT: {
6572
            gf_msg_debug(this->name, 0, "got RPC_CLNT_CONNECT");
6573
            peerinfo->connected = 1;
6574
            peerinfo->quorum_action = _gf_true;
6575
            peerinfo->generation = uatomic_add_return(&conf->generation, 1);
6576
            peerctx->peerinfo_gen = peerinfo->generation;
6577
            /* EVENT_PEER_CONNECT will only be sent if peerctx->uuid is not
6578
             * NULL, otherwise it indicates this RPC_CLNT_CONNECT is from a
6579
             * peer probe trigger and given we already generate an event for
6580
             * peer probe this would be unnecessary.
6581
             */
6582
            if (!gf_uuid_is_null(peerinfo->uuid)) {
6583
                gf_event(EVENT_PEER_CONNECT, "host=%s;uuid=%s",
6584
                         peerinfo->hostname, uuid_utoa(peerinfo->uuid));
6585
            }
6586
            ret = glusterd_peer_dump_version(this, rpc, peerctx);
6587
            if (ret)
6588
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HANDSHAKE_FAILED,
6589
                       "glusterd handshake failed");
6590
            break;
6591
        }
6592

6593
        case RPC_CLNT_DISCONNECT: {
6594
            /* If DISCONNECT event is already processed, skip the further
6595
             * ones
6596
             */
6597
            if (rpc_clnt_connection_status(&rpc->conn) ==
6598
                RPC_STATUS_DISCONNECTED)
6599
                break;
6600

6601
            gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_PEER_DISCONNECTED,
6602
                   "Peer <%s> (<%s>), in state <%s>, has disconnected "
6603
                   "from glusterd.",
6604
                   peerinfo->hostname, uuid_utoa(peerinfo->uuid),
6605
                   glusterd_friend_sm_state_name_get(peerinfo->state));
6606
            gf_event(EVENT_PEER_DISCONNECT, "peer=%s;uuid=%s;state=%s",
6607
                     peerinfo->hostname, uuid_utoa(peerinfo->uuid),
6608
                     glusterd_friend_sm_state_name_get(peerinfo->state));
6609

6610
            if (peerinfo->connected) {
6611
                cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
6612
                {
6613
                    ret = glusterd_mgmt_v3_unlock(volinfo->volname,
6614
                                                  peerinfo->uuid, "vol");
6615
                    if (ret)
6616
                        gf_msg(this->name, GF_LOG_WARNING, 0,
6617
                               GD_MSG_MGMTV3_UNLOCK_FAIL,
6618
                               "Lock not released "
6619
                               "for %s",
6620
                               volinfo->volname);
6621
                }
6622

6623
                op_errno = GF_PROBE_ANOTHER_CLUSTER;
6624
                ret = 0;
6625
            }
6626

6627
            if ((peerinfo->quorum_contrib != QUORUM_DOWN) &&
6628
                (peerinfo->state == GD_FRIEND_STATE_BEFRIENDED)) {
6629
                peerinfo->quorum_contrib = QUORUM_DOWN;
6630
                quorum_action = _gf_true;
6631
                peerinfo->quorum_action = _gf_false;
6632
            }
6633

6634
            /* Remove peer if it is not a friend and connection/handshake
6635
             *  fails, and notify cli. Happens only during probe.
6636
             */
6637
            if (peerinfo->state == GD_FRIEND_STATE_DEFAULT) {
6638
                glusterd_friend_remove_notify(peerctx, op_errno);
6639
                goto out;
6640
            }
6641

6642
            peerinfo->connected = 0;
6643
            break;
6644
        }
6645

6646
        default:
6647
            gf_msg_trace(this->name, 0, "got some other RPC event %d", event);
6648
            ret = 0;
6649
            break;
6650
    }
6651

6652
out:
6653
    RCU_READ_UNLOCK;
6654

6655
    glusterd_friend_sm();
6656
    glusterd_op_sm();
6657
    if (quorum_action)
6658
        glusterd_do_quorum_action();
6659
    return ret;
6660
}
6661

6662
int
6663
glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
6664
                         rpc_clnt_event_t event, void *data)
6665
{
6666
    return glusterd_big_locked_notify(rpc, mydata, event, data,
6667
                                      __glusterd_peer_rpc_notify);
6668
}
6669

6670
int
6671
glusterd_null(rpcsvc_request_t *req)
6672
{
6673
    return 0;
6674
}
6675

6676
static rpcsvc_actor_t gd_svc_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
6677
    [GLUSTERD_MGMT_NULL] = {"NULL", glusterd_null, NULL, GLUSTERD_MGMT_NULL,
6678
                            DRC_NA, 0},
6679
    [GLUSTERD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK",
6680
                                    glusterd_handle_cluster_lock, NULL,
6681
                                    GLUSTERD_MGMT_CLUSTER_LOCK, DRC_NA, 0},
6682
    [GLUSTERD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK",
6683
                                      glusterd_handle_cluster_unlock, NULL,
6684
                                      GLUSTERD_MGMT_CLUSTER_UNLOCK, DRC_NA, 0},
6685
    [GLUSTERD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd_handle_stage_op, NULL,
6686
                                GLUSTERD_MGMT_STAGE_OP, DRC_NA, 0},
6687
    [GLUSTERD_MGMT_COMMIT_OP] =
6688
        {
6689
            "COMMIT_OP",
6690
            glusterd_handle_commit_op,
6691
            NULL,
6692
            GLUSTERD_MGMT_COMMIT_OP,
6693
            DRC_NA,
6694
            0,
6695
        },
6696
};
6697

6698
struct rpcsvc_program gd_svc_mgmt_prog = {
6699
    .progname = "GlusterD svc mgmt",
6700
    .prognum = GD_MGMT_PROGRAM,
6701
    .progver = GD_MGMT_VERSION,
6702
    .numactors = GLUSTERD_MGMT_MAXVALUE,
6703
    .actors = gd_svc_mgmt_actors,
6704
    .synctask = _gf_true,
6705
};
6706

6707
static rpcsvc_actor_t gd_svc_peer_actors[GLUSTERD_FRIEND_MAXVALUE] = {
6708
    [GLUSTERD_FRIEND_NULL] = {"NULL", glusterd_null, NULL, GLUSTERD_MGMT_NULL,
6709
                              DRC_NA, 0},
6710
    [GLUSTERD_PROBE_QUERY] = {"PROBE_QUERY", glusterd_handle_probe_query, NULL,
6711
                              GLUSTERD_PROBE_QUERY, DRC_NA, 0},
6712
    [GLUSTERD_FRIEND_ADD] = {"FRIEND_ADD", glusterd_handle_incoming_friend_req,
6713
                             NULL, GLUSTERD_FRIEND_ADD, DRC_NA, 0},
6714
    [GLUSTERD_FRIEND_REMOVE] = {"FRIEND_REMOVE",
6715
                                glusterd_handle_incoming_unfriend_req, NULL,
6716
                                GLUSTERD_FRIEND_REMOVE, DRC_NA, 0},
6717
    [GLUSTERD_FRIEND_UPDATE] = {"FRIEND_UPDATE", glusterd_handle_friend_update,
6718
                                NULL, GLUSTERD_FRIEND_UPDATE, DRC_NA, 0},
6719
};
6720

6721
struct rpcsvc_program gd_svc_peer_prog = {
6722
    .progname = "GlusterD svc peer",
6723
    .prognum = GD_FRIEND_PROGRAM,
6724
    .progver = GD_FRIEND_VERSION,
6725
    .numactors = GLUSTERD_FRIEND_MAXVALUE,
6726
    .actors = gd_svc_peer_actors,
6727
    .synctask = _gf_false,
6728
};
6729

6730
static rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
6731
    [GLUSTER_CLI_PROBE] = {"CLI_PROBE", glusterd_handle_cli_probe, NULL,
6732
                           GLUSTER_CLI_PROBE, DRC_NA, 0},
6733
    [GLUSTER_CLI_CREATE_VOLUME] = {"CLI_CREATE_VOLUME",
6734
                                   glusterd_handle_create_volume, NULL,
6735
                                   GLUSTER_CLI_CREATE_VOLUME, DRC_NA, 0},
6736
    [GLUSTER_CLI_DEFRAG_VOLUME] = {"CLI_DEFRAG_VOLUME",
6737
                                   glusterd_handle_defrag_volume, NULL,
6738
                                   GLUSTER_CLI_DEFRAG_VOLUME, DRC_NA, 0},
6739
    [GLUSTER_CLI_DEPROBE] = {"FRIEND_REMOVE", glusterd_handle_cli_deprobe, NULL,
6740
                             GLUSTER_CLI_DEPROBE, DRC_NA, 0},
6741
    [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS",
6742
                                  glusterd_handle_cli_list_friends, NULL,
6743
                                  GLUSTER_CLI_LIST_FRIENDS, DRC_NA, 0},
6744
    [GLUSTER_CLI_UUID_RESET] = {"UUID_RESET", glusterd_handle_cli_uuid_reset,
6745
                                NULL, GLUSTER_CLI_UUID_RESET, DRC_NA, 0},
6746
    [GLUSTER_CLI_UUID_GET] = {"UUID_GET", glusterd_handle_cli_uuid_get, NULL,
6747
                              GLUSTER_CLI_UUID_GET, DRC_NA, 0},
6748
    [GLUSTER_CLI_START_VOLUME] = {"START_VOLUME",
6749
                                  glusterd_handle_cli_start_volume, NULL,
6750
                                  GLUSTER_CLI_START_VOLUME, DRC_NA, 0},
6751
    [GLUSTER_CLI_STOP_VOLUME] = {"STOP_VOLUME", glusterd_handle_cli_stop_volume,
6752
                                 NULL, GLUSTER_CLI_STOP_VOLUME, DRC_NA, 0},
6753
    [GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME",
6754
                                   glusterd_handle_cli_delete_volume, NULL,
6755
                                   GLUSTER_CLI_DELETE_VOLUME, DRC_NA, 0},
6756
    [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", glusterd_handle_cli_get_volume,
6757
                                NULL, GLUSTER_CLI_GET_VOLUME, DRC_NA, 0},
6758
    [GLUSTER_CLI_ADD_BRICK] = {"ADD_BRICK", glusterd_handle_add_brick, NULL,
6759
                               GLUSTER_CLI_ADD_BRICK, DRC_NA, 0},
6760
    [GLUSTER_CLI_ATTACH_TIER] = {"ATTACH_TIER", glusterd_handle_attach_tier,
6761
                                 NULL, GLUSTER_CLI_ATTACH_TIER, DRC_NA, 0},
6762
    [GLUSTER_CLI_REPLACE_BRICK] = {"REPLACE_BRICK",
6763
                                   glusterd_handle_replace_brick, NULL,
6764
                                   GLUSTER_CLI_REPLACE_BRICK, DRC_NA, 0},
6765
    [GLUSTER_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", glusterd_handle_remove_brick,
6766
                                  NULL, GLUSTER_CLI_REMOVE_BRICK, DRC_NA, 0},
6767
    [GLUSTER_CLI_LOG_ROTATE] = {"LOG FILENAME", glusterd_handle_log_rotate,
6768
                                NULL, GLUSTER_CLI_LOG_ROTATE, DRC_NA, 0},
6769
    [GLUSTER_CLI_SET_VOLUME] = {"SET_VOLUME", glusterd_handle_set_volume, NULL,
6770
                                GLUSTER_CLI_SET_VOLUME, DRC_NA, 0},
6771
    [GLUSTER_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", glusterd_handle_sync_volume,
6772
                                 NULL, GLUSTER_CLI_SYNC_VOLUME, DRC_NA, 0},
6773
    [GLUSTER_CLI_RESET_VOLUME] = {"RESET_VOLUME", glusterd_handle_reset_volume,
6774
                                  NULL, GLUSTER_CLI_RESET_VOLUME, DRC_NA, 0},
6775
    [GLUSTER_CLI_FSM_LOG] = {"FSM_LOG", glusterd_handle_fsm_log, NULL,
6776
                             GLUSTER_CLI_FSM_LOG, DRC_NA, 0},
6777
    [GLUSTER_CLI_GSYNC_SET] = {"GSYNC_SET", glusterd_handle_gsync_set, NULL,
6778
                               GLUSTER_CLI_GSYNC_SET, DRC_NA, 0},
6779
    [GLUSTER_CLI_PROFILE_VOLUME] = {"STATS_VOLUME",
6780
                                    glusterd_handle_cli_profile_volume, NULL,
6781
                                    GLUSTER_CLI_PROFILE_VOLUME, DRC_NA, 0},
6782
    [GLUSTER_CLI_QUOTA] = {"QUOTA", glusterd_handle_quota, NULL,
6783
                           GLUSTER_CLI_QUOTA, DRC_NA, 0},
6784
    [GLUSTER_CLI_GETWD] = {"GETWD", glusterd_handle_getwd, NULL,
6785
                           GLUSTER_CLI_GETWD, DRC_NA, 1},
6786
    [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME",
6787
                                   glusterd_handle_status_volume, NULL,
6788
                                   GLUSTER_CLI_STATUS_VOLUME, DRC_NA, 0},
6789
    [GLUSTER_CLI_MOUNT] = {"MOUNT", glusterd_handle_mount, NULL,
6790
                           GLUSTER_CLI_MOUNT, DRC_NA, 1},
6791
    [GLUSTER_CLI_UMOUNT] = {"UMOUNT", glusterd_handle_umount, NULL,
6792
                            GLUSTER_CLI_UMOUNT, DRC_NA, 1},
6793
    [GLUSTER_CLI_HEAL_VOLUME] = {"HEAL_VOLUME", glusterd_handle_cli_heal_volume,
6794
                                 NULL, GLUSTER_CLI_HEAL_VOLUME, DRC_NA, 0},
6795
    [GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME",
6796
                                      glusterd_handle_cli_statedump_volume,
6797
                                      NULL, GLUSTER_CLI_STATEDUMP_VOLUME,
6798
                                      DRC_NA, 0},
6799
    [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", glusterd_handle_cli_list_volume,
6800
                                 NULL, GLUSTER_CLI_LIST_VOLUME, DRC_NA, 0},
6801
    [GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME",
6802
                                     glusterd_handle_cli_clearlocks_volume,
6803
                                     NULL, GLUSTER_CLI_CLRLOCKS_VOLUME, DRC_NA,
6804
                                     0},
6805
    [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", glusterd_handle_copy_file, NULL,
6806
                               GLUSTER_CLI_COPY_FILE, DRC_NA, 0},
6807
    [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", glusterd_handle_sys_exec, NULL,
6808
                              GLUSTER_CLI_SYS_EXEC, DRC_NA, 0},
6809
    [GLUSTER_CLI_SNAP] = {"SNAP", glusterd_handle_snapshot, NULL,
6810
                          GLUSTER_CLI_SNAP, DRC_NA, 0},
6811
    [GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER_VOLUME", glusterd_handle_barrier,
6812
                                    NULL, GLUSTER_CLI_BARRIER_VOLUME, DRC_NA,
6813
                                    0},
6814
    [GLUSTER_CLI_GANESHA] = {"GANESHA", glusterd_handle_ganesha_cmd, NULL,
6815
                             GLUSTER_CLI_GANESHA, DRC_NA, 0},
6816
    [GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", glusterd_handle_get_vol_opt,
6817
                                 NULL, DRC_NA, 0},
6818
    [GLUSTER_CLI_BITROT] = {"BITROT", glusterd_handle_bitrot, NULL,
6819
                            GLUSTER_CLI_BITROT, DRC_NA, 0},
6820
    [GLUSTER_CLI_GET_STATE] = {"GET_STATE", glusterd_handle_get_state, NULL,
6821
                               GLUSTER_CLI_GET_STATE, DRC_NA, 0},
6822
    [GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", glusterd_handle_reset_brick,
6823
                                 NULL, GLUSTER_CLI_RESET_BRICK, DRC_NA, 0},
6824
    [GLUSTER_CLI_TIER] = {"TIER", glusterd_handle_tier, NULL, GLUSTER_CLI_TIER,
6825
                          DRC_NA, 0},
6826
    [GLUSTER_CLI_REMOVE_TIER_BRICK] = {"REMOVE_TIER_BRICK",
6827
                                       glusterd_handle_tier, NULL,
6828
                                       GLUSTER_CLI_REMOVE_TIER_BRICK, DRC_NA,
6829
                                       0},
6830
    [GLUSTER_CLI_ADD_TIER_BRICK] = {"ADD_TIER_BRICK",
6831
                                    glusterd_handle_add_tier_brick, NULL,
6832
                                    GLUSTER_CLI_ADD_TIER_BRICK, DRC_NA, 0},
6833
};
6834

6835
struct rpcsvc_program gd_svc_cli_prog = {
6836
    .progname = "GlusterD svc cli",
6837
    .prognum = GLUSTER_CLI_PROGRAM,
6838
    .progver = GLUSTER_CLI_VERSION,
6839
    .numactors = GLUSTER_CLI_MAXVALUE,
6840
    .actors = gd_svc_cli_actors,
6841
    .synctask = _gf_true,
6842
};
6843

6844
/**
6845
 * This set of RPC progs are deemed to be trusted. Most of the actors support
6846
 * read only queries, the only exception being MOUNT/UMOUNT which is required
6847
 * by geo-replication to support unprivileged primary -> secondary sessions.
6848
 */
6849
static rpcsvc_actor_t gd_svc_cli_trusted_actors[GLUSTER_CLI_MAXVALUE] = {
6850
    [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS",
6851
                                  glusterd_handle_cli_list_friends, NULL,
6852
                                  GLUSTER_CLI_LIST_FRIENDS, DRC_NA, 0},
6853
    [GLUSTER_CLI_UUID_GET] = {"UUID_GET", glusterd_handle_cli_uuid_get, NULL,
6854
                              GLUSTER_CLI_UUID_GET, DRC_NA, 0},
6855
    [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", glusterd_handle_cli_get_volume,
6856
                                NULL, GLUSTER_CLI_GET_VOLUME, DRC_NA, 0},
6857
    [GLUSTER_CLI_GETWD] = {"GETWD", glusterd_handle_getwd, NULL,
6858
                           GLUSTER_CLI_GETWD, DRC_NA, 1},
6859
    [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME",
6860
                                   glusterd_handle_status_volume, NULL,
6861
                                   GLUSTER_CLI_STATUS_VOLUME, DRC_NA, 0},
6862
    [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", glusterd_handle_cli_list_volume,
6863
                                 NULL, GLUSTER_CLI_LIST_VOLUME, DRC_NA, 0},
6864
    [GLUSTER_CLI_MOUNT] = {"MOUNT", glusterd_handle_mount, NULL,
6865
                           GLUSTER_CLI_MOUNT, DRC_NA, 1},
6866
    [GLUSTER_CLI_UMOUNT] = {"UMOUNT", glusterd_handle_umount, NULL,
6867
                            GLUSTER_CLI_UMOUNT, DRC_NA, 1},
6868
};
6869

6870
struct rpcsvc_program gd_svc_cli_trusted_progs = {
6871
    .progname = "GlusterD svc cli read-only",
6872
    .prognum = GLUSTER_CLI_PROGRAM,
6873
    .progver = GLUSTER_CLI_VERSION,
6874
    .numactors = GLUSTER_CLI_MAXVALUE,
6875
    .actors = gd_svc_cli_trusted_actors,
6876
    .synctask = _gf_true,
6877
};
6878

6879
/* As we cant remove the handlers, I'm moving the tier based
6880
 * handlers to this file as we no longer have gluster-tier.c
6881
 * and other tier.c files
6882
 */
6883

6884
int
6885
glusterd_handle_tier(rpcsvc_request_t *req)
6886
{
6887
    return 0;
6888
}
6889

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.