glusterfs

glusterd-op-sm.c 
8304 lines · 256.0 KB
/*
   Copyright (c) 2006-2012 Red Hat, Inc. <http://www.redhat.com>
   This file is part of GlusterFS.

   This file is licensed to you under your choice of the GNU Lesser
   General Public License, version 3 or any later version (LGPLv3 or
   later), or the GNU General Public License, version 2 (GPLv2), in all
   cases as published by the Free Software Foundation.
*/

#include <time.h>
#include <sys/uio.h>
#include <sys/mount.h>

#include <libgen.h>
#include <glusterfs/compat-uuid.h>

#include "fnmatch.h"
#include <glusterfs/list.h>
#include <glusterfs/dict.h>
#include <glusterfs/compat-errno.h>
#include <glusterfs/statedump.h>
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
#include "glusterd-store.h"
#include "glusterd-locks.h"
#include "glusterd-quota.h"
#include <glusterfs/syscall.h>
#include "glusterd-snapshot-utils.h"
#include "glusterd-svc-mgmt.h"
#include "glusterd-svc-helper.h"
#include "glusterd-shd-svc-helper.h"
#include "glusterd-shd-svc.h"
#include "glusterd-quotad-svc.h"
#include "glusterd-server-quorum.h"
#include <sys/types.h>
#include <signal.h>
#include <sys/wait.h>
#include "glusterd-gfproxyd-svc-helper.h"

#define len_strcmp(key, len, str)                                              \
    ((len == SLEN(str)) && (strcmp(key, str) == 0))
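
/* Note: len_strcmp() first compares the caller-supplied key length against
 * the compile-time length of the literal (SLEN(str) is sizeof(str) - 1 in
 * libglusterfs), so strcmp() only runs when the lengths already match.
 * Because of SLEN(), the third argument must be a string literal:
 *
 *     keylen = strlen(key);
 *     if (len_strcmp(key, keylen, "features.quota")) { ... }
 */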

extern char local_node_hostname[PATH_MAX];
static int
glusterd_set_shared_storage(dict_t *dict, char *key, char *value,
                            char **op_errstr);

/*
 * Valid options for all volumes to be listed in the valid_all_vol_opts table.
 * To add newer options to all volumes, we can just add more entries to this
 * table.
 *
 * It's important that every value have a default, or have a special handler
 * in glusterd_get_global_options_for_all_vols, or else we might crash there.
 */
const glusterd_all_vol_opts valid_all_vol_opts[] = {
    {GLUSTERD_QUORUM_RATIO_KEY, "51"},
    {GLUSTERD_SHARED_STORAGE_KEY, "disable"},
    /* This one actually gets filled in dynamically. */
    {GLUSTERD_GLOBAL_OP_VERSION_KEY, "BUG_NO_OP_VERSION"},
    /*
     * This one should be filled in dynamically, but it didn't use to be
     * (before the defaults were added here), so the value is unclear.
     *
     * TBD: add a dynamic handler to set the appropriate value
     */
    {GLUSTERD_MAX_OP_VERSION_KEY, "BUG_NO_MAX_OP_VERSION"},
    {GLUSTERD_BRICK_MULTIPLEX_KEY, "disable"},
    /* This value is 0 by default, implying brick-multiplexing
     * behaviour with no limit set on the number of brick instances that
     * can be attached per process.
     * TBD: Discuss the default value for this. Maybe this should be a
     * dynamic value depending on the memory specifications per node */
    {GLUSTERD_BRICKMUX_LIMIT_KEY, GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE},
    {GLUSTERD_VOL_CNT_PER_THRD, GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE},
    {GLUSTERD_LOCALTIME_LOGGING_KEY, "disable"},
    {GLUSTERD_DAEMON_LOG_LEVEL_KEY, "INFO"},
    {GLUSTER_BRICK_GRACEFUL_CLEANUP, "disable"},
    {NULL},
};

static struct cds_list_head gd_op_sm_queue;
synclock_t gd_op_sm_lock;
glusterd_op_info_t opinfo = {
    GD_OP_STATE_DEFAULT,
};

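/* The helpers below manage per-transaction operation state: each op-sm
 * transaction is identified by a UUID, and its glusterd_op_info_t is stored
 * in the priv->glusterd_txn_opinfo dict keyed by the uuid_utoa() string of
 * that UUID. The usual lifecycle is glusterd_generate_txn_id(), then
 * glusterd_set_txn_opinfo(), glusterd_get_txn_opinfo() while the transaction
 * runs, and finally glusterd_clear_txn_opinfo().
 */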
int32_t
glusterd_txn_opinfo_dict_init(void)
{
    int32_t ret = -1;
    xlator_t *this = THIS;
    glusterd_conf_t *priv = NULL;

    priv = this->private;
    GF_ASSERT(priv);

    priv->glusterd_txn_opinfo = dict_new();
    if (!priv->glusterd_txn_opinfo) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
        ret = -1;
        goto out;
    }

    memset(priv->global_txn_id, '\0', sizeof(uuid_t));

    ret = 0;
out:
    return ret;
}

void
glusterd_txn_opinfo_dict_fini(void)
{
    glusterd_conf_t *priv = NULL;

    priv = THIS->private;
    GF_ASSERT(priv);

    if (priv->glusterd_txn_opinfo)
        dict_unref(priv->glusterd_txn_opinfo);
}

void
glusterd_txn_opinfo_init(glusterd_op_info_t *opinfo,
                         glusterd_op_sm_state_t state, int *op, dict_t *op_ctx,
                         rpcsvc_request_t *req)
{
    glusterd_conf_t *conf = NULL;

    GF_ASSERT(opinfo);

    conf = THIS->private;
    GF_ASSERT(conf);

    if (state)
        opinfo->state = state;

    if (op)
        opinfo->op = *op;

    if (op_ctx)
        opinfo->op_ctx = dict_ref(op_ctx);
    else
        opinfo->op_ctx = NULL;

    if (req)
        opinfo->req = req;

    opinfo->txn_generation = conf->generation;
    cmm_smp_rmb();

    return;
}

int32_t
glusterd_generate_txn_id(dict_t *dict, uuid_t **txn_id)
{
    int32_t ret = -1;
    xlator_t *this = THIS;

    GF_ASSERT(dict);

    *txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);
    if (!*txn_id) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
        goto out;
    }

    gf_uuid_generate(**txn_id);

    ret = dict_set_bin(dict, "transaction_id", *txn_id, sizeof(**txn_id));
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set transaction id.");
        goto out;
    }

    gf_msg_debug(this->name, 0, "Transaction_id = %s", uuid_utoa(**txn_id));
out:
    if (ret && *txn_id) {
        GF_FREE(*txn_id);
        *txn_id = NULL;
    }

    return ret;
}
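
/* Ownership note: dict_set_bin() transfers ownership of the uuid buffer to
 * the dict (it is freed when the entry is deleted or the dict is destroyed),
 * which is why glusterd_generate_txn_id() frees *txn_id only on the failure
 * path; callers must not GF_FREE() a successfully stored txn_id.
 */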

int32_t
glusterd_get_txn_opinfo(uuid_t *txn_id, glusterd_op_info_t *opinfo)
{
    int32_t ret = -1;
    glusterd_txn_opinfo_obj *opinfo_obj = NULL;
    glusterd_conf_t *priv = NULL;
    xlator_t *this = THIS;

    priv = this->private;
    GF_ASSERT(priv);

    if (!txn_id || !opinfo) {
        gf_msg_callingfn(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
                         "Empty transaction id or opinfo received.");
        ret = -1;
        goto out;
    }

    ret = dict_get_bin(priv->glusterd_txn_opinfo, uuid_utoa(*txn_id),
                       (void **)&opinfo_obj);
    if (ret)
        goto out;

    (*opinfo) = opinfo_obj->opinfo;

    gf_msg_debug(this->name, 0,
                 "Successfully got opinfo for transaction ID : %s",
                 uuid_utoa(*txn_id));

    ret = 0;
out:
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

int32_t
glusterd_set_txn_opinfo(uuid_t *txn_id, glusterd_op_info_t *opinfo)
{
    int32_t ret = -1;
    glusterd_txn_opinfo_obj *opinfo_obj = NULL;
    glusterd_conf_t *priv = NULL;
    xlator_t *this = THIS;

    priv = this->private;
    GF_ASSERT(priv);

    if (!txn_id) {
        gf_msg_callingfn(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
                         "Empty transaction id received.");
        ret = -1;
        goto out;
    }

    ret = dict_get_bin(priv->glusterd_txn_opinfo, uuid_utoa(*txn_id),
                       (void **)&opinfo_obj);
    if (ret) {
        opinfo_obj = GF_CALLOC(1, sizeof(glusterd_txn_opinfo_obj),
                               gf_common_mt_txn_opinfo_obj_t);
        if (!opinfo_obj) {
            ret = -1;
            goto out;
        }

        ret = dict_set_bin(priv->glusterd_txn_opinfo, uuid_utoa(*txn_id),
                           opinfo_obj, sizeof(glusterd_txn_opinfo_obj));
        if (ret) {
            gf_msg_callingfn(this->name, GF_LOG_ERROR, -ret,
                             GD_MSG_DICT_SET_FAILED,
                             "Unable to set opinfo for transaction"
                             " ID : %s",
                             uuid_utoa(*txn_id));
            goto out;
        }
    }

    opinfo_obj->opinfo = (*opinfo);

    gf_msg_debug(this->name, 0,
                 "Successfully set opinfo for transaction ID : %s",
                 uuid_utoa(*txn_id));
    ret = 0;
out:
    if (ret)
        if (opinfo_obj)
            GF_FREE(opinfo_obj);

    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

int32_t
glusterd_clear_txn_opinfo(uuid_t *txn_id)
{
    int32_t ret = -1;
    glusterd_op_info_t txn_op_info = {
        GD_OP_STATE_DEFAULT,
    };
    glusterd_conf_t *priv = NULL;
    xlator_t *this = THIS;

    priv = this->private;
    GF_ASSERT(priv);

    if (!txn_id) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_ID_GET_FAIL,
               "Empty transaction id received.");
        ret = -1;
        goto out;
    }

    ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info);
    if (ret) {
        gf_msg_callingfn(this->name, GF_LOG_ERROR, 0,
                         GD_MSG_TRANS_OPINFO_GET_FAIL,
                         "Unable to get transaction opinfo "
                         "for transaction ID : %s",
                         uuid_utoa(*txn_id));
        goto out;
    }

    if (txn_op_info.op_ctx)
        dict_unref(txn_op_info.op_ctx);

    dict_del(priv->glusterd_txn_opinfo, uuid_utoa(*txn_id));

    gf_msg_debug(this->name, 0,
                 "Successfully cleared opinfo for transaction ID : %s",
                 uuid_utoa(*txn_id));

    ret = 0;
out:
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

static int glusterfs_port = GLUSTERD_DEFAULT_PORT;
static char *glusterd_op_sm_state_names[] = {
    "Default",
    "Lock sent",
    "Locked",
    "Stage op sent",
    "Staged",
    "Commit op sent",
    "Committed",
    "Unlock sent",
    "Stage op failed",
    "Commit op failed",
    "Brick op sent",
    "Brick op failed",
    "Brick op Committed",
    "Brick op Commit failed",
    "Ack drain",
    "Invalid",
};

static char *glusterd_op_sm_event_names[] = {
    "GD_OP_EVENT_NONE",       "GD_OP_EVENT_START_LOCK",
    "GD_OP_EVENT_LOCK",       "GD_OP_EVENT_RCVD_ACC",
    "GD_OP_EVENT_ALL_ACC",    "GD_OP_EVENT_STAGE_ACC",
    "GD_OP_EVENT_COMMIT_ACC", "GD_OP_EVENT_RCVD_RJT",
    "GD_OP_EVENT_STAGE_OP",   "GD_OP_EVENT_COMMIT_OP",
    "GD_OP_EVENT_UNLOCK",     "GD_OP_EVENT_START_UNLOCK",
    "GD_OP_EVENT_ALL_ACK",    "GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP",
    "GD_OP_EVENT_INVALID"};

char *
glusterd_op_sm_state_name_get(int state)
{
    if (state < 0 || state >= GD_OP_STATE_MAX)
        return glusterd_op_sm_state_names[GD_OP_STATE_MAX];
    return glusterd_op_sm_state_names[state];
}

char *
glusterd_op_sm_event_name_get(int event)
{
    if (event < 0 || event >= GD_OP_EVENT_MAX)
        return glusterd_op_sm_event_names[GD_OP_EVENT_MAX];
    return glusterd_op_sm_event_names[event];
}
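
/* Both name tables above end with a sentinel entry ("Invalid" /
 * "GD_OP_EVENT_INVALID") at index GD_OP_STATE_MAX / GD_OP_EVENT_MAX, which
 * is what the two getters return for any out-of-range index instead of
 * reading past the arrays.
 */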

static void
glusterd_destroy_lock_ctx(glusterd_op_lock_ctx_t *ctx)
{
    if (!ctx)
        return;
    GF_FREE(ctx);
}

void
glusterd_set_volume_status(glusterd_volinfo_t *volinfo,
                           glusterd_volume_status status)
{
    GF_ASSERT(volinfo);
    volinfo->status = status;
}

static int
glusterd_op_sm_inject_all_acc(uuid_t *txn_id)
{
    int ret = -1;
    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACC, txn_id, NULL);
    gf_msg_debug("glusterd", 0, "Returning %d", ret);
    return ret;
}

static int
glusterd_check_bitrot_cmd(char *key, const int keylen, char *errstr,
                          const size_t size)
{
    int ret = -1;

    if (len_strcmp(key, keylen, "bitrot") ||
        len_strcmp(key, keylen, "features.bitrot")) {
        snprintf(errstr, size,
                 " 'gluster volume set <VOLNAME> %s' is invalid command."
                 " Use 'gluster volume bitrot <VOLNAME> {enable|disable}'"
                 " instead.",
                 key);
        goto out;
    } else if (len_strcmp(key, keylen, "scrub-freq") ||
               len_strcmp(key, keylen, "features.scrub-freq")) {
        snprintf(errstr, size,
                 " 'gluster volume set <VOLNAME> %s' is invalid command."
                 " Use 'gluster volume bitrot <VOLNAME> scrub-frequency"
                 " {hourly|daily|weekly|biweekly|monthly}' instead.",
                 key);
        goto out;
    } else if (len_strcmp(key, keylen, "scrub") ||
               len_strcmp(key, keylen, "features.scrub")) {
        snprintf(errstr, size,
                 " 'gluster volume set <VOLNAME> %s' is invalid command."
                 " Use 'gluster volume bitrot <VOLNAME> scrub {pause|resume}'"
                 " instead.",
                 key);
        goto out;
    } else if (len_strcmp(key, keylen, "scrub-throttle") ||
               len_strcmp(key, keylen, "features.scrub-throttle")) {
        snprintf(errstr, size,
                 " 'gluster volume set <VOLNAME> %s' is invalid command."
                 " Use 'gluster volume bitrot <VOLNAME> scrub-throttle "
                 " {lazy|normal|aggressive}' instead.",
                 key);
        goto out;
    }

    ret = 0;
out:
    return ret;
}

static int
glusterd_check_quota_cmd(char *key, const int keylen, char *value, char *errstr,
                         size_t size)
{
    int ret = -1;
    gf_boolean_t b = _gf_false;

    if (len_strcmp(key, keylen, "quota") ||
        len_strcmp(key, keylen, "features.quota")) {
        ret = gf_string2boolean(value, &b);
        if (ret)
            goto out;
        ret = -1;
        if (b) {
            snprintf(errstr, size,
                     " 'gluster volume set <VOLNAME> %s %s' is deprecated."
                     " Use 'gluster volume quota <VOLNAME> enable' instead.",
                     key, value);
        } else {
            snprintf(errstr, size,
                     " 'gluster volume set <VOLNAME> %s %s' is deprecated."
                     " Use 'gluster volume quota <VOLNAME> disable' instead.",
                     key, value);
        }
        goto out;
    } else if (len_strcmp(key, keylen, "inode-quota") ||
               len_strcmp(key, keylen, "features.inode-quota")) {
        ret = gf_string2boolean(value, &b);
        if (ret)
            goto out;
        ret = -1;
        if (b) {
            snprintf(
                errstr, size,
                " 'gluster volume set <VOLNAME> %s %s' is deprecated."
                " Use 'gluster volume inode-quota <VOLNAME> enable' instead.",
                key, value);
        } else {
            /* inode-quota disable not supported,
             * use quota disable
             */
            snprintf(errstr, size,
                     " 'gluster volume set <VOLNAME> %s %s' is deprecated."
                     " Use 'gluster volume quota <VOLNAME> disable' instead.",
                     key, value);
        }
        goto out;
    }

    ret = 0;
out:
    return ret;
}

int
glusterd_brick_op_build_payload(glusterd_op_t op,
                                glusterd_brickinfo_t *brickinfo,
                                gd1_mgmt_brick_op_req **req, dict_t *dict)
{
    int ret = -1;
    gd1_mgmt_brick_op_req *brick_req = NULL;
    char *volname = NULL;
    char name[1024] = {
        0,
    };
    gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
    xlator_t *this = THIS;
    glusterd_volinfo_t *volinfo = NULL;

    GF_ASSERT(op < GD_OP_MAX);
    GF_ASSERT(op > GD_OP_NONE);
    GF_ASSERT(req);

    switch (op) {
        case GD_OP_REMOVE_BRICK:
        case GD_OP_STOP_VOLUME:
            brick_req = GF_CALLOC(1, sizeof(*brick_req),
                                  gf_gld_mt_mop_brick_req_t);
            if (!brick_req) {
                gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
                        NULL);
                goto out;
            }
            brick_req->op = GLUSTERD_BRICK_TERMINATE;
            brick_req->name = brickinfo->path;
            glusterd_set_brick_status(brickinfo, GF_BRICK_STOPPING);
            break;
        case GD_OP_PROFILE_VOLUME:
            brick_req = GF_CALLOC(1, sizeof(*brick_req),
                                  gf_gld_mt_mop_brick_req_t);

            if (!brick_req) {
                gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
                        NULL);
                goto out;
            }

            brick_req->op = GLUSTERD_BRICK_XLATOR_INFO;
            brick_req->name = brickinfo->path;

            break;
        case GD_OP_HEAL_VOLUME: {
            brick_req = GF_CALLOC(1, sizeof(*brick_req),
                                  gf_gld_mt_mop_brick_req_t);
            if (!brick_req) {
                gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
                        NULL);
                goto out;
            }

            brick_req->op = GLUSTERD_BRICK_XLATOR_OP;
            brick_req->name = "";
            ret = dict_get_int32(dict, "heal-op", (int32_t *)&heal_op);
            if (ret) {
                gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                        "Key=heal-op", NULL);
                goto out;
            }
            ret = dict_set_int32_sizen(dict, "xl-op", heal_op);
            if (ret) {
                gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                        "Key=xl-op", NULL);
                goto out;
            }
        } break;
        case GD_OP_STATUS_VOLUME: {
            brick_req = GF_CALLOC(1, sizeof(*brick_req),
                                  gf_gld_mt_mop_brick_req_t);
            if (!brick_req) {
                gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
                        NULL);
                goto out;
            }
            brick_req->op = GLUSTERD_BRICK_STATUS;
            brick_req->name = "";
            ret = dict_set_str_sizen(dict, "brick-name", brickinfo->path);
            if (ret) {
                gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                        "Key=brick-name", NULL);
                goto out;
            }
        } break;
        case GD_OP_REBALANCE:
        case GD_OP_DEFRAG_BRICK_VOLUME:
            brick_req = GF_CALLOC(1, sizeof(*brick_req),
                                  gf_gld_mt_mop_brick_req_t);
            if (!brick_req) {
                gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
                        NULL);
                goto out;
            }

            brick_req->op = GLUSTERD_BRICK_XLATOR_DEFRAG;
            ret = dict_get_str(dict, "volname", &volname);
            if (ret) {
                gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                        "Key=volname", NULL);
                goto out;
            }
            ret = glusterd_volinfo_find(volname, &volinfo);
            if (ret) {
                gf_smsg(this->name, GF_LOG_ERROR, errno,
                        GD_MSG_VOLINFO_GET_FAIL, "Volume=%s", volname, NULL);
                goto out;
            }
            snprintf(name, sizeof(name), "%s-dht", volname);
            brick_req->name = gf_strdup(name);

            break;
        case GD_OP_SNAP:
        case GD_OP_BARRIER:
            brick_req = GF_CALLOC(1, sizeof(*brick_req),
                                  gf_gld_mt_mop_brick_req_t);
            if (!brick_req) {
                gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
                        NULL);
                goto out;
            }
            brick_req->op = GLUSTERD_BRICK_BARRIER;
            brick_req->name = brickinfo->path;
            break;

        default:
            goto out;
            break;
    }

    brick_req->dict.dict_len = 0;
    brick_req->dict.dict_val = NULL;
    ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
                                      &brick_req->input.input_len);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, errno,
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
        goto out;
    }
    *req = brick_req;
    ret = 0;

out:
    if (ret && brick_req)
        GF_FREE(brick_req);
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}
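
/* Ownership caveat: brick_req->name is populated inconsistently above. For
 * TERMINATE, XLATOR_INFO and BARRIER it aliases brickinfo->path, for the
 * heal/status cases it points at a string literal, and only the
 * rebalance/defrag case gf_strdup()s it. Code that frees these requests
 * therefore cannot unconditionally GF_FREE(brick_req->name).
 */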

int
glusterd_node_op_build_payload(glusterd_op_t op, gd1_mgmt_brick_op_req **req,
                               dict_t *dict)
{
    int ret = -1;
    gd1_mgmt_brick_op_req *brick_req = NULL;
    char *volname = NULL;

    GF_ASSERT(op < GD_OP_MAX);
    GF_ASSERT(op > GD_OP_NONE);
    GF_ASSERT(req);
    xlator_t *this = THIS;

    switch (op) {
        case GD_OP_PROFILE_VOLUME:
            brick_req = GF_CALLOC(1, sizeof(*brick_req),
                                  gf_gld_mt_mop_brick_req_t);
            if (!brick_req) {
                gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
                        NULL);
                goto out;
            }

            brick_req->op = GLUSTERD_NODE_PROFILE;
            brick_req->name = "";

            break;

        case GD_OP_STATUS_VOLUME:
            brick_req = GF_CALLOC(1, sizeof(*brick_req),
                                  gf_gld_mt_mop_brick_req_t);
            if (!brick_req) {
                gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
                        NULL);
                goto out;
            }

            brick_req->op = GLUSTERD_NODE_STATUS;
            brick_req->name = "";

            break;

        case GD_OP_SCRUB_STATUS:
        case GD_OP_SCRUB_ONDEMAND:
            brick_req = GF_CALLOC(1, sizeof(*brick_req),
                                  gf_gld_mt_mop_brick_req_t);
            if (!brick_req) {
                gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY,
                        NULL);
                goto out;
            }

            brick_req->op = GLUSTERD_NODE_BITROT;

            ret = dict_get_str(dict, "volname", &volname);
            if (ret) {
                gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                        "Key=volname", NULL);
                goto out;
            }

            brick_req->name = gf_strdup(volname);
            break;
        default:
            goto out;
    }

    brick_req->dict.dict_len = 0;
    brick_req->dict.dict_val = NULL;
    ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
                                      &brick_req->input.input_len);

    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, errno,
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
        goto out;
    }

    *req = brick_req;
    ret = 0;

out:
    if (ret && brick_req)
        GF_FREE(brick_req);
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}
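
/* Unlike glusterd_brick_op_build_payload() above, this builds requests for
 * per-node daemons (GLUSTERD_NODE_PROFILE/STATUS/BITROT) rather than for an
 * individual brick, so no brickinfo is involved and brick_req->name is
 * either empty or a gf_strdup()ed volume name.
 */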

static int
glusterd_validate_quorum_options(xlator_t *this, char *fullkey, char *value,
                                 char **op_errstr)
{
    int ret = 0;
    char *key = NULL;
    volume_option_t *opt = NULL;

    if (!glusterd_is_quorum_option(fullkey))
        goto out;
    key = strchr(fullkey, '.');
    if (key == NULL) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
        ret = -1;
        goto out;
    }
    key++;
    opt = xlator_volume_option_get(this, key);
    if (!opt) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL, NULL);
        ret = -1;
        goto out;
    }
    ret = xlator_option_validate(this, key, value, opt, op_errstr);
out:
    return ret;
}

static int
glusterd_validate_brick_mx_options(xlator_t *this, char *fullkey, char *value,
                                   char **op_errstr)
{
    int ret = 0;

    // Placeholder function for now

    return ret;
}

static int32_t
glusterd_count_connected_peers(int32_t *count)
{
    glusterd_peerinfo_t *peerinfo = NULL;
    glusterd_conf_t *conf = NULL;
    int32_t ret = -1;
    xlator_t *this = THIS;

    conf = this->private;
    GF_VALIDATE_OR_GOTO(this->name, conf, out);
    GF_VALIDATE_OR_GOTO(this->name, count, out);

    *count = 1;

    RCU_READ_LOCK;
    cds_list_for_each_entry_rcu(peerinfo, &conf->peers, uuid_list)
    {
        /* Find peer who is connected and is a friend */
        if ((peerinfo->connected) &&
            (peerinfo->state == GD_FRIEND_STATE_BEFRIENDED)) {
            (*count)++;
        }
    }
    RCU_READ_UNLOCK;

    ret = 0;
out:
    return ret;
}
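
/* The count is seeded with 1 so the local node is included along with every
 * befriended, currently-connected peer. glusterd_validate_shared_storage()
 * below relies on this to require a cluster of at least two nodes before
 * shared storage may be enabled.
 */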

static int
glusterd_validate_shared_storage(char *value, char *errstr)
{
    int32_t ret = -1;
    int32_t count = -1;
    char *op = NULL;
    char hook_script[PATH_MAX] = "";
    xlator_t *this = THIS;
    glusterd_conf_t *conf = NULL;
    int32_t len = 0;
    glusterd_volinfo_t *volinfo = NULL;

    conf = this->private;
    GF_VALIDATE_OR_GOTO(this->name, conf, out);

    GF_VALIDATE_OR_GOTO(this->name, value, out);
    GF_VALIDATE_OR_GOTO(this->name, errstr, out);

    if ((strcmp(value, "enable")) && (strcmp(value, "disable"))) {
        snprintf(errstr, PATH_MAX,
                 "Invalid option(%s). Valid options "
                 "are 'enable' and 'disable'",
                 value);
        gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
               errstr);
        ret = -1;
        goto out;
    }

    len = snprintf(hook_script, sizeof(hook_script),
                   "%s" GLUSTERD_SHRD_STRG_HOOK_SCRIPT, conf->workdir);
    if ((len < 0) || (len >= sizeof(hook_script))) {
        ret = -1;
        goto out;
    }

    ret = sys_access(hook_script, R_OK | X_OK);
    if (ret) {
        len = snprintf(errstr, PATH_MAX,
                       "The hook-script (%s) required "
                       "for this operation is not present. "
                       "Please install the hook-script "
                       "and retry",
                       hook_script);
        if (len < 0) {
            strncpy(errstr, "<error>", PATH_MAX);
        }
        gf_msg(this->name, GF_LOG_ERROR, ENOENT, GD_MSG_FILE_OP_FAILED, "%s",
               errstr);
        goto out;
    }

    if (!strncmp(value, "disable", SLEN("disable"))) {
        ret = dict_get_str(conf->opts, GLUSTERD_SHARED_STORAGE_KEY, &op);
        if (ret || !strncmp(op, "disable", SLEN("disable"))) {
            snprintf(errstr, PATH_MAX,
                     "Shared storage volume "
                     "does not exist. Please enable shared storage"
                     " for creating shared storage volume.");
            gf_msg(this->name, GF_LOG_ERROR, 0,
                   GD_MSG_SHARED_STORAGE_DOES_NOT_EXIST, "%s", errstr);
            ret = -1;
            goto out;
        }
        goto out;
    }

    ret = glusterd_volinfo_find(GLUSTER_SHARED_STORAGE, &volinfo);
    if (!ret) {
        snprintf(errstr, PATH_MAX,
                 "Shared storage volume(" GLUSTER_SHARED_STORAGE
                 ") already exists.");
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ALREADY_EXIST, "%s",
               errstr);
        ret = -1;
        goto out;
    }

    ret = glusterd_count_connected_peers(&count);
    if (ret) {
        snprintf(errstr, PATH_MAX,
                 "Failed to calculate number of connected peers.");
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PEER_COUNT_GET_FAIL, "%s",
               errstr);
        goto out;
    }

    if (count <= 1) {
        snprintf(errstr, PATH_MAX,
                 "More than one node should "
                 "be up/present in the cluster to enable this option");
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INSUFFICIENT_UP_NODES, "%s",
               errstr);
        ret = -1;
        goto out;
    }

out:
    return ret;
}

static int
glusterd_validate_localtime_logging(char *value, char *errstr)
{
    int32_t ret = -1;
    xlator_t *this = THIS;
    glusterd_conf_t *conf = NULL;
    int already_enabled = 0;

    conf = this->private;
    GF_VALIDATE_OR_GOTO(this->name, conf, out);
    GF_VALIDATE_OR_GOTO(this->name, value, out);

    already_enabled = gf_log_get_localtime();

    ret = 0;
    if (strcmp(value, "enable") == 0) {
        gf_log_set_localtime(1);
        if (!already_enabled)
            gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_LOCALTIME_LOGGING_ENABLE,
                   "localtime logging enable");
    } else if (strcmp(value, "disable") == 0) {
        gf_log_set_localtime(0);
        if (already_enabled)
            gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_LOCALTIME_LOGGING_DISABLE,
                   "localtime logging disable");
    } else {
        ret = -1;
        GF_VALIDATE_OR_GOTO(this->name, errstr, out);
        snprintf(errstr, PATH_MAX,
                 "Invalid option(%s). Valid options "
                 "are 'enable' and 'disable'",
                 value);
        gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
               errstr);
    }

out:
    return ret;
}

static int
glusterd_validate_daemon_log_level(char *value, char *errstr)
{
    int32_t ret = -1;
    xlator_t *this = THIS;
    glusterd_conf_t *conf = NULL;

    conf = this->private;
    GF_VALIDATE_OR_GOTO(this->name, conf, out);

    GF_VALIDATE_OR_GOTO(this->name, value, out);

    ret = 0;

    if ((strcmp(value, "INFO")) && (strcmp(value, "WARNING")) &&
        (strcmp(value, "DEBUG")) && (strcmp(value, "TRACE")) &&
        (strcmp(value, "ERROR"))) {
        ret = -1;
        GF_VALIDATE_OR_GOTO(this->name, errstr, out);
        snprintf(errstr, PATH_MAX,
                 "Invalid option(%s). Valid options "
                 "are 'INFO' or 'WARNING' or 'ERROR' or 'DEBUG' or "
                 " 'TRACE'",
                 value);
        gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY, "%s",
               errstr);
    }

out:
    return ret;
}

static int
glusterd_op_stage_set_volume(dict_t *dict, char **op_errstr)
{
    int ret = -1;
    char *volname = NULL;
    int exists = 0;
    char *key = NULL;
    char *key_fixed = NULL;
    char *value = NULL;
    char *val_dup = NULL;
    char keystr[100] = {
        0,
    };
    int keystr_len;
    int keylen;
    char *trash_path = NULL;
    int trash_path_len = 0;
    int count = 0;
    int dict_count = 0;
    char errstr[PATH_MAX] = {
        0,
    };
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    dict_t *val_dict = NULL;
    gf_boolean_t global_opt = _gf_false;
    gf_boolean_t key_matched = _gf_false; /* whether a key was processed */
    glusterd_volinfo_t *voliter = NULL;
    glusterd_conf_t *priv = NULL;
    xlator_t *this = THIS;
    uint32_t new_op_version = GD_OP_VERSION_MIN;
    uint32_t local_new_op_version = GD_OP_VERSION_MIN;
    uint32_t local_new_client_op_version = GD_OP_VERSION_MIN;
    uint32_t key_op_version = GD_OP_VERSION_MIN;
    uint32_t local_key_op_version = GD_OP_VERSION_MIN;
    gf_boolean_t origin_glusterd = _gf_true;
    gf_boolean_t check_op_version = _gf_true;
    gf_boolean_t trash_enabled = _gf_false;
    gf_boolean_t all_vol = _gf_false;
    struct volopt_map_entry *vmep = NULL;

    GF_ASSERT(dict);
    priv = this->private;
    GF_ASSERT(priv);

    /* Check if we can support the required op-version.
     * This check is not done on the originator glusterd; the originator
     * glusterd sets this value.
     */
    origin_glusterd = is_origin_glusterd(dict);

    if (!origin_glusterd) {
        /* Check for v3.3.x origin glusterd */
        check_op_version = dict_get_str_boolean(dict, "check-op-version",
                                                _gf_false);

        if (check_op_version) {
            ret = dict_get_uint32(dict, "new-op-version", &new_op_version);
            if (ret) {
                gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                        "Key=new-op-version", NULL);
                goto out;
            }

            if ((new_op_version > GD_OP_VERSION_MAX) ||
                (new_op_version < GD_OP_VERSION_MIN)) {
                ret = -1;
                snprintf(errstr, sizeof(errstr),
                         "Required op_version (%d) is not supported."
                         " Max supported op version is %d",
                         new_op_version, priv->op_version);
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNSUPPORTED_VERSION,
                       "%s", errstr);
                goto out;
            }
        }
    }

    ret = dict_get_int32(dict, "count", &dict_count);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Count(dict) not set in Volume-Set");
        goto out;
    }

    if (dict_count == 0) {
        /* No options are specified for 'volume set help' */
        if (dict_get_sizen(dict, "help")) {
            ret = 0;
            goto out;
        }

        if (dict_get_sizen(dict, "help-xml")) {
#if (HAVE_LIB_XML)
            ret = 0;
            goto out;
#else
            ret = -1;
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MODULE_NOT_INSTALLED,
                   "libxml not present in the system");
            *op_errstr = gf_strdup(
                "Error: xml libraries not present to produce xml-output");
            goto out;
#endif
        }
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_OPTIONS_GIVEN,
               "No options received ");
        *op_errstr = gf_strdup("Options not specified");
        ret = -1;
        goto out;
    }

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                "Key=volname", NULL);
        goto out;
    }

    if (strcasecmp(volname, "all") != 0) {
        ret = glusterd_volinfo_find(volname, &volinfo);
        if (ret) {
            snprintf(errstr, sizeof(errstr), FMTSTR_CHECK_VOL_EXISTS, volname);
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
                   FMTSTR_CHECK_VOL_EXISTS, volname);
            goto out;
        }

        ret = glusterd_validate_volume_id(dict, volinfo);
        if (ret)
            goto out;

        local_new_op_version = volinfo->op_version;
        local_new_client_op_version = volinfo->client_op_version;

    } else {
        all_vol = _gf_true;
    }

    val_dict = dict_new();
    if (!val_dict) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
        goto out;
    }

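    /* The CLI flattens the options into "key1"/"value1", "key2"/"value2",
     * ... pairs, with the pair count in the "count" key fetched above; the
     * loop below walks these until the next "key<N>" lookup fails.
     */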
    for (count = 1; ret != 1; count++) {
        keystr_len = sprintf(keystr, "key%d", count);
        ret = dict_get_strn(dict, keystr, keystr_len, &key);
        if (ret) {
            gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                    "Key=%s", keystr, NULL);
            break;
        }

        keystr_len = sprintf(keystr, "value%d", count);
        ret = dict_get_strn(dict, keystr, keystr_len, &value);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "invalid key,value pair in 'volume set'");
            ret = -1;
            goto out;
        }

        key_matched = _gf_false;
        keylen = strlen(key);
        if (len_strcmp(key, keylen, "config.memory-accounting")) {
            key_matched = _gf_true;
            gf_msg_debug(this->name, 0,
                         "enabling memory accounting for volume %s", volname);
            ret = 0;
        } else if (len_strcmp(key, keylen, "config.transport")) {
            key_matched = _gf_true;
            gf_msg_debug(this->name, 0, "changing transport-type for volume %s",
                         volname);
            ret = 0;
            /* if value is none of 'tcp/rdma/tcp,rdma' error out */
            if (!((strcasecmp(value, "rdma") == 0) ||
                  (strcasecmp(value, "tcp") == 0) ||
                  (strcasecmp(value, "tcp,rdma") == 0) ||
                  (strcasecmp(value, "rdma,tcp") == 0))) {
                ret = snprintf(errstr, sizeof(errstr),
                               "transport-type %s does not exist", value);
                /* let's not bother about the above return value;
                   it's a failure anyway */
                ret = -1;
                goto out;
            }
        } else if (len_strcmp(key, keylen, "ganesha.enable")) {
            key_matched = _gf_true;
            if (strcmp(value, "off") == 0) {
                ret = ganesha_manage_export(dict, "off", _gf_true, op_errstr);
                if (ret)
                    goto out;
            }
        }

        if (!key_matched) {
            ret = glusterd_check_bitrot_cmd(key, keylen, errstr,
                                            sizeof(errstr));
            if (ret)
                goto out;
            ret = glusterd_check_quota_cmd(key, keylen, value, errstr,
                                           sizeof(errstr));
            if (ret)
                goto out;
        }

        if (is_key_glusterd_hooks_friendly(key))
            continue;

        ret = glusterd_volopt_validate(volinfo, dict, key, value, op_errstr);
        if (ret)
            goto out;

        exists = glusterd_check_option_exists(key, &key_fixed);
        if (exists == -1) {
            ret = -1;
            goto out;
        }

        if (!exists) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
                   "Option with name: %s does not exist", key);
            ret = snprintf(errstr, sizeof(errstr), "option : %s does not exist",
                           key);
            if (key_fixed)
                snprintf(errstr + ret, sizeof(errstr) - ret,
                         "\nDid you mean %s?", key_fixed);
            ret = -1;
            goto out;
        }

        if (key_fixed) {
            key = key_fixed;
            keylen = strlen(key_fixed);
        }

#ifdef HAVE_LIBAIO
        if (len_strcmp(key, keylen, "storage.linux-aio")) {
            if (volinfo && volinfo->status == GLUSTERD_STATUS_STARTED) {
                snprintf(errstr, sizeof(errstr),
                         "Changing 'storage.linux-aio' is not"
                         " supported when the volume is in started"
                         " state. Please stop the volume first.");
                ret = -1;
                goto out;
            }
        }
#endif /* HAVE_LIBAIO */

#ifdef HAVE_LIBURING
        if (len_strcmp(key, keylen, "storage.linux-io_uring")) {
            if (volinfo && volinfo->status == GLUSTERD_STATUS_STARTED) {
                snprintf(errstr, sizeof(errstr),
                         "Changing 'storage.linux-io_uring' is not"
                         " supported when the volume is in started"
                         " state. Please stop the volume first.");
                ret = -1;
                goto out;
            }
        }
#endif /* HAVE_LIBURING */

        if (len_strcmp(key, keylen, "cluster.granular-entry-heal")) {
            /* For granular entry-heal, if the set command was
             * invoked through volume-set CLI, then allow the
             * command only if the volume is still in 'Created'
             * state
             */
            if (volinfo && volinfo->status != GLUSTERD_STATUS_NONE &&
                (dict_get_sizen(dict, "is-special-key") == NULL)) {
                snprintf(errstr, sizeof(errstr),
                         " 'gluster volume set <VOLNAME> %s {enable, disable}'"
                         " is not supported."
                         " Use 'gluster volume heal <VOLNAME> "
                         "granular-entry-heal {enable, disable}' instead.",
                         key);
                ret = -1;
                goto out;
            }
        } else if (len_strcmp(key, keylen, GLUSTERD_GLOBAL_OP_VERSION_KEY)) {
            /* Check if the key is cluster.op-version and set
             * local_new_op_version to the value given if possible.
             */
            if (!all_vol) {
                ret = -1;
                snprintf(errstr, sizeof(errstr),
                         "Option \"%s\" is not valid for a single volume", key);
                goto out;
            }
            /* Check if cluster.op-version is the only option being
             * set
             */
            if (count != 1) {
                ret = -1;
                snprintf(errstr, sizeof(errstr),
                         "Option \"%s\" cannot be set along with other options",
                         key);
                goto out;
            }
            /* Just reusing the variable, but I'm using it for
             * storing the op-version from value
             */
            ret = gf_string2uint(value, &local_key_op_version);
            if (ret) {
                snprintf(errstr, sizeof(errstr),
                         "invalid number format \"%s\" in option \"%s\"", value,
                         key);
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY, "%s",
                       errstr);
                goto out;
            }

            if (local_key_op_version > GD_OP_VERSION_MAX ||
                local_key_op_version < GD_OP_VERSION_MIN) {
                ret = -1;
                snprintf(errstr, sizeof(errstr),
                         "Required op_version (%d) is not supported."
                         " Max supported op version is %d",
                         local_key_op_version, priv->op_version);
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
                       "%s", errstr);
                goto out;
            }
            if (local_key_op_version > priv->op_version) {
                local_new_op_version = local_key_op_version;
            } else {
                ret = -1;
                snprintf(errstr, sizeof(errstr),
                         "Required op-version (%d) should"
                         " not be equal or lower than current"
                         " cluster op-version (%d).",
                         local_key_op_version, priv->op_version);
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VERSION_UNSUPPORTED,
                       "%s", errstr);
                goto out;
            }

            goto cont;
        }

        ALL_VOLUME_OPTION_CHECK(volname, _gf_false, key, ret, op_errstr, out);
        ret = glusterd_validate_quorum_options(this, key, value, op_errstr);
        if (ret)
            goto out;

        ret = glusterd_validate_brick_mx_options(this, key, value, op_errstr);
        if (ret)
            goto out;

        vmep = gd_get_vmep(key);
        local_key_op_version = glusterd_get_op_version_from_vmep(vmep);
        if (local_key_op_version > local_new_op_version)
            local_new_op_version = local_key_op_version;
        if (gd_is_client_option(vmep) &&
            (local_key_op_version > local_new_client_op_version))
            local_new_client_op_version = local_key_op_version;

        sprintf(keystr, "op-version%d", count);
        if (origin_glusterd) {
            ret = dict_set_uint32(dict, keystr, local_key_op_version);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                       "Failed to set key-op-version in dict");
                goto out;
            }
        } else if (check_op_version) {
            ret = dict_get_uint32(dict, keystr, &key_op_version);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                       "Failed to get key-op-version from dict");
                goto out;
            }
            if (local_key_op_version != key_op_version) {
                ret = -1;
                snprintf(errstr, sizeof(errstr),
                         "option: %s op-version mismatch", key);
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERSION_MISMATCH,
                       "%s, required op-version = %" PRIu32
                       ", available op-version = %" PRIu32,
                       errstr, key_op_version, local_key_op_version);
                goto out;
            }
        }

        global_opt = glusterd_check_globaloption(key);

        if (len_strcmp(key, keylen, GLUSTERD_SHARED_STORAGE_KEY)) {
            ret = glusterd_validate_shared_storage(value, errstr);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0,
                       GD_MSG_SHARED_STRG_VOL_OPT_VALIDATE_FAIL,
                       "Failed to validate shared storage volume options");
                goto out;
            }
        } else if (len_strcmp(key, keylen, GLUSTERD_LOCALTIME_LOGGING_KEY)) {
            ret = glusterd_validate_localtime_logging(value, errstr);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0,
                       GD_MSG_LOCALTIME_LOGGING_VOL_OPT_VALIDATE_FAIL,
                       "Failed to validate localtime logging volume options");
                goto out;
            }
        } else if (len_strcmp(key, keylen, GLUSTERD_DAEMON_LOG_LEVEL_KEY)) {
            ret = glusterd_validate_daemon_log_level(value, errstr);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0,
                       GD_MSG_DAEMON_LOG_LEVEL_VOL_OPT_VALIDATE_FAIL,
                       "Failed to validate daemon-log-level volume options");
                goto out;
            }
        } else if (len_strcmp(key, keylen, "features.trash-dir")) {
            if (volinfo) {
                ret = glusterd_volinfo_get(volinfo, VKEY_FEATURES_TRASH,
                                           &val_dup);
                if (!ret && val_dup) {
                    ret = gf_string2boolean(val_dup, &trash_enabled);
                    if (ret)
                        goto out;
                }
            }
            if (!trash_enabled) {
                snprintf(errstr, sizeof(errstr),
                         "Trash translator is not enabled. "
                         "Use volume set %s trash on",
                         volname);
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
                       "Unable to set the options in 'volume set': %s", errstr);
                ret = -1;
                goto out;
            }
            if (strchr(value, '/')) {
                snprintf(errstr, sizeof(errstr),
                         "Path is not allowed as option");
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
                       "Unable to set the options in 'volume set': %s", errstr);
                ret = -1;
                goto out;
            }

            list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
            {
                /* Check for local brick */
                if (!gf_uuid_compare(brickinfo->uuid, MY_UUID)) {
                    trash_path_len = strlen(value) + strlen(brickinfo->path) +
                                     2;
                    trash_path = GF_MALLOC(trash_path_len, gf_common_mt_char);
                    if (!trash_path) {
                        ret = -1;
                        goto out;
                    }
                    snprintf(trash_path, trash_path_len, "%s/%s",
                             brickinfo->path, value);

                    /* Checks whether a directory with
                       given option exists or not */
                    if (!sys_access(trash_path, R_OK)) {
                        snprintf(errstr, sizeof(errstr), "Path %s exists",
                                 value);
                        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
                               "Unable to set the options in 'volume set': %s",
                               errstr);
                        ret = -1;
                        goto out;
                    } else {
                        gf_msg_debug(this->name, 0,
                                     "Directory with given name does not exist,"
                                     " continuing");
                    }

                    if (volinfo->status == GLUSTERD_STATUS_STARTED &&
                        brickinfo->status != GF_BRICK_STARTED) {
                        /* If volume is in started state, check
                           whether bricks are online */
                        snprintf(errstr, sizeof(errstr),
                                 "One or more bricks are down");
                        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
                               "Unable to set the options in 'volume set': %s",
                               errstr);
                        ret = -1;
                        goto out;
                    }
                }
                if (trash_path) {
                    GF_FREE(trash_path);
                    trash_path = NULL;
                }
            }
        }

        ret = dict_set_strn(val_dict, key, keylen, value);

        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                   "Unable to set the options in 'volume set'");
            ret = -1;
            goto out;
        }

        *op_errstr = NULL;
        if (!global_opt && !all_vol)
            ret = glusterd_validate_reconfopts(volinfo, val_dict, op_errstr);
        else if (!all_vol) {
            voliter = NULL;
            cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
            {
                ret = glusterd_validate_globalopts(voliter, val_dict,
                                                   op_errstr);
                if (ret)
                    break;
            }
        }

        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
                   "Could not create temp volfile, some option failed: %s",
                   *op_errstr);
            goto out;
        }
        dict_deln(val_dict, key, keylen);

        if (key_fixed) {
            GF_FREE(key_fixed);
            key_fixed = NULL;
        }
    }

    /* Check if all the connected clients support the new client-op-version
     */
    ret = glusterd_check_client_op_version_support(
        volname, local_new_client_op_version, op_errstr);
    if (ret)
        goto out;
cont:
    if (origin_glusterd) {
        ret = dict_set_uint32(dict, "new-op-version", local_new_op_version);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                   "Failed to set new-op-version in dict");
            goto out;
        }
        /* Set this value in dict so other peers know to check for
         * op-version. This is a hack for 3.3.x compatibility
         *
         * TODO: Remove this and the other places this is referred once
         * 3.3.x compatibility is not required
         */
        ret = dict_set_int32_sizen(dict, "check-op-version", 1);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                   "Failed to set check-op-version in dict");
            goto out;
        }
    }

    ret = 0;

out:
    if (val_dict)
        dict_unref(val_dict);

    if (trash_path)
        GF_FREE(trash_path);

    GF_FREE(key_fixed);
    if (errstr[0] != '\0')
        *op_errstr = gf_strdup(errstr);

    if (ret) {
        if (!(*op_errstr)) {
            *op_errstr = gf_strdup("Error, Validation Failed");
            gf_msg_debug(this->name, 0, "Error, Cannot Validate option :%s",
                         *op_errstr);
        } else {
            gf_msg_debug(this->name, 0, "Error, Cannot Validate option");
        }
    }
    return ret;
}
1552

1553
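/* Stage-phase validation for 'volume reset': checks that the volume (or
 * "all") exists, that the option key is known, and that the deprecated
 * quota/inode-quota keys are not reset directly. Part of the op-sm staging
 * step that validates a request before the commit phase runs.
 */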
static int
glusterd_op_stage_reset_volume(dict_t *dict, char **op_errstr)
{
    int ret = 0;
    char *volname = NULL;
    int exists = 0;
    char msg[2048] = {0};
    char *key = NULL;
    char *key_fixed = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    xlator_t *this = THIS;

    ret = dict_get_str(dict, "volname", &volname);

    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume name");
        goto out;
    }

    if (strcasecmp(volname, "all") != 0) {
        ret = glusterd_volinfo_find(volname, &volinfo);
        if (ret) {
            snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
            goto out;
        }

        ret = glusterd_validate_volume_id(dict, volinfo);
        if (ret)
            goto out;
    }

    ret = dict_get_str(dict, "key", &key);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get option key");
        goto out;
    }

    /*
     * If the key ganesha.enable is set, the volume should be unexported
     * from the ganesha server. As it is a volume-level option, perform
     * this only when the volume name is not "all" (in other words, when
     * volinfo != NULL).
     */
    if (volinfo && (!strcmp(key, "all") || !strcmp(key, "ganesha.enable"))) {
        if (glusterd_check_ganesha_export(volinfo)) {
            ret = ganesha_manage_export(dict, "off", _gf_true, op_errstr);
            if (ret)
                gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_GNS_RESET_FAIL,
                       "Could not reset ganesha.enable key");
        }
    }

    if (strcmp(key, "all")) {
        exists = glusterd_check_option_exists(key, &key_fixed);
        if (exists == -1) {
            ret = -1;
            goto out;
        }

        if (!exists) {
            ret = snprintf(msg, sizeof(msg), "Option %s does not exist", key);
            if (key_fixed)
                snprintf(msg + ret, sizeof(msg) - ret, "\nDid you mean %s?",
                         key_fixed);
            ret = -1;
            goto out;
        } else if (exists > 0) {
            if (key_fixed)
                key = key_fixed;

            /* 'gluster volume set/reset <VOLNAME>
             * features.quota/features.inode-quota' should
             * not be allowed as it is deprecated.
             * Setting and resetting quota/inode-quota features
             * should be allowed only through 'gluster volume quota
             * <VOLNAME> enable/disable'.
             * But, 'gluster volume set features.quota-deem-statfs'
             * can be turned on/off when quota is enabled.
             */

            if (strcmp(VKEY_FEATURES_INODE_QUOTA, key) == 0 ||
                strcmp(VKEY_FEATURES_QUOTA, key) == 0) {
                snprintf(msg, sizeof(msg),
                         "'gluster volume "
                         "reset <VOLNAME> %s' is deprecated. "
                         "Use 'gluster volume quota <VOLNAME> "
                         "disable' instead.",
                         key);
                ret = -1;
                goto out;
            }
            ALL_VOLUME_OPTION_CHECK(volname, _gf_false, key, ret, op_errstr,
                                    out);
        }
    }

out:
    GF_FREE(key_fixed);

    if (msg[0] != '\0') {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_STAGE_RESET_VOL_FAIL,
               "%s", msg);
        *op_errstr = gf_strdup(msg);
    }

    gf_msg_debug(this->name, 0, "Returning %d", ret);

    return ret;
}

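/* Stage-phase validation for 'volume sync': the named host must either be
 * the local node (in which case the volume, if given, must exist) or a
 * connected peer. Peer lookups are done under the RCU read lock.
 */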
static int
glusterd_op_stage_sync_volume(dict_t *dict, char **op_errstr)
{
    int ret = -1;
    char *volname = NULL;
    char *hostname = NULL;
    glusterd_peerinfo_t *peerinfo = NULL;
    char msg[2048] = {
        0,
    };
    glusterd_volinfo_t *volinfo = NULL;
    xlator_t *this = THIS;

    ret = dict_get_str(dict, "hostname", &hostname);
    if (ret) {
        snprintf(msg, sizeof(msg),
                 "hostname couldn't be "
                 "retrieved from msg");
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                "Key=hostname", NULL);
        *op_errstr = gf_strdup(msg);
        goto out;
    }

    if (glusterd_gf_is_local_addr(hostname)) {
        // volname is not present in case of sync all
        ret = dict_get_str(dict, "volname", &volname);
        if (!ret) {
            ret = glusterd_volinfo_find(volname, &volinfo);
            if (ret) {
                snprintf(msg, sizeof(msg),
                         "Volume %s "
                         "does not exist",
                         volname);
                gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_NOT_FOUND,
                        "Volume=%s", volname, NULL);
                *op_errstr = gf_strdup(msg);
                goto out;
            }
        }
    } else {
        RCU_READ_LOCK;

        peerinfo = glusterd_peerinfo_find(NULL, hostname);
        if (peerinfo == NULL) {
            RCU_READ_UNLOCK;
            ret = -1;
            snprintf(msg, sizeof(msg), "%s, is not a friend", hostname);
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_NOT_FOUND,
                    "Peer_name=%s", hostname, NULL);
            *op_errstr = gf_strdup(msg);
            goto out;

        } else if (!peerinfo->connected) {
            RCU_READ_UNLOCK;
            ret = -1;
            snprintf(msg, sizeof(msg),
                     "%s, is not connected at "
                     "the moment",
                     hostname);
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_DISCONNECTED,
                    "Peer_name=%s", hostname, NULL);
            *op_errstr = gf_strdup(msg);
            goto out;
        }

        RCU_READ_UNLOCK;
    }

out:
    gf_msg_debug("glusterd", 0, "Returning %d", ret);

    return ret;
}

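/* Stage-phase validation for 'volume status': verifies that the volume is
 * started and that the queried daemon (shd/nfs/quotad/bitd/scrub/snapd) or
 * brick is actually configured for this volume before commit proceeds.
 */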
static int
glusterd_op_stage_status_volume(dict_t *dict, char **op_errstr)
{
    int ret = -1;
    uint32_t cmd = 0;
    char msg[2048] = {
        0,
    };
    char *volname = NULL;
    char *brick = NULL;
    xlator_t *this = THIS;
    glusterd_conf_t *priv = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    dict_t *vol_opts = NULL;
#ifdef BUILD_GNFS
    gf_boolean_t nfs_disabled = _gf_false;
#endif
    gf_boolean_t shd_enabled = _gf_false;

    GF_ASSERT(dict);
    priv = this->private;
    GF_ASSERT(priv);

    ret = dict_get_uint32(dict, "cmd", &cmd);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                "Key=cmd", NULL);
        goto out;
    }

    if (cmd & GF_CLI_STATUS_ALL)
        goto out;

    if ((cmd & GF_CLI_STATUS_QUOTAD) &&
        (priv->op_version == GD_OP_VERSION_MIN)) {
        snprintf(msg, sizeof(msg),
                 "The cluster is operating at "
                 "version 1. Getting the status of quotad is not "
                 "allowed in this state.");
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_QUOTA_GET_STAT_FAIL,
                msg, NULL);
        ret = -1;
        goto out;
    }

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume name");
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOLINFO_GET_FAIL,
                "Volume=%s", volname, NULL);
        ret = -1;
        goto out;
    }

    ret = glusterd_validate_volume_id(dict, volinfo);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VALIDATE_FAILED, NULL);
        goto out;
    }

    ret = glusterd_is_volume_started(volinfo);
    if (!ret) {
        snprintf(msg, sizeof(msg), "Volume %s is not started", volname);
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_NOT_STARTED,
                "Volume=%s", volname, NULL);
        ret = -1;
        goto out;
    }

    vol_opts = volinfo->dict;

    if ((cmd & GF_CLI_STATUS_SHD) != 0) {
        if (glusterd_is_shd_compatible_volume(volinfo)) {
            shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
        } else {
            ret = -1;
            snprintf(msg, sizeof(msg), "Volume %s is not Self-heal compatible",
                     volname);
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_VOL_SHD_NOT_COMP,
                    "Volume=%s", volname, NULL);
            goto out;
        }
        if (!shd_enabled) {
            ret = -1;
            snprintf(msg, sizeof(msg),
                     "Self-heal Daemon is disabled for volume %s", volname);
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SELF_HEALD_DISABLED,
                    "Volume=%s", volname, NULL);
            goto out;
        }
#ifdef BUILD_GNFS
    } else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
        nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
                                            _gf_false);
        if (nfs_disabled) {
            ret = -1;
            snprintf(msg, sizeof(msg), "NFS server is disabled for volume %s",
                     volname);
            gf_smsg(this->name, GF_LOG_ERROR, errno,
                    GD_MSG_NFS_GANESHA_DISABLED, "Volume=%s", volname, NULL);
            goto out;
        }
#endif
    } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
        if (!glusterd_is_volume_quota_enabled(volinfo)) {
            ret = -1;
            snprintf(msg, sizeof(msg),
                     "Volume %s does not have "
                     "quota enabled",
                     volname);
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_QUOTA_DISABLED,
                    "Volume=%s", volname, NULL);
            goto out;
        }
    } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
        if (!glusterd_is_bitrot_enabled(volinfo)) {
            ret = -1;
            snprintf(msg, sizeof(msg),
                     "Volume %s does not have "
                     "bitrot enabled",
                     volname);
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BITROT_NOT_ENABLED,
                    "Volume=%s", volname, NULL);
            goto out;
        }
    } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
        if (!glusterd_is_bitrot_enabled(volinfo)) {
            ret = -1;
            snprintf(msg, sizeof(msg),
                     "Volume %s does not have "
                     "bitrot enabled. Scrubber will be enabled "
                     "automatically if bitrot is enabled",
                     volname);
            gf_smsg(
                this->name, GF_LOG_ERROR, errno, GD_MSG_BITROT_NOT_ENABLED,
                "Scrubber will be enabled automatically if bitrot is enabled",
                "Volume=%s", volname, NULL);
            goto out;
        }
    } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
        if (!glusterd_is_snapd_enabled(volinfo)) {
            ret = -1;
            snprintf(msg, sizeof(msg),
                     "Volume %s does not have "
                     "uss enabled",
                     volname);
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_SNAPD_NOT_RUNNING,
                    "Volume=%s", volname, NULL);
            goto out;
        }
    } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
        ret = dict_get_str(dict, "brick", &brick);
        if (ret) {
            gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                    "Key=brick", NULL);
            goto out;
        }

        ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
                                                     _gf_false);
        if (ret) {
            snprintf(msg, sizeof(msg),
                     "No brick %s in"
                     " volume %s",
                     brick, volname);
            gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_BRICK_NOT_FOUND,
                    "Brick=%s, Volume=%s", brick, volname, NULL);
            ret = -1;
            goto out;
        }
    }

    ret = 0;

out:
    if (ret) {
        if (msg[0] != '\0')
            *op_errstr = gf_strdup(msg);
        else
            *op_errstr = gf_strdup("Validation Failed for Status");
    }

    gf_msg_debug(this->name, 0, "Returning: %d", ret);
    return ret;
}

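/* Stage-phase validation for 'volume profile': profiling may only be
 * started once, may only be stopped or queried while it is running, and
 * 'top'/'info' additionally require a started volume.
 */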
int
glusterd_op_stage_stats_volume(dict_t *dict, char **op_errstr)
{
    int ret = -1;
    char *volname = NULL;
    char msg[2048] = {
        0,
    };
    int32_t stats_op = GF_CLI_STATS_NONE;
    glusterd_volinfo_t *volinfo = NULL;

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        snprintf(msg, sizeof(msg), "Volume name get failed");
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        snprintf(msg, sizeof(msg),
                 "Volume %s, "
                 "doesn't exist",
                 volname);
        goto out;
    }

    ret = glusterd_validate_volume_id(dict, volinfo);
    if (ret)
        goto out;

    ret = dict_get_int32(dict, "op", &stats_op);
    if (ret) {
        snprintf(msg, sizeof(msg), "Volume profile op get failed");
        goto out;
    }

    if (GF_CLI_STATS_START == stats_op) {
        if (_gf_true == glusterd_is_profile_on(volinfo)) {
            snprintf(msg, sizeof(msg),
                     "Profile on Volume %s is"
                     " already started",
                     volinfo->volname);
            ret = -1;
            goto out;
        }
    } else if ((GF_CLI_STATS_STOP == stats_op) ||
               (GF_CLI_STATS_INFO == stats_op)) {
        if (_gf_false == glusterd_is_profile_on(volinfo)) {
            snprintf(msg, sizeof(msg),
                     "Profile on Volume %s is"
                     " not started",
                     volinfo->volname);
            ret = -1;
            goto out;
        }
    }
    if ((GF_CLI_STATS_TOP == stats_op) || (GF_CLI_STATS_INFO == stats_op)) {
        if (_gf_false == glusterd_is_volume_started(volinfo)) {
            snprintf(msg, sizeof(msg), "Volume %s is not started.",
                     volinfo->volname);
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_STARTED, "%s",
                   msg);
            ret = -1;
            goto out;
        }
    }
    ret = 0;
out:
    if (msg[0] != '\0') {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_OP_STAGE_STATS_VOL_FAIL,
               "%s", msg);
        *op_errstr = gf_strdup(msg);
    }
    gf_msg_debug("glusterd", 0, "Returning %d", ret);
    return ret;
}

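/* dict_foreach() callback used by 'volume reset'. The void *data argument
 * carries the int32_t force flag; the GD_OP_PROTECTED/GD_OP_UNPROTECTED
 * bits are OR-ed into it to tell the caller whether any protected options
 * were skipped. It always returns 0 so iteration continues over all keys.
 */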
static int
_delete_reconfig_opt(dict_t *this, char *key, data_t *value, void *data)
{
    int32_t *is_force = 0;

    GF_ASSERT(data);
    is_force = (int32_t *)data;

    /* Keys which have the flag VOLOPT_FLAG_NEVER_RESET
     * should not be deleted
     */

    if (_gf_true ==
        glusterd_check_voloption_flags(key, VOLOPT_FLAG_NEVER_RESET)) {
        if (*is_force != 1)
            *is_force = *is_force | GD_OP_PROTECTED;
        goto out;
    }

    if (*is_force != 1) {
        if (_gf_true ==
            glusterd_check_voloption_flags(key, VOLOPT_FLAG_FORCE)) {
            /* indicate to the caller that we don't set the option
             * due to being protected
             */
            *is_force = *is_force | GD_OP_PROTECTED;
            goto out;
        } else {
            *is_force = *is_force | GD_OP_UNPROTECTED;
        }
    }

    gf_msg_debug("glusterd", 0, "deleting dict with key=%s,value=%s", key,
                 value->data);
    dict_del(this, key);
    /* Delete the scrubber (pause/resume) option from the dictionary if the
     * bitrot option is going to be reset
     */
    if (!strncmp(key, VKEY_FEATURES_BITROT, strlen(VKEY_FEATURES_BITROT))) {
        dict_del_sizen(this, VKEY_FEATURES_SCRUB);
    }
out:
    return 0;
}

static int
_delete_reconfig_global_opt(dict_t *this, char *key, data_t *value, void *data)
{
    GF_ASSERT(data);

    if (strcmp(GLUSTERD_GLOBAL_OPT_VERSION, key) == 0)
        goto out;

    _delete_reconfig_opt(this, key, value, data);
out:
    return 0;
}

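/* Commit-phase worker for 'volume reset <VOLNAME> [<KEY>]': drops the
 * option(s) from volinfo->dict, restores defaults, regenerates volfiles,
 * persists the volinfo, and reconfigures the dependent per-volume services
 * (snapd, gfproxyd, shd).
 */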
static int
glusterd_options_reset(glusterd_volinfo_t *volinfo, char *key,
                       int32_t *is_force)
{
    int ret = 0;
    data_t *value = NULL;
    char *key_fixed = NULL;
    xlator_t *this = THIS;
    glusterd_svc_t *svc = NULL;

    GF_ASSERT(volinfo->dict);
    GF_ASSERT(key);

    if (!strncmp(key, "all", 3)) {
        dict_foreach(volinfo->dict, _delete_reconfig_opt, is_force);
        ret = glusterd_enable_default_options(volinfo, NULL);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FAIL_DEFAULT_OPT_SET,
                   "Failed to set "
                   "default options on reset for volume %s",
                   volinfo->volname);
            goto out;
        }
    } else {
        value = dict_get(volinfo->dict, key);
        if (!value) {
            gf_msg_debug(this->name, 0, "no value set for option %s", key);
            goto out;
        }
        _delete_reconfig_opt(volinfo->dict, key, value, is_force);
        ret = glusterd_enable_default_options(volinfo, key);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_FAIL_DEFAULT_OPT_SET,
                   "Failed to set "
                   "default value for option '%s' on reset for "
                   "volume %s",
                   key, volinfo->volname);
            goto out;
        }
    }

    gd_update_volume_op_versions(volinfo);
    if (!volinfo->is_snap_volume) {
        svc = &(volinfo->snapd.svc);
        ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
        if (ret)
            goto out;
    }
    svc = &(volinfo->gfproxyd.svc);
    ret = svc->reconfigure(volinfo);
    if (ret)
        goto out;

    svc = &(volinfo->shd.svc);
    ret = svc->reconfigure(volinfo);
    if (ret)
        goto out;

    ret = glusterd_create_volfiles_and_notify_services(volinfo);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
               "Unable to create volfile for"
               " 'volume reset'");
        ret = -1;
        goto out;
    }

    ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
    if (ret)
        goto out;

    if (GLUSTERD_STATUS_STARTED == volinfo->status) {
        ret = glusterd_svcs_reconfigure(volinfo);
        if (ret)
            goto out;
    }

    ret = 0;

out:
    GF_FREE(key_fixed);
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

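/* Resets cluster-wide (volume "all") options. Note the pattern below:
 * mutate a copy (dup_opt) of conf->opts, persist it with a bumped
 * GLUSTERD_GLOBAL_OPT_VERSION, and only then apply the change to
 * conf->opts itself, so a failed store does not leave the in-memory
 * options ahead of the on-disk state.
 */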
static int
glusterd_op_reset_all_volume_options(xlator_t *this, dict_t *dict)
{
    char *key = NULL;
    char *key_fixed = NULL;
    int ret = -1;
    int32_t is_force = 0;
    glusterd_conf_t *conf = NULL;
    dict_t *dup_opt = NULL;
    gf_boolean_t all = _gf_false;
    char *next_version = NULL;
    gf_boolean_t quorum_action = _gf_false;

    conf = this->private;
    ret = dict_get_str(dict, "key", &key);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Failed to get key");
        goto out;
    }

    ret = dict_get_int32(dict, "force", &is_force);
    if (ret)
        is_force = 0;

    if (strcmp(key, "all")) {
        ret = glusterd_check_option_exists(key, &key_fixed);
        if (ret <= 0) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
                   "Option %s does not "
                   "exist",
                   key);
            ret = -1;
            goto out;
        }
    } else {
        all = _gf_true;
    }

    if (key_fixed)
        key = key_fixed;

    ret = -1;
    dup_opt = dict_new();
    if (!dup_opt) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
        goto out;
    }
    if (!all) {
        dict_copy(conf->opts, dup_opt);
        dict_del(dup_opt, key);
    }
    ret = glusterd_get_next_global_opt_version_str(conf->opts, &next_version);
    if (ret)
        goto out;

    ret = dict_set_str_sizen(dup_opt, GLUSTERD_GLOBAL_OPT_VERSION,
                             next_version);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
        goto out;
    }

    ret = glusterd_store_options(this, dup_opt);
    if (ret)
        goto out;

    if (glusterd_is_quorum_changed(conf->opts, key, NULL))
        quorum_action = _gf_true;

    ret = dict_set_dynstr_sizen(conf->opts, GLUSTERD_GLOBAL_OPT_VERSION,
                                next_version);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
        goto out;
    } else
        next_version = NULL;

    if (!all) {
        dict_del(conf->opts, key);
    } else {
        dict_foreach(conf->opts, _delete_reconfig_global_opt, &is_force);
    }
out:
    GF_FREE(key_fixed);
    if (dup_opt)
        dict_unref(dup_opt);

    gf_msg_debug(this->name, 0, "returning %d", ret);
    if (quorum_action)
        glusterd_do_quorum_action();
    GF_FREE(next_version);
    return ret;
}

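/* Commit-phase entry point for 'volume reset'. Dispatches volume "all" to
 * glusterd_op_reset_all_volume_options() and reports protected options
 * back to the CLI through op_rspstr.
 */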
static int
glusterd_op_reset_volume(dict_t *dict, char **op_rspstr)
{
    glusterd_volinfo_t *volinfo = NULL;
    int ret = -1;
    char *volname = NULL;
    char *key = NULL;
    char *key_fixed = NULL;
    int32_t is_force = 0;
    gf_boolean_t quorum_action = _gf_false;
    xlator_t *this = THIS;

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume name");
        goto out;
    }

    if (strcasecmp(volname, "all") == 0) {
        ret = glusterd_op_reset_all_volume_options(this, dict);
        goto out;
    }

    ret = dict_get_int32(dict, "force", &is_force);
    if (ret)
        is_force = 0;

    ret = dict_get_str(dict, "key", &key);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get option key");
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
               FMTSTR_CHECK_VOL_EXISTS, volname);
        goto out;
    }

    if (strcmp(key, "all") &&
        glusterd_check_option_exists(key, &key_fixed) != 1) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
               "volinfo dict inconsistency: option %s not found", key);
        ret = -1;
        goto out;
    }
    if (key_fixed)
        key = key_fixed;

    if (glusterd_is_quorum_changed(volinfo->dict, key, NULL))
        quorum_action = _gf_true;

    ret = glusterd_options_reset(volinfo, key, &is_force);
    if (ret == -1) {
        gf_asprintf(op_rspstr, "Volume reset : failed");
    } else if (is_force & GD_OP_PROTECTED) {
        if (is_force & GD_OP_UNPROTECTED) {
            gf_asprintf(op_rspstr,
                        "All unprotected fields were"
                        " reset. To reset the protected fields,"
                        " use 'force'.");
        } else {
            ret = -1;
            gf_asprintf(op_rspstr,
                        "'%s' is protected. To reset"
                        " use 'force'.",
                        key);
        }
    }

    if (!strcmp(key, "ganesha.enable") || !strcmp(key, "all")) {
        if (glusterd_check_ganesha_export(volinfo) &&
            is_origin_glusterd(dict)) {
            ret = manage_export_config(volname, "off", op_rspstr);
            if (ret)
                gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_NFS_GNS_RESET_FAIL,
                       "Could not reset ganesha.enable key");
        }
    }

out:
    GF_FREE(key_fixed);
    if (quorum_action)
        glusterd_do_quorum_action();

    gf_msg_debug(this->name, 0, "'volume reset' returning %d", ret);
    return ret;
}

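/* Stops every brick of the volume in turn. Failure to stop any brick
 * aborts the loop, raises an EVENT_BRICK_STOP_FAILED event and returns -1.
 */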
int
glusterd_stop_bricks(glusterd_volinfo_t *volinfo)
{
    glusterd_brickinfo_t *brickinfo = NULL;

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
    {
        /*TODO: Need to change @del_brick in brick_stop to _gf_true
         * once we enable synctask in peer rpc prog */
        if (glusterd_brick_stop(volinfo, brickinfo, _gf_false)) {
            gf_event(EVENT_BRICK_STOP_FAILED, "peer=%s;volume=%s;brick=%s",
                     brickinfo->hostname, volinfo->volname, brickinfo->path);
            return -1;
        }
    }

    return 0;
}

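/* Starts all bricks of the volume that have not had a start triggered yet.
 * brickinfo->restart_mutex serializes this against concurrent restart
 * paths; glusterd_brick_start() may sleep, hence the coverity annotation.
 */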
int
glusterd_start_bricks(glusterd_volinfo_t *volinfo)
{
    int ret = -1;
    glusterd_brickinfo_t *brickinfo = NULL;

    GF_ASSERT(volinfo);

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
    {
        if (!brickinfo->start_triggered) {
            pthread_mutex_lock(&brickinfo->restart_mutex);
            {
                /* coverity[SLEEP] */
                ret = glusterd_brick_start(volinfo, brickinfo, _gf_false,
                                           _gf_false);
            }
            pthread_mutex_unlock(&brickinfo->restart_mutex);
            if (ret) {
                gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_DISCONNECTED,
                       "Failed to start %s:%s for %s", brickinfo->hostname,
                       brickinfo->path, volinfo->volname);
                gf_event(EVENT_BRICK_START_FAILED, "peer=%s;volume=%s;brick=%s",
                         brickinfo->hostname, volinfo->volname,
                         brickinfo->path);
                goto out;
            }
        }
    }
    ret = 0;
out:
    return ret;
}

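/* Fills in defaults that newer op-versions expect (nfs.disable,
 * transport.address-family) for volumes created before an upgrade, then
 * persists the volinfo. Called from the cluster.op-version bump-up path.
 */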
static int
glusterd_update_volumes_dict(glusterd_volinfo_t *volinfo)
{
    int ret = -1;
    xlator_t *this = THIS;
    glusterd_conf_t *conf = NULL;
    char *address_family_str = NULL;

    conf = this->private;
    GF_VALIDATE_OR_GOTO(this->name, conf, out);

    /* From 3.9.0 onwards gNFS is disabled by default. On an upgrade from
     * any version below 3.9.0 to 3.9.x, the volume's dictionary will not
     * have the 'nfs.disable' key set, which means it will not be set to
     * 'on' until done explicitly. Setting nfs.disable to 'on' in the
     * op-version bump-up flow is the ideal way here. The same applies to
     * transport.address-family: if the transport type is set to tcp,
     * transport.address-family is defaulted to 'inet'.
     */
    if (conf->op_version >= GD_OP_VERSION_3_9_0) {
        if (dict_get_str_boolean(volinfo->dict, NFS_DISABLE_MAP_KEY, 1)) {
            ret = dict_set_dynstr_with_alloc(volinfo->dict, NFS_DISABLE_MAP_KEY,
                                             "on");
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                       "Failed to set "
                       "option ' NFS_DISABLE_MAP_KEY ' on "
                       "volume %s",
                       volinfo->volname);
                goto out;
            }
        }
        ret = dict_get_str(volinfo->dict, "transport.address-family",
                           &address_family_str);
        if (ret) {
            if (volinfo->transport_type == GF_TRANSPORT_TCP) {
                ret = dict_set_dynstr_with_alloc(
                    volinfo->dict, "transport.address-family", "inet");
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, -ret,
                           GD_MSG_DICT_SET_FAILED,
                           "failed to set transport."
                           "address-family on %s",
                           volinfo->volname);
                    goto out;
                }
            }
        }
    }
    ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);

out:
    return ret;
}

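/* Helper for the cluster-wide brick-multiplexing key: when key matches
 * GLUSTERD_BRICK_MULTIPLEX_KEY the value is copied into priv->opts; any
 * other key is ignored and treated as success.
 */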
static int
glusterd_set_brick_mx_opts(dict_t *dict, char *key, char *value,
                           char **op_errstr)
{
    int32_t ret = -1;
    xlator_t *this = THIS;
    glusterd_conf_t *priv = NULL;

    GF_VALIDATE_OR_GOTO(this->name, dict, out);
    GF_VALIDATE_OR_GOTO(this->name, key, out);
    GF_VALIDATE_OR_GOTO(this->name, value, out);
    GF_VALIDATE_OR_GOTO(this->name, op_errstr, out);

    ret = 0;

    priv = this->private;

    if (!strcmp(key, GLUSTERD_BRICK_MULTIPLEX_KEY)) {
        ret = dict_set_dynstr_sizen(priv->opts, GLUSTERD_BRICK_MULTIPLEX_KEY,
                                    gf_strdup(value));
    }

out:
    return ret;
}

static int
glusterd_set_brick_graceful_cleanup(dict_t *dict, char *key, char *value,
                                    glusterd_conf_t *priv)
{
    int ret = 0;
    char *dup_value = NULL;

    if (!strcmp(key, GLUSTER_BRICK_GRACEFUL_CLEANUP)) {
        dup_value = gf_strdup(value);
        if (!dup_value) {
            ret = -1;
            goto out;
        }
        ret = dict_set_dynstr_sizen(priv->opts, GLUSTER_BRICK_GRACEFUL_CLEANUP,
                                    dup_value);
    }

out:
    if (ret && dup_value)
        GF_FREE(dup_value);
    return ret;
}

/* This is a hack to prevent client-io-threads from being loaded in the graph
 * when the cluster-op-version is bumped up from 3.8.x to 3.13.x. The key is
 * deleted subsequently in glusterd_create_volfiles(). */
static int
glusterd_dict_set_skip_cliot_key(glusterd_volinfo_t *volinfo)
{
    return dict_set_int32_sizen(volinfo->dict, "skip-CLIOT", 1);
}

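/* Commit-phase handler for 'gluster volume set all <KEY> <VALUE>'. The
 * request arrives as a dict holding "key1"/"value1". A bump of
 * cluster.op-version is handled specially (quota.conf upgrade, volfile
 * regeneration for every volume); every other global option goes through
 * the same copy/store/apply sequence used by
 * glusterd_op_reset_all_volume_options() above.
 */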
static int
glusterd_op_set_all_volume_options(xlator_t *this, dict_t *dict,
                                   char **op_errstr)
{
    char *key = NULL;
    char *key_fixed = NULL;
    char *value = NULL;
    char *dup_value = NULL;
    int ret = -1;
    glusterd_conf_t *conf = NULL;
    dict_t *dup_opt = NULL;
    char *next_version = NULL;
    gf_boolean_t quorum_action = _gf_false;
    uint32_t op_version = 0;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_svc_t *svc = NULL;
    gf_boolean_t svcs_reconfigure = _gf_false;

    conf = this->private;
    ret = dict_get_str(dict, "key1", &key);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                "Key=key1", NULL);
        goto out;
    }

    ret = dict_get_str(dict, "value1", &value);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "invalid key,value pair in 'volume set'");
        goto out;
    }

    ret = glusterd_check_option_exists(key, &key_fixed);
    if (ret <= 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNKNOWN_KEY,
               "Invalid key %s", key);
        ret = -1;
        goto out;
    }

    if (key_fixed)
        key = key_fixed;

    ret = glusterd_set_shared_storage(dict, key, value, op_errstr);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SHARED_STRG_SET_FAIL,
               "Failed to set shared storage option");
        goto out;
    }

    ret = glusterd_set_brick_mx_opts(dict, key, value, op_errstr);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_MX_SET_FAIL,
               "Failed to set brick multiplexing option");
        goto out;
    }

    ret = glusterd_set_brick_graceful_cleanup(dict, key, value, conf);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_GRACEFUL_CLEANUP_SET_FAIL,
               "Failed to set brick graceful option");
        goto out;
    }

    /* If the key is cluster.op-version, set conf->op_version to the value
     * if needed and save it.
     */
    if (strcmp(key, GLUSTERD_GLOBAL_OP_VERSION_KEY) == 0) {
        ret = 0;

        ret = gf_string2uint(value, &op_version);
        if (ret)
            goto out;

        if (op_version >= conf->op_version) {
            conf->op_version = op_version;

            /* When a bump up happens, update the quota.conf file
             * as well. This is because, till 3.7 we had a quota
             * conf version v1.1 in quota.conf. When the inode-quota
             * feature is introduced, this needs to be changed to
             * v1.2 in quota.conf and the 16 byte uuid in quota.conf
             * needs to be changed to 17 bytes. See
             * glusterd_store_quota_config for more details.
             */
            cds_list_for_each_entry(volinfo, &conf->volumes, vol_list)
            {
                ret = glusterd_store_quota_config(
                    volinfo, NULL, NULL, GF_QUOTA_OPTION_TYPE_UPGRADE, NULL);
                if (ret)
                    goto out;
                ret = glusterd_update_volumes_dict(volinfo);
                if (ret)
                    goto out;

                if (glusterd_dict_set_skip_cliot_key(volinfo))
                    goto out;

                if (!volinfo->is_snap_volume) {
                    svc = &(volinfo->snapd.svc);
                    ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
                    if (ret)
                        goto out;
                }

                svc = &(volinfo->gfproxyd.svc);
                ret = svc->reconfigure(volinfo);
                if (ret)
                    goto out;

                svc = &(volinfo->shd.svc);
                ret = svc->reconfigure(volinfo);
                if (ret)
                    goto out;

                ret = glusterd_create_volfiles_and_notify_services(volinfo);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0,
                           GD_MSG_VOLFILE_CREATE_FAIL,
                           "Unable to create volfile for"
                           " 'volume set'");
                    goto out;
                }
                if (GLUSTERD_STATUS_STARTED == volinfo->status) {
                    svcs_reconfigure = _gf_true;
                }
            }
            if (svcs_reconfigure) {
                ret = glusterd_svcs_reconfigure(NULL);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
                           "Unable to restart "
                           "services");
                    goto out;
                }
            }

            ret = glusterd_store_global_info(this);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERS_STORE_FAIL,
                       "Failed to store op-version.");
            }
        }
        /* No need to save cluster.op-version in conf->opts
         */
        goto out;
    }
    ret = -1;
    dup_opt = dict_new();
    if (!dup_opt) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
        goto out;
    }
    dict_copy(conf->opts, dup_opt);
    ret = dict_set_str(dup_opt, key, value);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=%s", key, NULL);
        goto out;
    }

    ret = glusterd_get_next_global_opt_version_str(conf->opts, &next_version);
    if (ret)
        goto out;

    ret = dict_set_str_sizen(dup_opt, GLUSTERD_GLOBAL_OPT_VERSION,
                             next_version);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
        goto out;
    }

    ret = glusterd_store_options(this, dup_opt);
    if (ret)
        goto out;

    if (glusterd_is_quorum_changed(conf->opts, key, value))
        quorum_action = _gf_true;

    ret = dict_set_dynstr_sizen(conf->opts, GLUSTERD_GLOBAL_OPT_VERSION,
                                next_version);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=%s", GLUSTERD_GLOBAL_OPT_VERSION, NULL);
        goto out;
    } else
        next_version = NULL;

    dup_value = gf_strdup(value);
    if (!dup_value)
        goto out;

    ret = dict_set_dynstr(conf->opts, key, dup_value);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=%s", key, NULL);
        goto out;
    } else
        dup_value = NULL; /* Protect the allocation from GF_FREE */

out:
    GF_FREE(dup_value);
    GF_FREE(key_fixed);
    if (dup_opt)
        dict_unref(dup_opt);

    gf_msg_debug(this->name, 0, "returning %d", ret);
    if (quorum_action)
        glusterd_do_quorum_action();
    GF_FREE(next_version);
    return ret;
}

int
glusterd_op_get_max_opversion(char **op_errstr, dict_t *rsp_dict)
{
    int ret = -1;

    GF_VALIDATE_OR_GOTO(THIS->name, rsp_dict, out);

    ret = dict_set_int32_sizen(rsp_dict, "max-opversion", GD_OP_VERSION_MAX);
    if (ret) {
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Setting value for max-opversion to dict failed");
        goto out;
    }

out:
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);
    return ret;
}

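/* Handles GLUSTERD_SHARED_STORAGE_KEY: recreates the local brick directory
 * for the shared-storage volume and passes originator information to the
 * post-op hook script through the "hooks_args" dict key, e.g.
 * "is_originator=1,local_node_hostname=<host>".
 */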
static int
glusterd_set_shared_storage(dict_t *dict, char *key, char *value,
                            char **op_errstr)
{
    int32_t ret = -1;
    char hooks_args[PATH_MAX] = {
        0,
    };
    char errstr[PATH_MAX] = {
        0,
    };
    xlator_t *this = THIS;
    int32_t len = 0;

    GF_VALIDATE_OR_GOTO(this->name, dict, out);
    GF_VALIDATE_OR_GOTO(this->name, key, out);
    GF_VALIDATE_OR_GOTO(this->name, value, out);
    GF_VALIDATE_OR_GOTO(this->name, op_errstr, out);

    ret = 0;

    if (strcmp(key, GLUSTERD_SHARED_STORAGE_KEY)) {
        goto out;
    }

    /* Re-create the brick path so as to be
     * able to re-use it
     */
    ret = recursive_rmdir(GLUSTER_SHARED_STORAGE_BRICK_DIR);
    if (ret) {
        snprintf(errstr, PATH_MAX,
                 "Failed to remove shared "
                 "storage brick(%s). "
                 "Reason: %s",
                 GLUSTER_SHARED_STORAGE_BRICK_DIR, strerror(errno));
        gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_DIR_OP_FAILED, "%s",
               errstr);
        ret = -1;
        goto out;
    }

    ret = mkdir_p(GLUSTER_SHARED_STORAGE_BRICK_DIR, 0755, _gf_true);
    if (-1 == ret) {
        snprintf(errstr, PATH_MAX,
                 "Failed to create shared "
                 "storage brick(%s). "
                 "Reason: %s",
                 GLUSTER_SHARED_STORAGE_BRICK_DIR, strerror(errno));
        gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED, "%s",
               errstr);
        goto out;
    }

    if (is_origin_glusterd(dict)) {
        len = snprintf(hooks_args, sizeof(hooks_args),
                       "is_originator=1,local_node_hostname=%s",
                       local_node_hostname);
    } else {
        len = snprintf(hooks_args, sizeof(hooks_args),
                       "is_originator=0,local_node_hostname=%s",
                       local_node_hostname);
    }
    if ((len < 0) || (len >= sizeof(hooks_args))) {
        ret = -1;
        goto out;
    }

    ret = dict_set_dynstr_with_alloc(dict, "hooks_args", hooks_args);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
               "Failed to set"
               " hooks_args in dict.");
        goto out;
    }

out:
    if (ret && strlen(errstr)) {
        *op_errstr = gf_strdup(errstr);
    }

    return ret;
}

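/* Commit-phase handler for 'volume set'. Options arrive as numbered pairs
 * in the request dict:
 *
 *     count = N
 *     key1 = <option>   value1 = <value>
 *     ...
 *     keyN = <option>   valueN = <value>
 *
 * Global options are applied to every volume's dict, volume-scoped options
 * only to the target volume; on any failure the original volinfo dict is
 * restored from volinfo_dict_orig.
 */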
static int
2828
glusterd_op_set_volume(dict_t *dict, char **errstr)
2829
{
2830
    int ret = 0;
2831
    glusterd_volinfo_t *volinfo = NULL;
2832
    char *volname = NULL;
2833
    xlator_t *this = THIS;
2834
    glusterd_conf_t *priv = NULL;
2835
    int count = 1;
2836
    char *key = NULL;
2837
    char *key_fixed = NULL;
2838
    char *value = NULL;
2839
    char keystr[50] = {
2840
        0,
2841
    };
2842
    int keylen;
2843
    gf_boolean_t global_opt = _gf_false;
2844
    gf_boolean_t global_opts_set = _gf_false;
2845
    glusterd_volinfo_t *voliter = NULL;
2846
    int32_t dict_count = 0;
2847
    gf_boolean_t check_op_version = _gf_false;
2848
    uint32_t new_op_version = 0;
2849
    gf_boolean_t quorum_action = _gf_false;
2850
    glusterd_svc_t *svc = NULL;
2851
    dict_t *volinfo_dict_orig = NULL;
2852

2853
    priv = this->private;
2854
    GF_ASSERT(priv);
2855

2856
    volinfo_dict_orig = dict_new();
2857
    if (!volinfo_dict_orig)
2858
        goto out;
2859

2860
    ret = dict_get_int32(dict, "count", &dict_count);
2861
    if (ret) {
2862
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
2863
               "Count(dict),not set in Volume-Set");
2864
        goto out;
2865
    }
2866

2867
    if (dict_count == 0) {
2868
        ret = glusterd_volset_help(NULL, errstr);
2869
        goto out;
2870
    }
2871

2872
    ret = dict_get_str(dict, "volname", &volname);
2873
    if (ret) {
2874
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
2875
               "Unable to get volume name");
2876
        goto out;
2877
    }
2878

2879
    if (strcasecmp(volname, "all") == 0) {
2880
        ret = glusterd_op_set_all_volume_options(this, dict, errstr);
2881
        goto out;
2882
    }
2883

2884
    ret = glusterd_volinfo_find(volname, &volinfo);
2885
    if (ret) {
2886
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
2887
               FMTSTR_CHECK_VOL_EXISTS, volname);
2888
        goto out;
2889
    }
2890

2891
    if (dict_copy(volinfo->dict, volinfo_dict_orig) == NULL) {
2892
        ret = -ENOMEM;
2893
        goto out;
2894
    }
2895

2896
    /* TODO: Remove this once v3.3 compatibility is not required */
2897
    check_op_version = dict_get_str_boolean(dict, "check-op-version",
2898
                                            _gf_false);
2899

2900
    if (check_op_version) {
2901
        ret = dict_get_uint32(dict, "new-op-version", &new_op_version);
2902
        if (ret) {
2903
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
2904
                   "Unable to get new op-version from dict");
2905
            goto out;
2906
        }
2907
    }
2908

2909
    for (count = 1; ret != -1; count++) {
2910
        keylen = snprintf(keystr, sizeof(keystr), "key%d", count);
2911
        ret = dict_get_strn(dict, keystr, keylen, &key);
2912
        if (ret)
2913
            break;
2914

2915
        keylen = snprintf(keystr, sizeof(keystr), "value%d", count);
2916
        ret = dict_get_strn(dict, keystr, keylen, &value);
2917
        if (ret) {
2918
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
2919
                   "invalid key,value pair in 'volume set'");
2920
            ret = -1;
2921
            goto out;
2922
        }
2923

2924
        if (strcmp(key, "config.memory-accounting") == 0) {
2925
            ret = gf_string2boolean(value, &volinfo->memory_accounting);
2926
            if (ret == -1) {
2927
                gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_INVALID_ENTRY,
2928
                       "Invalid value in key-value pair.");
2929
                goto out;
2930
            }
2931
        }
2932

2933
        if (strcmp(key, "config.transport") == 0) {
2934
            gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_VOL_TRANSPORT_TYPE_CHANGE,
2935
                   "changing transport-type for volume %s to %s", volname,
2936
                   value);
2937
            ret = 0;
2938
            if (strcasecmp(value, "rdma") == 0) {
2939
                volinfo->transport_type = GF_TRANSPORT_RDMA;
2940
            } else if (strcasecmp(value, "tcp") == 0) {
2941
                volinfo->transport_type = GF_TRANSPORT_TCP;
2942
            } else if ((strcasecmp(value, "tcp,rdma") == 0) ||
2943
                       (strcasecmp(value, "rdma,tcp") == 0)) {
2944
                volinfo->transport_type = GF_TRANSPORT_BOTH_TCP_RDMA;
2945
            } else {
2946
                ret = -1;
2947
                goto out;
2948
            }
2949
        }
2950

2951
        ret = glusterd_check_ganesha_cmd(key, value, errstr, dict);
2952
        if (ret == -1)
2953
            goto out;
2954

2955
        if (!is_key_glusterd_hooks_friendly(key)) {
2956
            ret = glusterd_check_option_exists(key, &key_fixed);
2957
            GF_ASSERT(ret);
2958
            if (ret <= 0) {
2959
                key_fixed = NULL;
2960
                goto out;
2961
            }
2962
        }
2963

2964
        global_opt = _gf_false;
2965
        if (glusterd_check_globaloption(key)) {
2966
            global_opt = _gf_true;
2967
            global_opts_set = _gf_true;
2968
        }
2969

2970
        if (!global_opt)
2971
            value = gf_strdup(value);
2972

2973
        if (!value) {
2974
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_SET_FAIL,
2975
                   "Unable to set the options in 'volume set'");
2976
            ret = -1;
2977
            goto out;
2978
        }
2979

2980
        if (key_fixed)
2981
            key = key_fixed;
2982

2983
        if (glusterd_is_quorum_changed(volinfo->dict, key, value))
2984
            quorum_action = _gf_true;
2985

2986
        if (global_opt) {
2987
            cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
2988
            {
2989
                value = gf_strdup(value);
2990
                ret = dict_set_dynstr(voliter->dict, key, value);
2991
                if (ret)
2992
                    goto out;
2993
            }
2994
        } else {
2995
            ret = dict_set_dynstr(volinfo->dict, key, value);
2996
            if (ret)
2997
                goto out;
2998
        }
2999

3000
        if (key_fixed) {
3001
            GF_FREE(key_fixed);
3002
            key_fixed = NULL;
3003
        }
3004
    }
3005

3006
    if (count == 1) {
3007
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_OPTIONS_GIVEN,
3008
               "No options received ");
3009
        ret = -1;
3010
        goto out;
3011
    }
3012

3013
    /* Update the cluster op-version before regenerating volfiles so that
3014
     * correct volfiles are generated
3015
     */
3016
    if (new_op_version > priv->op_version) {
3017
        priv->op_version = new_op_version;
3018
        ret = glusterd_store_global_info(this);
3019
        if (ret) {
3020
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_VERS_STORE_FAIL,
3021
                   "Failed to store op-version");
3022
            goto out;
3023
        }
3024
    }
3025
    if (!global_opts_set) {
3026
        gd_update_volume_op_versions(volinfo);

        if (!volinfo->is_snap_volume) {
            svc = &(volinfo->snapd.svc);
            ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
            if (ret)
                goto out;
        }
        svc = &(volinfo->gfproxyd.svc);
        ret = svc->reconfigure(volinfo);
        if (ret)
            goto out;

        svc = &(volinfo->shd.svc);
        ret = svc->reconfigure(volinfo);
        if (ret)
            goto out;

        ret = glusterd_create_volfiles_and_notify_services(volinfo);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
                   "Unable to create volfile for"
                   " 'volume set'");
            ret = -1;
            goto out;
        }

        ret = glusterd_store_volinfo(volinfo,
                                     GLUSTERD_VOLINFO_VER_AC_INCREMENT);
        if (ret)
            goto out;

        if (GLUSTERD_STATUS_STARTED == volinfo->status) {
            ret = glusterd_svcs_reconfigure(volinfo);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
                       "Unable to restart services");
                goto out;
            }
        }

    } else {
        cds_list_for_each_entry(voliter, &priv->volumes, vol_list)
        {
            volinfo = voliter;
            gd_update_volume_op_versions(volinfo);

            if (!volinfo->is_snap_volume) {
                svc = &(volinfo->snapd.svc);
                ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT);
                if (ret)
                    goto out;
            }

            svc = &(volinfo->gfproxyd.svc);
            ret = svc->reconfigure(volinfo);
            if (ret)
                goto out;

            svc = &(volinfo->shd.svc);
            ret = svc->reconfigure(volinfo);
            if (ret)
                goto out;

            ret = glusterd_create_volfiles_and_notify_services(volinfo);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
                       "Unable to create volfile for"
                       " 'volume set'");
                ret = -1;
                goto out;
            }

            ret = glusterd_store_volinfo(volinfo,
                                         GLUSTERD_VOLINFO_VER_AC_INCREMENT);
            if (ret)
                goto out;

            if (GLUSTERD_STATUS_STARTED == volinfo->status) {
                ret = glusterd_svcs_reconfigure(volinfo);
                if (ret) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SVC_RESTART_FAIL,
                           "Unable to restart services");
                    goto out;
                }
            }
        }
    }

out:
    GF_FREE(key_fixed);
    gf_msg_debug(this->name, 0, "returning %d", ret);
    if (quorum_action)
        glusterd_do_quorum_action();
    if (ret < 0 && count > 1) {
        if (dict_reset(volinfo->dict) == 0)
            dict_copy(volinfo_dict_orig, volinfo->dict);
    }
    if (volinfo_dict_orig)
        dict_unref(volinfo_dict_orig);
    return ret;
}

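/* Commit-phase handler for 'volume sync'. When the given hostname resolves
 * to this node, it serializes the named volume (or, when no volname is
 * present, every volume) into rsp_dict and records the number of volumes
 * added under the "count" key.
 */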
static int
glusterd_op_sync_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
    int ret = -1;
    char *volname = NULL;
    char *hostname = NULL;
    char msg[2048] = {
        0,
    };
    int count = 1;
    int vol_count = 0;
    glusterd_conf_t *priv = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    xlator_t *this = THIS;

    priv = this->private;
    GF_ASSERT(priv);

    ret = dict_get_str(dict, "hostname", &hostname);
    if (ret) {
        snprintf(msg, sizeof(msg),
                 "hostname couldn't be "
                 "retrieved from msg");
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                "Key=hostname", NULL);
        *op_errstr = gf_strdup(msg);
        goto out;
    }

    if (!glusterd_gf_is_local_addr(hostname)) {
        ret = 0;
        goto out;
    }

    /* volname is not present in case of sync all */
    ret = dict_get_str(dict, "volname", &volname);
    if (!ret) {
        ret = glusterd_volinfo_find(volname, &volinfo);
        if (ret) {
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
                   "Volume with name: %s "
                   "does not exist",
                   volname);
            goto out;
        }
    }

    if (!rsp_dict) {
        /* this should happen only on the source */
        gf_smsg(this->name, GF_LOG_INFO, errno, GD_MSG_INVALID_ARGUMENT, NULL);
        ret = 0;
        goto out;
    }

    if (volname) {
        ret = glusterd_add_volume_to_dict(volinfo, rsp_dict, 1, "volume");
        if (ret)
            goto out;
        vol_count = 1;
    } else {
        cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
        {
            ret = glusterd_add_volume_to_dict(volinfo, rsp_dict, count,
                                              "volume");
            if (ret)
                goto out;

            vol_count = count++;
        }
    }
    ret = dict_set_int32_sizen(rsp_dict, "count", vol_count);

out:
    gf_msg_debug("glusterd", 0, "Returning %d", ret);

    return ret;
}

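/* Helpers for 'volume profile <vol> start/stop': turn the
 * diagnostics latency-measurement and count-fop-hits options on in the
 * volume's dict, or delete them again.
 */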
static int
glusterd_add_profile_volume_options(glusterd_volinfo_t *volinfo)
{
    int ret = -1;

    GF_ASSERT(volinfo);

    ret = dict_set_nstrn(volinfo->dict, VKEY_DIAG_LAT_MEASUREMENT,
                         SLEN(VKEY_DIAG_LAT_MEASUREMENT), "on", SLEN("on"));
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "failed to set the volume %s "
               "option %s value %s",
               volinfo->volname, VKEY_DIAG_LAT_MEASUREMENT, "on");
        goto out;
    }

    ret = dict_set_nstrn(volinfo->dict, VKEY_DIAG_CNT_FOP_HITS,
                         SLEN(VKEY_DIAG_CNT_FOP_HITS), "on", SLEN("on"));
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "failed to set the volume %s "
               "option %s value %s",
               volinfo->volname, VKEY_DIAG_CNT_FOP_HITS, "on");
        goto out;
    }
out:
    gf_msg_debug("glusterd", 0, "Returning %d", ret);
    return ret;
}

static void
glusterd_remove_profile_volume_options(glusterd_volinfo_t *volinfo)
{
    GF_ASSERT(volinfo);

    dict_del_sizen(volinfo->dict, VKEY_DIAG_LAT_MEASUREMENT);
    dict_del_sizen(volinfo->dict, VKEY_DIAG_CNT_FOP_HITS);
}

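/* Commit handler for 'volume profile'. START/STOP toggle the profiling
 * options above and regenerate the volfiles; INFO/TOP are no-ops here since
 * the data is gathered during the brick-op phase.
 */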
int
glusterd_op_stats_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
    int ret = -1;
    char *volname = NULL;
    char msg[2048] = {
        0,
    };
    glusterd_volinfo_t *volinfo = NULL;
    int32_t stats_op = GF_CLI_STATS_NONE;

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "volume name get failed");
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);

        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
        goto out;
    }

    ret = dict_get_int32(dict, "op", &stats_op);
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "volume profile op get failed");
        goto out;
    }

    switch (stats_op) {
        case GF_CLI_STATS_START:
            ret = glusterd_add_profile_volume_options(volinfo);
            if (ret)
                goto out;
            break;
        case GF_CLI_STATS_STOP:
            glusterd_remove_profile_volume_options(volinfo);
            break;
        case GF_CLI_STATS_INFO:
        case GF_CLI_STATS_TOP:
            /* info is already collected in the brick op phase;
             * nothing more to do here */
            ret = 0;
            goto out;
        default:
            GF_ASSERT(0);
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
                   "Invalid profile op: %d", stats_op);
            ret = -1;
            goto out;
    }
    ret = glusterd_create_volfiles_and_notify_services(volinfo);

    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL,
               "Unable to create volfile for"
               " 'volume set'");
        ret = -1;
        goto out;
    }

    ret = glusterd_store_volinfo(volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
    if (ret)
        goto out;

    if (GLUSTERD_STATUS_STARTED == volinfo->status) {
        ret = glusterd_svcs_reconfigure(volinfo);
        if (ret)
            goto out;
    }

    ret = 0;

out:
    gf_msg_debug("glusterd", 0, "Returning %d", ret);

    return ret;
}

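/* Copies the brick list of an ongoing remove-brick task from
 * volinfo->rebal.dict into 'dict', under "<prefix>.count" and
 * "<prefix>.brick<i>" keys.
 */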
static int
_add_remove_bricks_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo,
                           char *prefix)
{
    int ret = -1;
    int count = 0;
    int i = 0;
    char brick_key[16] = {
        0,
    };
    char dict_key[64] = {
        /* dict_key is small as prefix is up to 32 chars */
        0,
    };
    int keylen;
    char *brick = NULL;
    xlator_t *this = THIS;

    GF_ASSERT(dict);
    GF_ASSERT(volinfo);
    GF_ASSERT(prefix);

    ret = dict_get_int32(volinfo->rebal.dict, "count", &count);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Failed to get brick count");
        goto out;
    }

    keylen = snprintf(dict_key, sizeof(dict_key), "%s.count", prefix);
    ret = dict_set_int32n(dict, dict_key, keylen, count);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set brick count in dict");
        goto out;
    }

    for (i = 1; i <= count; i++) {
        keylen = snprintf(brick_key, sizeof(brick_key), "brick%d", i);

        ret = dict_get_strn(volinfo->rebal.dict, brick_key, keylen, &brick);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "Unable to get %s", brick_key);
            goto out;
        }

        keylen = snprintf(dict_key, sizeof(dict_key), "%s.%s", prefix,
                          brick_key);
        if ((keylen < 0) || (keylen >= sizeof(dict_key))) {
            ret = -1;
            goto out;
        }
        ret = dict_set_strn(dict, dict_key, keylen, brick);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                   "Failed to add brick to dict");
            goto out;
        }
        brick = NULL;
    }

out:
    return ret;
}

/* This adds the respective task-id and all available parameters of a task
 * into a dictionary
 */
static int
_add_task_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int op, int index)
{
    int ret = -1;
    char key[32] = {
        0,
    };
    int keylen;
    char *uuid_str = NULL;
    int status = 0;
    xlator_t *this = THIS;

    GF_ASSERT(dict);
    GF_ASSERT(volinfo);

    switch (op) {
        case GD_OP_REMOVE_BRICK:
            snprintf(key, sizeof(key), "task%d", index);
            ret = _add_remove_bricks_to_dict(dict, volinfo, key);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0,
                       GD_MSG_ADD_REMOVE_BRICK_FAIL,
                       "Failed to add remove bricks to dict");
                goto out;
            }
            /* fall through: remove-brick shares the rebalance task-id */
        case GD_OP_REBALANCE:
            uuid_str = gf_strdup(uuid_utoa(volinfo->rebal.rebalance_id));
            status = volinfo->rebal.defrag_status;
            break;

        default:
            ret = -1;
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_TASK_ID,
                   "%s operation doesn't have a"
                   " task_id",
                   gd_op_list[op]);
            goto out;
    }

    keylen = snprintf(key, sizeof(key), "task%d.type", index);
    ret = dict_set_strn(dict, key, keylen, (char *)gd_op_list[op]);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Error setting task type in dict");
        goto out;
    }

    keylen = snprintf(key, sizeof(key), "task%d.id", index);

    if (!uuid_str)
        goto out;
    ret = dict_set_dynstrn(dict, key, keylen, uuid_str);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Error setting task id in dict");
        goto out;
    }
    uuid_str = NULL;

    keylen = snprintf(key, sizeof(key), "task%d.status", index);
    ret = dict_set_int32n(dict, key, keylen, status);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Error setting task status in dict");
        goto out;
    }

out:
    if (uuid_str)
        GF_FREE(uuid_str);
    return ret;
}

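/* Adds the currently running rebalance/remove-brick task (if any) to
 * rsp_dict and records the number of tasks under the "tasks" key.
 */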
static int
glusterd_aggregate_task_status(dict_t *rsp_dict, glusterd_volinfo_t *volinfo)
{
    int ret = -1;
    int tasks = 0;
    xlator_t *this = THIS;

    if (!gf_uuid_is_null(volinfo->rebal.rebalance_id)) {
        ret = _add_task_to_dict(rsp_dict, volinfo, volinfo->rebal.op, tasks);

        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                   "Failed to add task details to dict");
            goto out;
        }
        tasks++;
    }
    ret = dict_set_int32_sizen(rsp_dict, "tasks", tasks);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Error setting tasks count in dict");
        goto out;
    }
out:
    return ret;
}

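/* Fills in the brick<count>.* keys for a node-level service (quotad, bitd,
 * scrubber and, with BUILD_GNFS, the gNFS server) so 'volume status' can
 * list it alongside the bricks.
 */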
static int
glusterd_add_node_to_dict(char *server, dict_t *dict, int count,
                          dict_t *vol_opts)
{
    int ret = -1;
    char pidfile[PATH_MAX] = "";
    gf_boolean_t running = _gf_false;
    int pid = -1;
    int port = 0;
    glusterd_svc_t *svc = NULL;
    char key[64] = "";
    int keylen;
    xlator_t *this = THIS;
    glusterd_conf_t *priv = NULL;

    priv = this->private;
    GF_ASSERT(priv);

    if (!strcmp(server, "")) {
        ret = 0;
        goto out;
    }

    glusterd_svc_build_pidfile_path(server, priv->rundir, pidfile,
                                    sizeof(pidfile));

    if (strcmp(server, priv->quotad_svc.name) == 0)
        svc = &(priv->quotad_svc);
#ifdef BUILD_GNFS
    else if (strcmp(server, priv->nfs_svc.name) == 0)
        svc = &(priv->nfs_svc);
#endif
    else if (strcmp(server, priv->bitd_svc.name) == 0)
        svc = &(priv->bitd_svc);
    else if (strcmp(server, priv->scrub_svc.name) == 0)
        svc = &(priv->scrub_svc);
    else {
        ret = 0;
        goto out;
    }

    /* Consider the service to be running only when glusterd sees it online */
    if (svc->online)
        running = gf_is_service_running(pidfile, &pid);

    /* For nfs-servers/self-heal-daemon setting
     * brick<n>.hostname = "NFS Server" / "Self-heal Daemon"
     * brick<n>.path = uuid
     * brick<n>.port = 0
     *
     * This might be confusing, but cli displays the name of
     * the brick as hostname+path, so this will make more sense
     * when output.
     */

    keylen = snprintf(key, sizeof(key), "brick%d.hostname", count);
    if (!strcmp(server, priv->quotad_svc.name))
        ret = dict_set_nstrn(dict, key, keylen, "Quota Daemon",
                             SLEN("Quota Daemon"));
#ifdef BUILD_GNFS
    else if (!strcmp(server, priv->nfs_svc.name))
        ret = dict_set_nstrn(dict, key, keylen, "NFS Server",
                             SLEN("NFS Server"));
#endif
    else if (!strcmp(server, priv->bitd_svc.name))
        ret = dict_set_nstrn(dict, key, keylen, "Bitrot Daemon",
                             SLEN("Bitrot Daemon"));
    else if (!strcmp(server, priv->scrub_svc.name))
        ret = dict_set_nstrn(dict, key, keylen, "Scrubber Daemon",
                             SLEN("Scrubber Daemon"));
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=%s", key, NULL);
        goto out;
    }

    keylen = snprintf(key, sizeof(key), "brick%d.path", count);
    ret = dict_set_dynstrn(dict, key, keylen, gf_strdup(uuid_utoa(MY_UUID)));
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=%s", key, NULL);
        goto out;
    }

#ifdef BUILD_GNFS
    /* Port is available only for the NFS server.
     * Self-heal daemon doesn't provide any port for access
     * by entities other than gluster.
     */
    if (!strcmp(server, priv->nfs_svc.name)) {
        if (dict_get_sizen(vol_opts, "nfs.port")) {
            ret = dict_get_int32(vol_opts, "nfs.port", &port);
            if (ret) {
                gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
                        "Key=nfs.port", NULL);
                goto out;
            }
        } else
            port = GF_NFS3_PORT;
    }
#endif
    keylen = snprintf(key, sizeof(key), "brick%d.port", count);
    ret = dict_set_int32n(dict, key, keylen, port);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=%s", key, NULL);
        goto out;
    }

    keylen = snprintf(key, sizeof(key), "brick%d.pid", count);
    ret = dict_set_int32n(dict, key, keylen, pid);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=%s", key, NULL);
        goto out;
    }

    keylen = snprintf(key, sizeof(key), "brick%d.status", count);
    ret = dict_set_int32n(dict, key, keylen, running);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=%s", key, NULL);
        goto out;
    }

out:
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

static int32_t
glusterd_get_all_volnames(dict_t *dict)
{
    int ret = -1;
    int32_t vol_count = 0;
    char key[64] = "";
    int keylen;
    glusterd_volinfo_t *entry = NULL;
    glusterd_conf_t *priv = NULL;

    priv = THIS->private;
    GF_ASSERT(priv);

    cds_list_for_each_entry(entry, &priv->volumes, vol_list)
    {
        keylen = snprintf(key, sizeof(key), "vol%d", vol_count);
        ret = dict_set_strn(dict, key, keylen, entry->volname);
        if (ret)
            goto out;

        vol_count++;
    }

    ret = dict_set_int32_sizen(dict, "vol_count", vol_count);

out:
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "failed to get all "
               "volume names for status");
    return ret;
}

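/* Adds the per-volume self-heal daemon to the 'volume status' output: the
 * daemon name goes into brick<count>.hostname and this node's uuid into
 * brick<count>.path, mirroring what is done for the other node services.
 */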
static int32_t
glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
                         int32_t count)
{
    int ret = -1;
    int32_t pid = -1;
    int32_t brick_online = -1;
    char key[64] = {0};
    int keylen;
    char *pidfile = NULL;
    xlator_t *this = THIS;
    char *uuid_str = NULL;

    GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
    GF_VALIDATE_OR_GOTO(this->name, dict, out);

    keylen = snprintf(key, sizeof(key), "brick%d.hostname", count);
    ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon",
                         SLEN("Self-heal Daemon"));
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
                key, NULL);
        goto out;
    }

    keylen = snprintf(key, sizeof(key), "brick%d.path", count);
    uuid_str = gf_strdup(uuid_utoa(MY_UUID));
    if (!uuid_str) {
        ret = -1;
        goto out;
    }
    ret = dict_set_dynstrn(dict, key, keylen, uuid_str);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
                key, NULL);
        goto out;
    }
    uuid_str = NULL;

    /* shd doesn't have a port, but the cli needs a port key with
     * a zero value to parse.
     */

    keylen = snprintf(key, sizeof(key), "brick%d.port", count);
    ret = dict_set_int32n(dict, key, keylen, 0);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
                key, NULL);
        goto out;
    }

    pidfile = volinfo->shd.svc.proc.pidfile;

    brick_online = gf_is_service_running(pidfile, &pid);

    /* If shd is not running, then don't print the pid */
    if (!brick_online)
        pid = -1;
    keylen = snprintf(key, sizeof(key), "brick%d.pid", count);
    ret = dict_set_int32n(dict, key, keylen, pid);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED, "Key=%s",
                key, NULL);
        goto out;
    }

    keylen = snprintf(key, sizeof(key), "brick%d.status", count);
    ret = dict_set_int32n(dict, key, keylen, brick_online);

out:
    if (uuid_str)
        GF_FREE(uuid_str);
    if (ret)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Returning %d. adding values to dict failed", ret);

    return ret;
}

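/* Commit handler for 'volume status'. Depending on the cmd bits it adds
 * either one specific daemon/brick or all local bricks plus every enabled
 * auxiliary daemon to rsp_dict, followed by the index/count bookkeeping the
 * cli needs to render the output.
 */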
static int
glusterd_op_status_volume(dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
    int ret = -1;
    int node_count = 0;
    int brick_index = -1;
    int other_count = 0;
    int other_index = 0;
    uint32_t cmd = 0;
    char *volname = NULL;
    char *brick = NULL;
    xlator_t *this = THIS;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    glusterd_conf_t *priv = NULL;
    dict_t *vol_opts = NULL;
#ifdef BUILD_GNFS
    gf_boolean_t nfs_disabled = _gf_false;
#endif
    gf_boolean_t shd_enabled = _gf_false;
    gf_boolean_t origin_glusterd = _gf_false;
    int snapd_enabled, bitrot_enabled, volume_quota_enabled;

    priv = this->private;

    GF_ASSERT(priv);

    GF_ASSERT(dict);

    origin_glusterd = is_origin_glusterd(dict);

    ret = dict_get_uint32(dict, "cmd", &cmd);
    if (ret)
        goto out;

    if (origin_glusterd) {
        ret = 0;
        if ((cmd & GF_CLI_STATUS_ALL)) {
            ret = glusterd_get_all_volnames(rsp_dict);
            if (ret)
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLNAMES_GET_FAIL,
                       "failed to get all volume "
                       "names for status");
        }
    }

    ret = dict_set_uint32(rsp_dict, "cmd", cmd);
    if (ret)
        goto out;

    if (cmd & GF_CLI_STATUS_ALL)
        goto out;

    ret = dict_get_str(dict, "volname", &volname);
    if (ret)
        goto out;

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
               "Volume with name: %s "
               "does not exist",
               volname);
        goto out;
    }
    vol_opts = volinfo->dict;

    if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
        ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict, 0,
                                        vol_opts);
        if (ret)
            goto out;
        other_count++;
        node_count++;
#ifdef BUILD_GNFS
    } else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
        ret = glusterd_add_node_to_dict(priv->nfs_svc.name, rsp_dict, 0,
                                        vol_opts);
        if (ret)
            goto out;
        other_count++;
        node_count++;
#endif
    } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
        ret = glusterd_add_node_to_dict(priv->bitd_svc.name, rsp_dict, 0,
                                        vol_opts);
        if (ret)
            goto out;
        other_count++;
        node_count++;
    } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
        ret = glusterd_add_node_to_dict(priv->scrub_svc.name, rsp_dict, 0,
                                        vol_opts);
        if (ret)
            goto out;
        other_count++;
        node_count++;
    } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
        ret = glusterd_add_snapd_to_dict(volinfo, rsp_dict, other_index);
        if (ret)
            goto out;
        other_count++;
        node_count++;
    } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
        ret = glusterd_add_shd_to_dict(volinfo, rsp_dict, other_index);
        if (ret)
            goto out;
        other_count++;
        node_count++;
    } else if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
        ret = dict_get_str(dict, "brick", &brick);
        if (ret)
            goto out;

        ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
                                                     _gf_false);
        if (ret)
            goto out;

        if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
            goto out;

        glusterd_add_brick_to_dict(volinfo, brickinfo, rsp_dict, ++brick_index);
        if (cmd & GF_CLI_STATUS_DETAIL)
            glusterd_add_brick_detail_to_dict(volinfo, brickinfo, rsp_dict,
                                              brick_index);
        node_count++;

    } else if ((cmd & GF_CLI_STATUS_TASKS) != 0) {
        ret = glusterd_aggregate_task_status(rsp_dict, volinfo);
        goto out;

    } else {
        snapd_enabled = glusterd_is_snapd_enabled(volinfo);
        shd_enabled = gd_is_self_heal_enabled(volinfo, vol_opts);
#ifdef BUILD_GNFS
        nfs_disabled = dict_get_str_boolean(vol_opts, NFS_DISABLE_MAP_KEY,
                                            _gf_false);
#endif
        volume_quota_enabled = glusterd_is_volume_quota_enabled(volinfo);
        bitrot_enabled = glusterd_is_bitrot_enabled(volinfo);

        cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
        {
            brick_index++;
            if (gf_uuid_compare(brickinfo->uuid, MY_UUID))
                continue;

            glusterd_add_brick_to_dict(volinfo, brickinfo, rsp_dict,
                                       brick_index);

            if (cmd & GF_CLI_STATUS_DETAIL) {
                glusterd_add_brick_detail_to_dict(volinfo, brickinfo, rsp_dict,
                                                  brick_index);
            }
            node_count++;
        }

        if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
            other_index = brick_index + 1;
            if (snapd_enabled) {
                ret = glusterd_add_snapd_to_dict(volinfo, rsp_dict,
                                                 other_index);
                if (ret)
                    goto out;
                other_count++;
                other_index++;
                node_count++;
            }

            if (glusterd_is_shd_compatible_volume(volinfo)) {
                if (shd_enabled) {
                    ret = glusterd_add_shd_to_dict(volinfo, rsp_dict,
                                                   other_index);
                    if (ret)
                        goto out;
                    other_count++;
                    other_index++;
                    node_count++;
                }
            }
#ifdef BUILD_GNFS
            if (!nfs_disabled) {
                ret = glusterd_add_node_to_dict(priv->nfs_svc.name, rsp_dict,
                                                other_index, vol_opts);
                if (ret)
                    goto out;
                other_index++;
                other_count++;
                node_count++;
            }
#endif
            if (volume_quota_enabled) {
                ret = glusterd_add_node_to_dict(priv->quotad_svc.name, rsp_dict,
                                                other_index, vol_opts);
                if (ret)
                    goto out;
                other_count++;
                node_count++;
                other_index++;
            }

            if (bitrot_enabled) {
                ret = glusterd_add_node_to_dict(priv->bitd_svc.name, rsp_dict,
                                                other_index, vol_opts);
                if (ret)
                    goto out;
                other_count++;
                node_count++;
                other_index++;
                /* For handling scrub status. The scrub daemon will be
                 * running automatically when bitrot is enabled. */
                ret = glusterd_add_node_to_dict(priv->scrub_svc.name, rsp_dict,
                                                other_index, vol_opts);
                if (ret)
                    goto out;
                other_count++;
                node_count++;
            }
        }
    }

    ret = dict_set_int32_sizen(rsp_dict, "type", volinfo->type);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, -ret, GD_MSG_DICT_SET_FAILED,
                "Key=type", NULL);
        goto out;
    }

    ret = dict_set_int32_sizen(rsp_dict, "brick-index-max", brick_index);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                "Key=brick-index-max", NULL);
        goto out;
    }
    ret = dict_set_int32_sizen(rsp_dict, "other-count", other_count);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                "Key=other-count", NULL);
        goto out;
    }
    ret = dict_set_int32_sizen(rsp_dict, "count", node_count);
    if (ret) {
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                "Key=count", NULL);
        goto out;
    }

    /* Active tasks */
    /* Tasks are added only for a normal volume status request, for either a
     * single volume or all volumes
     */
    if (!glusterd_status_has_tasks(cmd))
        goto out;

    ret = glusterd_aggregate_task_status(rsp_dict, volinfo);
    if (ret)
        goto out;
    ret = 0;

out:
    gf_msg_debug(this->name, 0, "Returning %d", ret);

    return ret;
}

static int
glusterd_op_ac_none(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    gf_msg_debug(THIS->name, 0, "Returning with %d", ret);

    return ret;
}

static int
glusterd_op_sm_locking_failed(uuid_t *txn_id)
{
    int ret = -1;

    opinfo.op_ret = -1;
    opinfo.op_errstr = gf_strdup("Locking failed for one of the peers.");

    ret = glusterd_set_txn_opinfo(txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");
    /* Inject a reject event so that unlocking gets triggered right away */
    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT, txn_id, NULL);

    return ret;
}

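/* State-machine action: send a mgmt_v3 lock request to every befriended,
 * connected peer that existed when the transaction started. If nothing is
 * pending afterwards, an ALL_ACC event is injected immediately.
 */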
static int
glusterd_op_ac_send_lock(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;
    rpc_clnt_procedure_t *proc = NULL;
    glusterd_conf_t *priv = NULL;
    xlator_t *this = THIS;
    glusterd_peerinfo_t *peerinfo = NULL;
    uint32_t pending_count = 0;
    dict_t *dict = NULL;

    priv = this->private;
    GF_ASSERT(priv);

    RCU_READ_LOCK;
    cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
    {
        /* Only send requests to peers who were available before the
         * transaction started
         */
        if (peerinfo->generation > opinfo.txn_generation)
            continue;

        if (!peerinfo->connected || !peerinfo->mgmt)
            continue;
        if ((peerinfo->state != GD_FRIEND_STATE_BEFRIENDED) &&
            (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
            continue;

        dict = glusterd_op_get_ctx();
        dict_ref(dict);

        proc = &peerinfo->mgmt_v3->proctable[GLUSTERD_MGMT_V3_LOCK];
        if (proc->fn) {
            ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
            if (ret) {
                RCU_READ_UNLOCK;
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                       "failed to set peerinfo");
                dict_unref(dict);
                goto out;
            }

            ret = proc->fn(NULL, this, dict);
            if (ret) {
                RCU_READ_UNLOCK;
                gf_msg(this->name, GF_LOG_WARNING, 0,
                       GD_MSG_MGMTV3_LOCK_REQ_SEND_FAIL,
                       "Failed to send mgmt_v3 lock "
                       "request for operation "
                       "'Volume %s' to peer %s",
                       gd_op_list[opinfo.op], peerinfo->hostname);
                dict_unref(dict);
                goto out;
            }
            /* Mark the peer as locked */
            peerinfo->locked = _gf_true;
            pending_count++;
        }
    }
    RCU_READ_UNLOCK;

    opinfo.pending_count = pending_count;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    if (!opinfo.pending_count)
        ret = glusterd_op_sm_inject_all_acc(&event->txn_id);

out:
    if (ret)
        ret = glusterd_op_sm_locking_failed(&event->txn_id);

    gf_msg_debug(this->name, 0, "Returning with %d", ret);
    return ret;
}

static int
glusterd_op_ac_send_unlock(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;
    rpc_clnt_procedure_t *proc = NULL;
    glusterd_conf_t *priv = NULL;
    xlator_t *this = THIS;
    glusterd_peerinfo_t *peerinfo = NULL;
    uint32_t pending_count = 0;
    dict_t *dict = NULL;

    priv = this->private;
    GF_ASSERT(priv);

    RCU_READ_LOCK;
    cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
    {
        /* Only send requests to peers who were available before the
         * transaction started
         */
        if (peerinfo->generation > opinfo.txn_generation)
            continue;

        if (!peerinfo->connected || !peerinfo->mgmt || !peerinfo->locked)
            continue;
        if ((peerinfo->state != GD_FRIEND_STATE_BEFRIENDED) &&
            (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
            continue;
        dict = glusterd_op_get_ctx();
        dict_ref(dict);

        proc = &peerinfo->mgmt_v3->proctable[GLUSTERD_MGMT_V3_UNLOCK];
        if (proc->fn) {
            ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
            if (ret) {
                opinfo.op_errstr = gf_strdup(
                    "Unlocking failed for one of the "
                    "peers.");
                gf_msg(this->name, GF_LOG_ERROR, 0,
                       GD_MSG_CLUSTER_UNLOCK_FAILED,
                       "Unlocking failed for operation"
                       " volume %s on peer %s",
                       gd_op_list[opinfo.op], peerinfo->hostname);
                dict_unref(dict);
                continue;
            }

            ret = proc->fn(NULL, this, dict);
            if (ret) {
                opinfo.op_errstr = gf_strdup(
                    "Unlocking failed for one of the "
                    "peers.");
                gf_msg(this->name, GF_LOG_ERROR, 0,
                       GD_MSG_CLUSTER_UNLOCK_FAILED,
                       "Unlocking failed for operation"
                       " volume %s on peer %s",
                       gd_op_list[opinfo.op], peerinfo->hostname);
                dict_unref(dict);
                continue;
            }
            pending_count++;
            peerinfo->locked = _gf_false;
        }
    }
    RCU_READ_UNLOCK;

    opinfo.pending_count = pending_count;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    if (!opinfo.pending_count)
        ret = glusterd_op_sm_inject_all_acc(&event->txn_id);

    gf_msg_debug(this->name, 0, "Returning with %d", ret);
    return ret;
}

static int
glusterd_op_ac_ack_drain(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    if (opinfo.pending_count > 0)
        opinfo.pending_count--;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    if (!opinfo.pending_count)
        ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
                                          NULL);

    gf_msg_debug(THIS->name, 0, "Returning with %d", ret);

    return ret;
}

static int
glusterd_op_ac_send_unlock_drain(glusterd_op_sm_event_t *event, void *ctx)
{
    return glusterd_op_ac_ack_drain(event, ctx);
}

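/* State-machine action run on the receiving side of a lock request: it
 * takes the legacy cluster lock for peers on an older op_version, or the
 * volume/global mgmt_v3 locks when a dict is supplied, and sends back the
 * corresponding response.
 */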
static int
glusterd_op_ac_lock(glusterd_op_sm_event_t *event, void *ctx)
{
    int32_t ret = 0;
    char *volname = NULL;
    char *globalname = NULL;
    glusterd_op_lock_ctx_t *lock_ctx = NULL;
    xlator_t *this = THIS;
    uint32_t op_errno = 0;
    glusterd_conf_t *conf = NULL;
    time_t timeout = 0;

    GF_ASSERT(event);
    GF_ASSERT(ctx);

    conf = this->private;
    GF_ASSERT(conf);

    lock_ctx = (glusterd_op_lock_ctx_t *)ctx;

    /* If the req came from a node running an older op_version the dict
     * won't be present. Based on that, acquire either the cluster or the
     * mgmt_v3 lock. */
    if (lock_ctx->dict == NULL) {
        ret = glusterd_lock(lock_ctx->uuid);
        glusterd_op_lock_send_resp(lock_ctx->req, ret);
    } else {
        /* Cli will add a timeout key to the dict if the default timeout
         * is other than 2 minutes. Here we use this value to decide
         * whether mgmt_v3_lock_timeout should keep its default value or
         * be changed according to the timeout value,
         * i.e. timeout + 120 seconds. */
        ret = dict_get_time(lock_ctx->dict, "timeout", &timeout);
        if (!ret)
            conf->mgmt_v3_lock_timeout = timeout + 120;

        ret = dict_get_str(lock_ctx->dict, "volname", &volname);
        if (ret)
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "Unable to get volname");
        else {
            ret = glusterd_mgmt_v3_lock(volname, lock_ctx->uuid, &op_errno,
                                        "vol");
            if (ret)
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
                       "Unable to acquire lock for %s", volname);
            goto out;
        }
        ret = dict_get_str(lock_ctx->dict, "globalname", &globalname);
        if (!ret) {
            ret = glusterd_mgmt_v3_lock(globalname, lock_ctx->uuid, &op_errno,
                                        "global");
            if (ret)
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
                       "Unable to acquire lock for %s", globalname);
        }
    out:
        glusterd_op_mgmt_v3_lock_send_resp(lock_ctx->req, &event->txn_id, ret);

        dict_unref(lock_ctx->dict);
    }

    gf_msg_debug(THIS->name, 0, "Lock Returned %d", ret);
    return ret;
}

static int
glusterd_op_ac_unlock(glusterd_op_sm_event_t *event, void *ctx)
{
    int32_t ret = 0;
    char *volname = NULL;
    char *globalname = NULL;
    glusterd_op_lock_ctx_t *lock_ctx = NULL;
    glusterd_conf_t *priv = NULL;
    xlator_t *this = THIS;

    GF_ASSERT(event);
    GF_ASSERT(ctx);

    priv = this->private;

    lock_ctx = (glusterd_op_lock_ctx_t *)ctx;

    /* If the req came from a node running an older op_version the dict
     * won't be present. Based on that, release either the cluster or the
     * mgmt_v3 lock. */
    if (lock_ctx->dict == NULL) {
        ret = glusterd_unlock(lock_ctx->uuid);
        glusterd_op_unlock_send_resp(lock_ctx->req, ret);
    } else {
        ret = dict_get_str(lock_ctx->dict, "volname", &volname);
        if (ret)
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "Unable to get volname");
        else {
            ret = glusterd_mgmt_v3_unlock(volname, lock_ctx->uuid, "vol");
            if (ret)
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
                       "Unable to release lock for %s", volname);
            goto out;
        }

        ret = dict_get_str(lock_ctx->dict, "globalname", &globalname);
        if (!ret) {
            ret = glusterd_mgmt_v3_unlock(globalname, lock_ctx->uuid, "global");
            if (ret)
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
                       "Unable to release lock for %s", globalname);
        }
    out:
        glusterd_op_mgmt_v3_unlock_send_resp(lock_ctx->req, &event->txn_id,
                                             ret);

        dict_unref(lock_ctx->dict);
    }

    gf_msg_debug(this->name, 0, "Unlock Returned %d", ret);

    if (priv->pending_quorum_action)
        glusterd_do_quorum_action();
    return ret;
}

static int
glusterd_op_ac_local_unlock(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;
    uuid_t *originator = NULL;

    GF_ASSERT(event);
    GF_ASSERT(ctx);

    originator = (uuid_t *)ctx;

    ret = glusterd_unlock(*originator);

    gf_msg_debug(THIS->name, 0, "Unlock Returned %d", ret);

    return ret;
}

static int
glusterd_op_ac_rcvd_lock_acc(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    GF_ASSERT(event);

    if (opinfo.pending_count > 0)
        opinfo.pending_count--;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set "
               "transaction's opinfo");

    if (opinfo.pending_count > 0)
        goto out;

    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACC, &event->txn_id,
                                      NULL);

    gf_msg_debug(THIS->name, 0, "Returning %d", ret);

out:
    return ret;
}

int
glusterd_dict_set_volid(dict_t *dict, char *volname, char **op_errstr)
{
    int ret = -1;
    glusterd_volinfo_t *volinfo = NULL;
    char *volid = NULL;
    char msg[1024] = {
        0,
    };
    xlator_t *this = THIS;

    if (!dict || !volname) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_INVALID_ARGUMENT, NULL);
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        snprintf(msg, sizeof(msg), FMTSTR_CHECK_VOL_EXISTS, volname);
        goto out;
    }
    volid = gf_strdup(uuid_utoa(volinfo->volume_id));
    if (!volid) {
        ret = -1;
        goto out;
    }
    ret = dict_set_dynstr_sizen(dict, "vol-id", volid);
    if (ret) {
        snprintf(msg, sizeof(msg),
                 "Failed to set volume id of volume"
                 " %s",
                 volname);
        GF_FREE(volid);
        goto out;
    }
out:
    if (msg[0] != '\0') {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOL_ID_SET_FAIL, "%s", msg);
        *op_errstr = gf_strdup(msg);
    }
    return ret;
}

int
gd_set_commit_hash(dict_t *dict)
{
    struct timeval tv;
    uint32_t hash;

    /*
     * We need a commit hash that won't conflict with others we might have
     * set, or zero which is the implicit value if we never have.  Using
     * seconds<<3 like this ensures that we'll only get a collision if two
     * consecutive rebalances are separated by exactly 2^29 seconds - about
     * 17 years - and even then there's only a 1/8 chance of a collision in
     * the low order bits.  It's far more likely that this code will have
     * changed completely by then.  If not, call me in 2031.
     *
     * P.S. Time zone changes?  Yeah, right.
     */
    gettimeofday(&tv, NULL);
    hash = tv.tv_sec << 3;

    /*
     * Make sure at least one of those low-order bits is set.  The extra
     * shifting is because not all machines have sub-millisecond time
     * resolution.
     */
    hash |= 1 << ((tv.tv_usec >> 10) % 3);

    return dict_set_uint32(dict, "commit-hash", hash);
}

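/* Builds the request dictionary that is sent to peers for staging and
 * commit: per-op keys such as "vol-id" and the commit-hash are filled in
 * here before the op ctx is copied (or ref'd) into *req.
 */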
int
glusterd_op_build_payload(dict_t **req, char **op_errstr, dict_t *op_ctx)
{
    int ret = -1;
    void *ctx = NULL;
    dict_t *dict = NULL;
    dict_t *req_dict = NULL;
    glusterd_op_t op = GD_OP_NONE;
    char *volname = NULL;
    uint32_t status_cmd = GF_CLI_STATUS_NONE;
    xlator_t *this = THIS;
    gf_boolean_t do_common = _gf_false;

    GF_ASSERT(req);

    req_dict = dict_new();
    if (!req_dict)
        goto out;

    if (!op_ctx) {
        op = glusterd_op_get_op();
        ctx = (void *)glusterd_op_get_ctx();
        if (!ctx) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_OPTIONS_GIVEN,
                   "Null Context for "
                   "op %d",
                   op);
            ret = -1;
            goto out;
        }

    } else {
#define GD_SYNC_OPCODE_KEY "sync-mgmt-operation"
        ret = dict_get_int32(op_ctx, GD_SYNC_OPCODE_KEY, (int32_t *)&op);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "Failed to get volume"
                   " operation");
            goto out;
        }
        ctx = op_ctx;
#undef GD_SYNC_OPCODE_KEY
    }

    dict = ctx;
    switch (op) {
        case GD_OP_CREATE_VOLUME: {
            ++glusterfs_port;
            ret = dict_set_int32_sizen(dict, "port", glusterfs_port);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                       "Failed to set port in "
                       "dictionary");
                goto out;
            }
            dict_copy(dict, req_dict);
        } break;

        case GD_OP_GSYNC_CREATE:
        case GD_OP_GSYNC_SET: {
            ret = glusterd_op_gsync_args_get(dict, op_errstr, &volname, NULL,
                                             NULL);
            if (ret == 0) {
                ret = glusterd_dict_set_volid(dict, volname, op_errstr);
                if (ret)
                    goto out;
            }
            dict_copy(dict, req_dict);
        } break;

        case GD_OP_SET_VOLUME: {
            ret = dict_get_str(dict, "volname", &volname);
            if (ret) {
                gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_DICT_GET_FAILED,
                       "volname is not present in "
                       "operation ctx");
                goto out;
            }
            if (strcmp(volname, "help") && strcmp(volname, "help-xml") &&
                strcasecmp(volname, "all")) {
                ret = glusterd_dict_set_volid(dict, volname, op_errstr);
                if (ret)
                    goto out;
            }
            dict_unref(req_dict);
            req_dict = dict_ref(dict);
        } break;

        case GD_OP_REMOVE_BRICK: {
            dict_t *dict = ctx;
            ret = dict_get_str(dict, "volname", &volname);
            if (ret) {
                gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_DICT_GET_FAILED,
                       "volname is not present in "
                       "operation ctx");
                goto out;
            }

            ret = glusterd_dict_set_volid(dict, volname, op_errstr);
            if (ret)
                goto out;

            ret = gd_set_commit_hash(dict);
            if (ret != 0)
                goto out;

            dict_unref(req_dict);
            req_dict = dict_ref(dict);
        } break;

        case GD_OP_STATUS_VOLUME: {
            ret = dict_get_uint32(dict, "cmd", &status_cmd);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                       "Status command not present "
                       "in op ctx");
                goto out;
            }
            if (GF_CLI_STATUS_ALL & status_cmd) {
                dict_copy(dict, req_dict);
                break;
            }
            do_common = _gf_true;
        } break;

        case GD_OP_DELETE_VOLUME:
        case GD_OP_START_VOLUME:
        case GD_OP_STOP_VOLUME:
        case GD_OP_ADD_BRICK:
        case GD_OP_REPLACE_BRICK:
        case GD_OP_RESET_VOLUME:
        case GD_OP_LOG_ROTATE:
        case GD_OP_QUOTA:
        case GD_OP_PROFILE_VOLUME:
        case GD_OP_HEAL_VOLUME:
        case GD_OP_STATEDUMP_VOLUME:
        case GD_OP_CLEARLOCKS_VOLUME:
        case GD_OP_DEFRAG_BRICK_VOLUME:
        case GD_OP_BARRIER:
        case GD_OP_BITROT:
        case GD_OP_SCRUB_STATUS:
        case GD_OP_SCRUB_ONDEMAND:
        case GD_OP_RESET_BRICK: {
            do_common = _gf_true;
        } break;

        case GD_OP_REBALANCE: {
            if (gd_set_commit_hash(dict) != 0) {
                goto out;
            }
            do_common = _gf_true;
        } break;

        case GD_OP_SYNC_VOLUME:
        case GD_OP_COPY_FILE:
        case GD_OP_SYS_EXEC:
        case GD_OP_GANESHA: {
            dict_copy(dict, req_dict);
        } break;

        default:
            break;
    }

    /*
     * This has been moved out of the switch so that multiple ops with
     * other special needs can all "fall through" to it.
     */
    if (do_common) {
        ret = dict_get_str(dict, "volname", &volname);
        if (ret) {
            gf_msg(this->name, GF_LOG_CRITICAL, -ret, GD_MSG_DICT_GET_FAILED,
                   "volname is not present in "
                   "operation ctx");
            goto out;
        }

        if (strcasecmp(volname, "all")) {
            ret = glusterd_dict_set_volid(dict, volname, op_errstr);
            if (ret)
                goto out;
        }
        dict_copy(dict, req_dict);
    }

    *req = req_dict;
    ret = 0;

out:
    if (ret && req_dict)
        dict_unref(req_dict);
    return ret;
}

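/* State-machine action: validate quorum and stage the operation locally,
 * then forward the stage request to all eligible peers. Any failure injects
 * a reject event so the transaction unwinds.
 */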
static int
glusterd_op_ac_send_stage_op(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;
    int ret1 = 0;
    rpc_clnt_procedure_t *proc = NULL;
    glusterd_conf_t *priv = NULL;
    xlator_t *this = THIS;
    glusterd_peerinfo_t *peerinfo = NULL;
    dict_t *dict = NULL;
    dict_t *rsp_dict = NULL;
    char *op_errstr = NULL;
    glusterd_op_t op = GD_OP_NONE;
    uint32_t pending_count = 0;

    priv = this->private;
    GF_ASSERT(priv);

    op = glusterd_op_get_op();

    rsp_dict = dict_new();
    if (!rsp_dict) {
        gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
               "Failed to create rsp_dict");
        ret = -1;
        goto out;
    }

    ret = glusterd_op_build_payload(&dict, &op_errstr, NULL);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
               LOGSTR_BUILD_PAYLOAD, gd_op_list[op]);
        if (op_errstr == NULL)
            gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
        opinfo.op_errstr = op_errstr;
        goto out;
    }

    ret = glusterd_validate_quorum(this, op, dict, &op_errstr);
    if (ret) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_SERVER_QUORUM_NOT_MET,
               "Server quorum not met. Rejecting operation.");
        opinfo.op_errstr = op_errstr;
        goto out;
    }

    ret = glusterd_op_stage_validate(op, dict, &op_errstr, rsp_dict);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VALIDATE_FAILED,
               LOGSTR_STAGE_FAIL, gd_op_list[op], "localhost",
               (op_errstr) ? ":" : " ", (op_errstr) ? op_errstr : " ");
        if (op_errstr == NULL)
            gf_asprintf(&op_errstr, OPERRSTR_STAGE_FAIL, "localhost");
        opinfo.op_errstr = op_errstr;
        goto out;
    }

    RCU_READ_LOCK;
    cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
    {
        /* Only send requests to peers who were available before the
         * transaction started
         */
        if (peerinfo->generation > opinfo.txn_generation)
            continue;

        if (!peerinfo->connected || !peerinfo->mgmt)
            continue;
        if ((peerinfo->state != GD_FRIEND_STATE_BEFRIENDED) &&
            (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
            continue;

        proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_STAGE_OP];
        GF_ASSERT(proc);
        if (proc->fn) {
            ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
            if (ret) {
                RCU_READ_UNLOCK;
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                       "failed to set peerinfo");
                goto out;
            }

            ret = proc->fn(NULL, this, dict);
            if (ret) {
                gf_msg(this->name, GF_LOG_WARNING, 0,
                       GD_MSG_STAGE_REQ_SEND_FAIL,
                       "Failed to send stage request for operation "
                       "'Volume %s' to peer %s",
                       gd_op_list[op], peerinfo->hostname);
                continue;
            }
            pending_count++;
        }
    }
    RCU_READ_UNLOCK;

    opinfo.pending_count = pending_count;
out:
    if (ret)
        opinfo.op_ret = ret;

    ret1 = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret1)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set transaction's opinfo");

    if (rsp_dict)
        dict_unref(rsp_dict);

    if (dict)
        dict_unref(dict);
    if (ret) {
        glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT, &event->txn_id, NULL);
        opinfo.op_ret = ret;
    }

    gf_msg_debug(this->name, 0,
                 "Sent stage op request for 'Volume %s' to %d peers",
                 gd_op_list[op], opinfo.pending_count);

    if (!opinfo.pending_count)
        ret = glusterd_op_sm_inject_all_acc(&event->txn_id);

    gf_msg_debug(this->name, 0, "Returning with %d", ret);

    return ret;
}

/* This function takes a dict and converts the UUID values of the specified
 * keys into hostnames.
 */
static int
glusterd_op_volume_dict_uuid_to_hostname(dict_t *dict, const char *key_fmt,
                                         int idx_min, int idx_max)
{
    int ret = -1;
    int i = 0;
    char key[128];
    int keylen;
    char *uuid_str = NULL;
    uuid_t uuid = {
        0,
    };
    char *hostname = NULL;
    xlator_t *this = THIS;

    GF_ASSERT(dict);
    GF_ASSERT(key_fmt);

    for (i = idx_min; i < idx_max; i++) {
        keylen = snprintf(key, sizeof(key), key_fmt, i);
        ret = dict_get_strn(dict, key, keylen, &uuid_str);
        if (ret) {
            ret = 0;
            continue;
        }

        gf_msg_debug(this->name, 0, "Got uuid %s", uuid_str);

        ret = gf_uuid_parse(uuid_str, uuid);
        /* If parsing fails, don't error out; let the original value
         * be retained.
         */
        if (ret) {
            ret = 0;
            continue;
        }

        hostname = glusterd_uuid_to_hostname(uuid);
        if (hostname) {
            gf_msg_debug(this->name, 0, "%s -> %s", uuid_str, hostname);
            ret = dict_set_dynstrn(dict, key, keylen, hostname);
            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                       "Error setting hostname %s to dict", hostname);
                GF_FREE(hostname);
                goto out;
            }
        }
    }

out:
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

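/* Remap a generic defrag status to its fix-layout-specific counterpart and
 * store the remapped value back into the dict under the given key. */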
static int
reassign_defrag_status(dict_t *dict, char *key, int keylen,
                       gf_defrag_status_t *status)
{
    int ret = 0;

    if (!*status)
        return ret;

    switch (*status) {
        case GF_DEFRAG_STATUS_STARTED:
            *status = GF_DEFRAG_STATUS_LAYOUT_FIX_STARTED;
            break;

        case GF_DEFRAG_STATUS_STOPPED:
            *status = GF_DEFRAG_STATUS_LAYOUT_FIX_STOPPED;
            break;

        case GF_DEFRAG_STATUS_COMPLETE:
            *status = GF_DEFRAG_STATUS_LAYOUT_FIX_COMPLETE;
            break;

        case GF_DEFRAG_STATUS_FAILED:
            *status = GF_DEFRAG_STATUS_LAYOUT_FIX_FAILED;
            break;
        default:
            break;
    }

    ret = dict_set_int32n(dict, key, keylen, *status);
    if (ret)
        gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_DICT_SET_FAILED,
               "failed to reset defrag %s in dict", key);

    return ret;
}

/* Check and reassign the defrag_status enum received from the rebalance
 * processes of all peers, so that the rebalance-status CLI command can
 * display whether a full rebalance or just a fix-layout was carried out.
 */
static int
glusterd_op_check_peer_defrag_status(dict_t *dict, int count)
{
    glusterd_volinfo_t *volinfo = NULL;
    gf_defrag_status_t status = GF_DEFRAG_STATUS_NOT_STARTED;
    char key[64] = {
        0,
    };
    int keylen;
    char *volname = NULL;
    int ret = -1;
    int i = 1;

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume name");
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_NOT_FOUND,
               FMTSTR_CHECK_VOL_EXISTS, volname);
        goto out;
    }

    if (volinfo->rebal.defrag_cmd != GF_DEFRAG_CMD_START_LAYOUT_FIX) {
        /* Fix-layout was not issued; we don't need to reassign
           the status */
        ret = 0;
        goto out;
    }

    do {
        keylen = snprintf(key, sizeof(key), "status-%d", i);
        ret = dict_get_int32n(dict, key, keylen, (int32_t *)&status);
        if (ret) {
            gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_DICT_GET_FAILED,
                   "failed to get defrag %s", key);
            goto out;
        }
        ret = reassign_defrag_status(dict, key, keylen, &status);
        if (ret)
            goto out;
        i++;
    } while (i <= count);

    ret = 0;
out:
    return ret;
}

/* This function is used to verify if op_ctx indeed
   requires modification. This is necessary since the
   dictionary for certain commands might not have the
   necessary keys required for the op_ctx modification
   to succeed.

   Special Cases:
   - volume status all
   - volume status

   Regular Cases:
   - volume status <volname> <brick>
   - volume status <volname> mem
   - volume status <volname> clients
   - volume status <volname> inode
   - volume status <volname> fd
   - volume status <volname> callpool
   - volume status <volname> tasks
*/

static gf_boolean_t
glusterd_is_volume_status_modify_op_ctx(uint32_t cmd)
{
    if ((cmd & GF_CLI_STATUS_MASK) == GF_CLI_STATUS_NONE) {
        if (cmd & GF_CLI_STATUS_BRICK)
            return _gf_false;
        if (cmd & GF_CLI_STATUS_ALL)
            return _gf_false;
        return _gf_true;
    }
    return _gf_false;
}

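/* For bricks that lack the newer 'brick%d.rdma_port' key, copy the value
 * from the legacy 'brick%d.port' key into it and blank out the legacy
 * entry. */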
int
glusterd_op_modify_port_key(dict_t *op_ctx, int brick_index_max)
{
    char *port = NULL;
    int i = 0;
    int ret = -1;
    char key[64] = {0};
    int keylen;
    char old_key[64] = {0};
    int old_keylen;

    for (i = 0; i <= brick_index_max; i++) {
        keylen = snprintf(key, sizeof(key), "brick%d.rdma_port", i);
        ret = dict_get_strn(op_ctx, key, keylen, &port);

        if (ret) {
            old_keylen = snprintf(old_key, sizeof(old_key), "brick%d.port", i);
            ret = dict_get_strn(op_ctx, old_key, old_keylen, &port);
            if (ret)
                goto out;

            ret = dict_set_strn(op_ctx, key, keylen, port);
            if (ret)
                goto out;
            ret = dict_set_nstrn(op_ctx, old_key, old_keylen, "\0", SLEN("\0"));
            if (ret)
                goto out;
        }
    }
out:
    return ret;
}

/* This function is used to modify the op_ctx dict before sending it back
 * to cli. This is useful in situations like changing the peer uuids to
 * hostnames etc.
 */
void
glusterd_op_modify_op_ctx(glusterd_op_t op, void *ctx)
{
    int ret = -1;
    dict_t *op_ctx = NULL;
    int brick_index_max = -1;
    int other_count = 0;
    int count = 0;
    uint32_t cmd = GF_CLI_STATUS_NONE;
    xlator_t *this = THIS;
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    char *port = NULL;
    int i = 0;
    char key[64] = {
        0,
    };
    int keylen;

    if (ctx)
        op_ctx = ctx;
    else
        op_ctx = glusterd_op_get_ctx();

    if (!op_ctx) {
        gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_OPCTX_NULL,
               "Operation context is not present.");
        goto out;
    }

    switch (op) {
        case GD_OP_STATUS_VOLUME:
            ret = dict_get_uint32(op_ctx, "cmd", &cmd);
            if (ret) {
                gf_msg_debug(this->name, 0, "Failed to get status cmd");
                goto out;
            }

            if (!glusterd_is_volume_status_modify_op_ctx(cmd)) {
                gf_msg_debug(this->name, 0,
                             "op_ctx modification not required for status "
                             "operation being performed");
                goto out;
            }

            ret = dict_get_int32(op_ctx, "brick-index-max", &brick_index_max);
            if (ret) {
                gf_msg_debug(this->name, 0, "Failed to get brick-index-max");
                goto out;
            }

            ret = dict_get_int32(op_ctx, "other-count", &other_count);
            if (ret) {
                gf_msg_debug(this->name, 0, "Failed to get other-count");
                goto out;
            }

            count = brick_index_max + other_count + 1;

            /*
             * A glusterd older than version 3.7 sends the rdma port
             * under the older key. Change that value here to support
             * backward compatibility.
             */
            ret = dict_get_str(op_ctx, "volname", &volname);
            if (ret)
                goto out;

            for (i = 0; i <= brick_index_max; i++) {
                keylen = snprintf(key, sizeof(key), "brick%d.rdma_port", i);
                ret = dict_get_strn(op_ctx, key, keylen, &port);
                if (ret) {
                    ret = dict_set_nstrn(op_ctx, key, keylen, "\0", SLEN("\0"));
                    if (ret)
                        goto out;
                }
            }
            ret = glusterd_volinfo_find(volname, &volinfo);
            if (ret)
                goto out;
            /* add 'brick%d.peerid' into op_ctx with the value of
               'brick%d.path'. nfs/shd-like services have this
               additional uuid */
            {
                char *uuid_str = NULL;
                char *uuid = NULL;
                int i;

                for (i = brick_index_max + 1; i < count; i++) {
                    keylen = snprintf(key, sizeof(key), "brick%d.path", i);
                    ret = dict_get_strn(op_ctx, key, keylen, &uuid_str);
                    if (!ret) {
                        keylen = snprintf(key, sizeof(key), "brick%d.peerid",
                                          i);
                        uuid = gf_strdup(uuid_str);
                        if (!uuid) {
                            gf_msg_debug(this->name, 0,
                                         "unable to create dup of uuid_str");
                            continue;
                        }
                        ret = dict_set_dynstrn(op_ctx, key, keylen, uuid);
                        if (ret != 0) {
                            GF_FREE(uuid);
                        }
                    }
                }
            }

            ret = glusterd_op_volume_dict_uuid_to_hostname(
                op_ctx, "brick%d.path", 0, count);
            if (ret)
                gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_CONVERSION_FAILED,
                       "Failed uuid to hostname conversion");

            break;

        case GD_OP_PROFILE_VOLUME:
            ret = dict_get_str_boolean(op_ctx, "nfs", _gf_false);
            if (!ret)
                goto out;

            ret = dict_get_int32(op_ctx, "count", &count);
            if (ret) {
                gf_msg_debug(this->name, 0, "Failed to get brick count");
                goto out;
            }

            ret = glusterd_op_volume_dict_uuid_to_hostname(op_ctx, "%d-brick",
                                                           1, (count + 1));
            if (ret)
                gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_CONVERSION_FAILED,
                       "Failed uuid to hostname conversion");

            break;

        /* For both rebalance and remove-brick status, the glusterd op is the
         * same
         */
        case GD_OP_DEFRAG_BRICK_VOLUME:
        case GD_OP_SCRUB_STATUS:
        case GD_OP_SCRUB_ONDEMAND:
            ret = dict_get_int32(op_ctx, "count", &count);
            if (ret) {
                gf_msg_debug(this->name, 0, "Failed to get count");
                goto out;
            }

            /* add 'node-name-%d' into op_ctx with value uuid_str.
               this will be used to convert to hostname later */
            {
                char *uuid_str = NULL;
                char *uuid = NULL;
                int i;

                for (i = 1; i <= count; i++) {
                    keylen = snprintf(key, sizeof(key), "node-uuid-%d", i);
                    ret = dict_get_strn(op_ctx, key, keylen, &uuid_str);
                    if (!ret) {
                        keylen = snprintf(key, sizeof(key), "node-name-%d", i);
                        uuid = gf_strdup(uuid_str);
                        if (!uuid) {
                            gf_msg_debug(this->name, 0,
                                         "unable to create dup of uuid_str");
                            continue;
                        }
                        ret = dict_set_dynstrn(op_ctx, key, keylen, uuid);
                        if (ret != 0) {
                            GF_FREE(uuid);
                        }
                    }
                }
            }

            ret = glusterd_op_volume_dict_uuid_to_hostname(
                op_ctx, "node-name-%d", 1, (count + 1));
            if (ret)
                gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_CONVERSION_FAILED,
                       "Failed uuid to hostname conversion");

            /* Since both rebalance and bitrot scrub status/ondemand
             * use the same code path till here, break out in the case
             * of scrub status.
             */
            if (op == GD_OP_SCRUB_STATUS || op == GD_OP_SCRUB_ONDEMAND) {
                break;
            }

            ret = glusterd_op_check_peer_defrag_status(op_ctx, count);
            if (ret)
                gf_msg(this->name, GF_LOG_ERROR, 0,
                       GD_MSG_DEFRAG_STATUS_UPDATE_FAIL,
                       "Failed to reset defrag status for fix-layout");
            break;

        default:
            ret = 0;
            gf_msg_debug(this->name, 0, "op_ctx modification not required");
            break;
    }

out:
    if (ret)
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_OPCTX_UPDATE_FAIL,
               "op_ctx modification failed");
    return;
}

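/* Run the pre- or post-commit hook scripts for the given op: pre hooks are
 * executed directly via glusterd_hooks_run_hooks(), while post hooks are
 * queued as stubs for later execution. */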
int
glusterd_op_commit_hook(glusterd_op_t op, dict_t *op_ctx,
                        glusterd_commit_hook_type_t type)
{
    glusterd_conf_t *priv = NULL;
    char hookdir[PATH_MAX] = {
        0,
    };
    char scriptdir[PATH_MAX] = {
        0,
    };
    char *type_subdir = "";
    char *cmd_subdir = NULL;
    int ret = -1;
    int32_t len = 0;

    priv = THIS->private;
    switch (type) {
        case GD_COMMIT_HOOK_NONE:
        case GD_COMMIT_HOOK_MAX:
            /* Won't be called */
            break;

        case GD_COMMIT_HOOK_PRE:
            type_subdir = "pre";
            break;
        case GD_COMMIT_HOOK_POST:
            type_subdir = "post";
            break;
    }

    cmd_subdir = glusterd_hooks_get_hooks_cmd_subdir(op);
    if (strlen(cmd_subdir) == 0)
        return -1;

    GLUSTERD_GET_HOOKS_DIR(hookdir, GLUSTERD_HOOK_VER, priv);
    len = snprintf(scriptdir, sizeof(scriptdir), "%s/%s/%s", hookdir,
                   cmd_subdir, type_subdir);
    if ((len < 0) || (len >= sizeof(scriptdir))) {
        return -1;
    }

    switch (type) {
        case GD_COMMIT_HOOK_NONE:
        case GD_COMMIT_HOOK_MAX:
            /* Won't be called */
            break;

        case GD_COMMIT_HOOK_PRE:
            ret = glusterd_hooks_run_hooks(scriptdir, op, op_ctx, type);
            break;
        case GD_COMMIT_HOOK_POST:
            ret = glusterd_hooks_post_stub_enqueue(scriptdir, op, op_ctx);
            break;
    }

    return ret;
}

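/* Op state machine action on the originator: perform the commit locally
 * first, then fan the commit request out to all befriended peers that were
 * connected before the transaction started. */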
static int
glusterd_op_ac_send_commit_op(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;
    int ret1 = 0;
    rpc_clnt_procedure_t *proc = NULL;
    glusterd_conf_t *priv = NULL;
    xlator_t *this = THIS;
    dict_t *dict = NULL;
    glusterd_peerinfo_t *peerinfo = NULL;
    char *op_errstr = NULL;
    glusterd_op_t op = GD_OP_NONE;
    uint32_t pending_count = 0;

    priv = this->private;
    GF_ASSERT(priv);

    op = glusterd_op_get_op();

    ret = glusterd_op_build_payload(&dict, &op_errstr, NULL);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL,
               LOGSTR_BUILD_PAYLOAD, gd_op_list[op]);
        if (op_errstr == NULL)
            gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
        opinfo.op_errstr = op_errstr;
        goto out;
    }

    ret = glusterd_op_commit_perform(op, dict, &op_errstr,
                                     NULL); /* rsp_dict invalid for source */
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
               LOGSTR_COMMIT_FAIL, gd_op_list[op], "localhost",
               (op_errstr) ? ":" : " ", (op_errstr) ? op_errstr : " ");
        if (op_errstr == NULL)
            gf_asprintf(&op_errstr, OPERRSTR_COMMIT_FAIL, "localhost");
        opinfo.op_errstr = op_errstr;
        goto out;
    }

    RCU_READ_LOCK;
    cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
    {
        /* Only send requests to peers who were available before the
         * transaction started
         */
        if (peerinfo->generation > opinfo.txn_generation)
            continue;

        if (!peerinfo->connected || !peerinfo->mgmt)
            continue;
        if ((peerinfo->state != GD_FRIEND_STATE_BEFRIENDED) &&
            (glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
            continue;

        proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_COMMIT_OP];
        GF_ASSERT(proc);
        if (proc->fn) {
            ret = dict_set_static_ptr(dict, "peerinfo", peerinfo);
            if (ret) {
                RCU_READ_UNLOCK;
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                       "failed to set peerinfo");
                goto out;
            }
            ret = proc->fn(NULL, this, dict);
            if (ret) {
                gf_msg(this->name, GF_LOG_WARNING, 0,
                       GD_MSG_COMMIT_REQ_SEND_FAIL,
                       "Failed to send commit request for operation "
                       "'Volume %s' to peer %s",
                       gd_op_list[op], peerinfo->hostname);
                continue;
            }
            pending_count++;
        }
    }
    RCU_READ_UNLOCK;

    opinfo.pending_count = pending_count;
    gf_msg_debug(this->name, 0,
                 "Sent commit op req for 'Volume %s' to %d peers",
                 gd_op_list[op], opinfo.pending_count);
out:
    if (dict)
        dict_unref(dict);

    if (ret)
        opinfo.op_ret = ret;

    ret1 = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret1)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set transaction's opinfo");

    if (ret) {
        glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT, &event->txn_id, NULL);
        opinfo.op_ret = ret;
    }

    if (!opinfo.pending_count) {
        if (op == GD_OP_REPLACE_BRICK) {
            ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
        } else {
            glusterd_op_modify_op_ctx(op, NULL);
            ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
        }
        goto err;
    }

err:
    gf_msg_debug(this->name, 0, "Returning with %d", ret);

    return ret;
}

static int
glusterd_op_ac_rcvd_stage_op_acc(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    GF_ASSERT(event);

    if (opinfo.pending_count > 0)
        opinfo.pending_count--;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set transaction's opinfo");

    if (opinfo.pending_count > 0)
        goto out;

    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_STAGE_ACC, &event->txn_id,
                                      NULL);

out:
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);

    return ret;
}

static int
glusterd_op_ac_stage_op_failed(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    GF_ASSERT(event);

    if (opinfo.pending_count > 0)
        opinfo.pending_count--;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set transaction's opinfo");

    if (opinfo.pending_count > 0)
        goto out;

    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
                                      NULL);

out:
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);

    return ret;
}

static int
glusterd_op_ac_commit_op_failed(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    GF_ASSERT(event);

    if (opinfo.pending_count > 0)
        opinfo.pending_count--;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set transaction's opinfo");

    if (opinfo.pending_count > 0)
        goto out;

    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
                                      NULL);

out:
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);

    return ret;
}

static int
glusterd_remove_pending_entry(struct cds_list_head *list, void *elem)
{
    glusterd_pending_node_t *pending_node = NULL;
    glusterd_pending_node_t *tmp = NULL;
    int ret = 0;

    cds_list_for_each_entry_safe(pending_node, tmp, list, list)
    {
        if (elem == pending_node->node) {
            cds_list_del_init(&pending_node->list);
            GF_FREE(pending_node);
            ret = 0;
            goto out;
        }
    }
out:
    gf_msg_debug(THIS->name, 0, "returning %d", ret);
    return ret;
}

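/* Op state machine action: account for a failed brick-op response, record
 * the first error string received, and fire ALL_ACK once responses from
 * all pending bricks have arrived. */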
static int
glusterd_op_ac_brick_op_failed(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;
    glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
    gf_boolean_t free_errstr = _gf_false;
    xlator_t *this = THIS;

    GF_ASSERT(event);
    GF_ASSERT(ctx);
    ev_ctx = ctx;

    ret = glusterd_remove_pending_entry(&opinfo.pending_bricks,
                                        ev_ctx->pending_node->node);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNKNOWN_RESPONSE,
               "unknown response received");
        ret = -1;
        free_errstr = _gf_true;
        goto out;
    }
    if (opinfo.brick_pending_count > 0)
        opinfo.brick_pending_count--;
    if (opinfo.op_ret == 0)
        opinfo.op_ret = ev_ctx->op_ret;

    if (opinfo.op_errstr == NULL)
        opinfo.op_errstr = ev_ctx->op_errstr;
    else
        free_errstr = _gf_true;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set transaction's opinfo");

    if (opinfo.brick_pending_count > 0)
        goto out;

    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
                                      ev_ctx->commit_ctx);

out:
    if (ev_ctx->rsp_dict)
        dict_unref(ev_ctx->rsp_dict);
    if (free_errstr && ev_ctx->op_errstr)
        GF_FREE(ev_ctx->op_errstr);
    GF_FREE(ctx);
    gf_msg_debug(this->name, 0, "Returning %d", ret);

    return ret;
}

static int
glusterd_op_ac_rcvd_commit_op_acc(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;
    gf_boolean_t commit_ack_inject = _gf_true;
    glusterd_op_t op = GD_OP_NONE;
    xlator_t *this = THIS;

    op = glusterd_op_get_op();
    GF_ASSERT(event);

    if (opinfo.pending_count > 0)
        opinfo.pending_count--;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set transaction's opinfo");

    if (opinfo.pending_count > 0)
        goto out;

    if (op == GD_OP_REPLACE_BRICK) {
        ret = glusterd_op_sm_inject_all_acc(&event->txn_id);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_RBOP_START_FAIL,
                   "Couldn't start replace-brick operation.");
            goto out;
        }

        commit_ack_inject = _gf_false;
        goto out;
    }

out:
    if (commit_ack_inject) {
        if (ret)
            ret = glusterd_op_sm_inject_event(GD_OP_EVENT_RCVD_RJT,
                                              &event->txn_id, NULL);
        else if (!opinfo.pending_count) {
            glusterd_op_modify_op_ctx(op, NULL);
            ret = glusterd_op_sm_inject_event(GD_OP_EVENT_COMMIT_ACC,
                                              &event->txn_id, NULL);
        }
        /* else do nothing */
    }

    return ret;
}

static int
glusterd_op_ac_rcvd_unlock_acc(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    GF_ASSERT(event);

    if (opinfo.pending_count > 0)
        opinfo.pending_count--;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set transaction's opinfo");

    if (opinfo.pending_count > 0)
        goto out;

    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACC, &event->txn_id,
                                      NULL);

    gf_msg_debug(THIS->name, 0, "Returning %d", ret);

out:
    return ret;
}

int32_t
glusterd_op_clear_errstr(void)
{
    opinfo.op_errstr = NULL;
    return 0;
}

int32_t
glusterd_op_set_ctx(void *ctx)
{
    opinfo.op_ctx = ctx;

    return 0;
}

int32_t
glusterd_op_reset_ctx(void)
{
    glusterd_op_set_ctx(NULL);

    return 0;
}

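/* Finish a transaction: release the volume lock if one was held, respond
 * to the CLI with the op result, run any pending quorum action, and clear
 * the transaction's opinfo. */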
int32_t
glusterd_op_txn_complete(uuid_t *txn_id)
{
    int32_t ret = -1;
    glusterd_conf_t *priv = NULL;
    int32_t op = -1;
    int32_t op_ret = 0;
    int32_t op_errno = 0;
    rpcsvc_request_t *req = NULL;
    void *ctx = NULL;
    char *op_errstr = NULL;
    char *volname = NULL;
    xlator_t *this = THIS;

    priv = this->private;
    GF_ASSERT(priv);

    op = glusterd_op_get_op();
    ctx = glusterd_op_get_ctx();
    op_ret = opinfo.op_ret;
    op_errno = opinfo.op_errno;
    req = opinfo.req;
    if (opinfo.op_errstr)
        op_errstr = opinfo.op_errstr;

    opinfo.op_ret = 0;
    opinfo.op_errno = 0;

    ret = dict_get_str(ctx, "volname", &volname);
    if (ret)
        gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
               "No volume name present. Locks have not been held.");

    if (volname) {
        ret = glusterd_mgmt_v3_unlock(volname, MY_UUID, "vol");
        if (ret)
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
                   "Unable to release lock for %s", volname);
    }

    ret = glusterd_op_send_cli_response(op, op_ret, op_errno, req, ctx,
                                        op_errstr);

    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_CLI_RESP,
               "Responding to cli failed, ret: %d", ret);
        /* Ignore this error, else the state machine blocks */
        ret = 0;
    }

    if (op_errstr && (strcmp(op_errstr, "")))
        GF_FREE(op_errstr);

    if (priv->pending_quorum_action)
        glusterd_do_quorum_action();

    /* Clearing the transaction opinfo */
    ret = glusterd_clear_txn_opinfo(txn_id);
    if (ret)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_CLEAR_FAIL,
               "Unable to clear transaction's opinfo");

    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

static int
glusterd_op_ac_unlocked_all(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;

    GF_ASSERT(event);

    ret = glusterd_op_txn_complete(&event->txn_id);

    gf_msg_debug(THIS->name, 0, "Returning %d", ret);

    return ret;
}

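/* Op state machine action on a peer: run stage validation for the received
 * request, attach the transaction id to the response dict and send the
 * stage response back to the originator. */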
static int
glusterd_op_ac_stage_op(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = -1;
    glusterd_req_ctx_t *req_ctx = NULL;
    int32_t status = 0;
    dict_t *rsp_dict = NULL;
    char *op_errstr = NULL;
    dict_t *dict = NULL;
    xlator_t *this = THIS;
    uuid_t *txn_id = NULL;
    glusterd_op_info_t txn_op_info = {
        GD_OP_STATE_DEFAULT,
    };
    glusterd_conf_t *priv = NULL;

    priv = this->private;
    GF_ASSERT(priv);

    GF_ASSERT(ctx);

    req_ctx = ctx;

    dict = req_ctx->dict;

    rsp_dict = dict_new();
    if (!rsp_dict) {
        gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
               "Failed to get new dictionary");
        return -1;
    }

    status = glusterd_op_stage_validate(req_ctx->op, dict, &op_errstr,
                                        rsp_dict);

    if (status) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VALIDATE_FAILED,
               "Stage failed on operation 'Volume %s', Status : %d",
               gd_op_list[req_ctx->op], status);
    }

    txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);

    if (txn_id)
        gf_uuid_copy(*txn_id, event->txn_id);
    else {
        ret = -1;
        goto out;
    }
    ret = glusterd_get_txn_opinfo(&event->txn_id, &txn_op_info);

    ret = dict_set_bin(rsp_dict, "transaction_id", txn_id, sizeof(*txn_id));
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set transaction id.");
        GF_FREE(txn_id);
        txn_id = NULL;
        goto out;
    }

    ret = glusterd_op_stage_send_resp(req_ctx->req, req_ctx->op, status,
                                      op_errstr, rsp_dict);

out:
    if (op_errstr && (strcmp(op_errstr, "")))
        GF_FREE(op_errstr);

    gf_msg_debug(this->name, 0, "Returning with %d", ret);

    /* For no-volname transactions, the txn_opinfo needs to be cleaned up
     * as there's no unlock event triggered. However, if the originator node
     * of this transaction is still running with a version lower than 60000,
     * txn_opinfo can't be cleared, as that would lead to a race of referring
     * to op_ctx after it has been freed.
     */
    if (txn_op_info.skip_locking && priv->op_version >= GD_OP_VERSION_6_0 &&
        txn_id)
        ret = glusterd_clear_txn_opinfo(txn_id);

    if (rsp_dict)
        dict_unref(rsp_dict);

    return ret;
}

static gf_boolean_t
glusterd_need_brick_op(glusterd_op_t op)
{
    gf_boolean_t ret = _gf_false;

    GF_ASSERT(GD_OP_NONE < op && op < GD_OP_MAX);

    switch (op) {
        case GD_OP_PROFILE_VOLUME:
        case GD_OP_STATUS_VOLUME:
        case GD_OP_DEFRAG_BRICK_VOLUME:
        case GD_OP_HEAL_VOLUME:
        case GD_OP_SCRUB_STATUS:
        case GD_OP_SCRUB_ONDEMAND:
            ret = _gf_true;
            break;
        default:
            ret = _gf_false;
    }

    return ret;
}

dict_t *
glusterd_op_init_commit_rsp_dict(glusterd_op_t op)
{
    dict_t *rsp_dict = NULL;
    dict_t *op_ctx = NULL;

    GF_ASSERT(GD_OP_NONE < op && op < GD_OP_MAX);

    if (glusterd_need_brick_op(op)) {
        op_ctx = glusterd_op_get_ctx();
        GF_ASSERT(op_ctx);
        rsp_dict = dict_ref(op_ctx);
    } else {
        rsp_dict = dict_new();
    }

    return rsp_dict;
}

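/* Op state machine action on a peer: perform the commit for the received
 * request (clear-locks runs only on the originator glusterd), attach the
 * transaction id to the response and send the commit response back. */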
static int
glusterd_op_ac_commit_op(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;
    glusterd_req_ctx_t *req_ctx = NULL;
    int32_t status = 0;
    char *op_errstr = NULL;
    dict_t *dict = NULL;
    dict_t *rsp_dict = NULL;
    xlator_t *this = THIS;
    uuid_t *txn_id = NULL;
    glusterd_op_info_t txn_op_info = {
        GD_OP_STATE_DEFAULT,
    };
    gf_boolean_t need_cleanup = _gf_true;

    GF_ASSERT(ctx);

    req_ctx = ctx;

    dict = req_ctx->dict;

    rsp_dict = glusterd_op_init_commit_rsp_dict(req_ctx->op);
    if (NULL == rsp_dict)
        return -1;

    if (GD_OP_CLEARLOCKS_VOLUME == req_ctx->op) {
        /* clear locks should be run only on
         * originator glusterd */
        status = 0;

    } else {
        status = glusterd_op_commit_perform(req_ctx->op, dict, &op_errstr,
                                            rsp_dict);
    }

    if (status)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
               "Commit of operation 'Volume %s' failed: %d",
               gd_op_list[req_ctx->op], status);

    txn_id = GF_MALLOC(sizeof(uuid_t), gf_common_mt_uuid_t);

    if (txn_id)
        gf_uuid_copy(*txn_id, event->txn_id);
    else {
        ret = -1;
        goto out;
    }
    ret = glusterd_get_txn_opinfo(&event->txn_id, &txn_op_info);
    if (ret) {
        gf_msg_callingfn(this->name, GF_LOG_ERROR, 0,
                         GD_MSG_TRANS_OPINFO_GET_FAIL,
                         "Unable to get transaction opinfo "
                         "for transaction ID : %s",
                         uuid_utoa(event->txn_id));
        goto out;
    }

    ret = dict_set_bin(rsp_dict, "transaction_id", txn_id, sizeof(*txn_id));
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
               "Failed to set transaction id.");
        if (txn_op_info.skip_locking)
            ret = glusterd_clear_txn_opinfo(txn_id);
        need_cleanup = _gf_false;
        GF_FREE(txn_id);
        goto out;
    }

    ret = glusterd_op_commit_send_resp(req_ctx->req, req_ctx->op, status,
                                       op_errstr, rsp_dict);

out:
    if (op_errstr && (strcmp(op_errstr, "")))
        GF_FREE(op_errstr);

    if (rsp_dict)
        dict_unref(rsp_dict);
    /* for no-volname transactions, the txn_opinfo needs to be cleaned up
     * as there's no unlock event triggered
     */
    if (need_cleanup && txn_id && txn_op_info.skip_locking)
        ret = glusterd_clear_txn_opinfo(txn_id);
    gf_msg_debug(this->name, 0, "Returning with %d", ret);

    return ret;
}

static int
glusterd_op_ac_send_commit_failed(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;
    glusterd_req_ctx_t *req_ctx = NULL;
    dict_t *op_ctx = NULL;

    GF_ASSERT(ctx);

    req_ctx = ctx;

    op_ctx = glusterd_op_get_ctx();

    ret = glusterd_op_commit_send_resp(req_ctx->req, req_ctx->op, opinfo.op_ret,
                                       opinfo.op_errstr, op_ctx);

    if (opinfo.op_errstr && (strcmp(opinfo.op_errstr, ""))) {
        GF_FREE(opinfo.op_errstr);
        opinfo.op_errstr = NULL;
    }

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set transaction's opinfo");

    gf_msg_debug(THIS->name, 0, "Returning with %d", ret);
    return ret;
}

static int
glusterd_op_sm_transition_state(glusterd_op_info_t *opinfo,
                                glusterd_op_sm_t *state,
                                glusterd_op_sm_event_type_t event_type)
{
    glusterd_conf_t *conf = NULL;

    GF_ASSERT(state);
    GF_ASSERT(opinfo);

    conf = THIS->private;
    GF_ASSERT(conf);

    (void)glusterd_sm_tr_log_transition_add(&conf->op_sm_log, opinfo->state,
                                            state[event_type].next_state,
                                            event_type);

    opinfo->state = state[event_type].next_state;
    return 0;
}

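/* Dispatch the stage (pre-validation) handler for the given op. Each
 * handler validates the request dict and fills op_errstr/rsp_dict on
 * failure. */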
int32_t
glusterd_op_stage_validate(glusterd_op_t op, dict_t *dict, char **op_errstr,
                           dict_t *rsp_dict)
{
    int ret = -1;
    xlator_t *this = THIS;

    switch (op) {
        case GD_OP_CREATE_VOLUME:
            ret = glusterd_op_stage_create_volume(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_START_VOLUME:
            ret = glusterd_op_stage_start_volume(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_STOP_VOLUME:
            ret = glusterd_op_stage_stop_volume(dict, op_errstr);
            break;

        case GD_OP_DELETE_VOLUME:
            ret = glusterd_op_stage_delete_volume(dict, op_errstr);
            break;

        case GD_OP_ADD_BRICK:
            ret = glusterd_op_stage_add_brick(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_REPLACE_BRICK:
            ret = glusterd_op_stage_replace_brick(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_SET_VOLUME:
            ret = glusterd_op_stage_set_volume(dict, op_errstr);
            break;

        case GD_OP_GANESHA:
            ret = glusterd_op_stage_set_ganesha(dict, op_errstr);
            break;

        case GD_OP_RESET_VOLUME:
            ret = glusterd_op_stage_reset_volume(dict, op_errstr);
            break;
        case GD_OP_REMOVE_BRICK:
            ret = glusterd_op_stage_remove_brick(dict, op_errstr);
            break;

        case GD_OP_LOG_ROTATE:
            ret = glusterd_op_stage_log_rotate(dict, op_errstr);
            break;

        case GD_OP_SYNC_VOLUME:
            ret = glusterd_op_stage_sync_volume(dict, op_errstr);
            break;

        case GD_OP_GSYNC_CREATE:
            ret = glusterd_op_stage_gsync_create(dict, op_errstr);
            break;

        case GD_OP_GSYNC_SET:
            ret = glusterd_op_stage_gsync_set(dict, op_errstr);
            break;

        case GD_OP_PROFILE_VOLUME:
            ret = glusterd_op_stage_stats_volume(dict, op_errstr);
            break;

        case GD_OP_QUOTA:
            ret = glusterd_op_stage_quota(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_STATUS_VOLUME:
            ret = glusterd_op_stage_status_volume(dict, op_errstr);
            break;

        case GD_OP_REBALANCE:
        case GD_OP_DEFRAG_BRICK_VOLUME:
            ret = glusterd_op_stage_rebalance(dict, op_errstr);
            break;

        case GD_OP_HEAL_VOLUME:
            ret = glusterd_op_stage_heal_volume(dict, op_errstr);
            break;

        case GD_OP_STATEDUMP_VOLUME:
            ret = glusterd_op_stage_statedump_volume(dict, op_errstr);
            break;
        case GD_OP_CLEARLOCKS_VOLUME:
            ret = glusterd_op_stage_clearlocks_volume(dict, op_errstr);
            break;

        case GD_OP_COPY_FILE:
            ret = glusterd_op_stage_copy_file(dict, op_errstr);
            break;

        case GD_OP_SYS_EXEC:
            ret = glusterd_op_stage_sys_exec(dict, op_errstr);
            break;

        case GD_OP_BARRIER:
            ret = glusterd_op_stage_barrier(dict, op_errstr);
            break;

        case GD_OP_BITROT:
        case GD_OP_SCRUB_STATUS:
        case GD_OP_SCRUB_ONDEMAND:
            ret = glusterd_op_stage_bitrot(dict, op_errstr, rsp_dict);
            break;

        default:
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
                   "Unknown op %s", gd_op_list[op]);
    }

    gf_msg_debug(this->name, 0, "OP = %d. Returning %d", op, ret);
    return ret;
}

static void
glusterd_wait_for_blockers(glusterd_conf_t *priv)
{
    while (GF_ATOMIC_GET(priv->blockers)) {
        synccond_wait(&priv->cond_blockers, &priv->big_lock);
    }
}

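/* Dispatch the commit handler for the given op, wrapped by the pre- and
 * post-commit hooks. Destructive ops (delete-volume, add-brick,
 * replace-brick, remove-brick) first wait for in-flight blockers to
 * drain. */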
int32_t
glusterd_op_commit_perform(glusterd_op_t op, dict_t *dict, char **op_errstr,
                           dict_t *rsp_dict)
{
    int ret = -1;
    xlator_t *this = THIS;

    glusterd_op_commit_hook(op, dict, GD_COMMIT_HOOK_PRE);
    switch (op) {
        case GD_OP_CREATE_VOLUME:
            ret = glusterd_op_create_volume(dict, op_errstr);
            break;

        case GD_OP_START_VOLUME:
            ret = glusterd_op_start_volume(dict, op_errstr);
            break;

        case GD_OP_STOP_VOLUME:
            ret = glusterd_op_stop_volume(dict);
            break;

        case GD_OP_DELETE_VOLUME:
            glusterd_wait_for_blockers(this->private);
            ret = glusterd_op_delete_volume(dict);
            break;

        case GD_OP_ADD_BRICK:
            glusterd_wait_for_blockers(this->private);
            ret = glusterd_op_add_brick(dict, op_errstr);
            break;

        case GD_OP_REPLACE_BRICK:
            glusterd_wait_for_blockers(this->private);
            ret = glusterd_op_replace_brick(dict, rsp_dict);
            break;

        case GD_OP_SET_VOLUME:
            ret = glusterd_op_set_volume(dict, op_errstr);
            break;
        case GD_OP_GANESHA:
            ret = glusterd_op_set_ganesha(dict, op_errstr);
            break;
        case GD_OP_RESET_VOLUME:
            ret = glusterd_op_reset_volume(dict, op_errstr);
            break;

        case GD_OP_REMOVE_BRICK:
            glusterd_wait_for_blockers(this->private);
            ret = glusterd_op_remove_brick(dict, op_errstr);
            break;

        case GD_OP_LOG_ROTATE:
            ret = glusterd_op_log_rotate(dict);
            break;

        case GD_OP_SYNC_VOLUME:
            ret = glusterd_op_sync_volume(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_GSYNC_CREATE:
            ret = glusterd_op_gsync_create(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_GSYNC_SET:
            ret = glusterd_op_gsync_set(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_PROFILE_VOLUME:
            ret = glusterd_op_stats_volume(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_QUOTA:
            ret = glusterd_op_quota(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_STATUS_VOLUME:
            ret = glusterd_op_status_volume(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_REBALANCE:
        case GD_OP_DEFRAG_BRICK_VOLUME:
            ret = glusterd_op_rebalance(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_HEAL_VOLUME:
            ret = glusterd_op_heal_volume(dict, op_errstr);
            break;

        case GD_OP_STATEDUMP_VOLUME:
            ret = glusterd_op_statedump_volume(dict, op_errstr);
            break;

        case GD_OP_CLEARLOCKS_VOLUME:
            ret = glusterd_op_clearlocks_volume(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_COPY_FILE:
            ret = glusterd_op_copy_file(dict, op_errstr);
            break;

        case GD_OP_SYS_EXEC:
            ret = glusterd_op_sys_exec(dict, op_errstr, rsp_dict);
            break;

        case GD_OP_BARRIER:
            ret = glusterd_op_barrier(dict, op_errstr);
            break;

        case GD_OP_BITROT:
        case GD_OP_SCRUB_STATUS:
        case GD_OP_SCRUB_ONDEMAND:
            ret = glusterd_op_bitrot(dict, op_errstr, rsp_dict);
            break;

        default:
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
                   "Unknown op %s", gd_op_list[op]);
            break;
    }

    if (ret == 0)
        glusterd_op_commit_hook(op, dict, GD_COMMIT_HOOK_POST);

    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

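/* Select the started bricks of the volume being stopped and queue them as
 * pending nodes for the brick-op phase. */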
static int
glusterd_bricks_select_stop_volume(dict_t *dict, char **op_errstr,
                                   struct cds_list_head *selected)
{
    int ret = 0;
    int flags = 0;
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    glusterd_pending_node_t *pending_node = NULL;

    ret = glusterd_op_stop_volume_args_get(dict, &volname, &flags);
    if (ret)
        goto out;

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
               FMTSTR_CHECK_VOL_EXISTS, volname);
        gf_asprintf(op_errstr, FMTSTR_CHECK_VOL_EXISTS, volname);
        goto out;
    }

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
    {
        if (glusterd_is_brick_started(brickinfo)) {
            pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                     gf_gld_mt_pending_node_t);
            if (!pending_node) {
                ret = -1;
                goto out;
            } else {
                pending_node->node = brickinfo;
                pending_node->type = GD_NODE_BRICK;
                cds_list_add_tail(&pending_node->list, selected);
                pending_node = NULL;
            }
            /*
             * This is not really the right place to do it, but
             * it's the most convenient.
             * TBD: move this to *after* the RPC
             */
            brickinfo->status = GF_BRICK_STOPPED;
        }
    }

out:
    return ret;
}

static int
glusterd_bricks_select_remove_brick(dict_t *dict, char **op_errstr,
                                    struct cds_list_head *selected)
{
    int ret = -1;
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    char *brick = NULL;
    int32_t count = 0;
    int32_t i = 1;
    char key[64] = {
        0,
    };
    int keylen;
    glusterd_pending_node_t *pending_node = NULL;
    int32_t command = 0;
    int32_t force = 0;

    ret = dict_get_str(dict, "volname", &volname);

    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volume name");
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);

    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
               "Unable to find volinfo for volume %s", volname);
        goto out;
    }

    ret = dict_get_int32(dict, "count", &count);
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
               "Unable to get count");
        goto out;
    }

    ret = dict_get_int32(dict, "command", &command);
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, -ret, GD_MSG_DICT_GET_FAILED,
               "Unable to get command");
        goto out;
    }

    ret = dict_get_int32(dict, "force", &force);
    if (ret) {
        gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_DICT_GET_FAILED,
               "force flag is not set");
        ret = 0;
        goto out;
    }

    while (i <= count) {
        keylen = snprintf(key, sizeof(key), "brick%d", i);

        ret = dict_get_strn(dict, key, keylen, &brick);
        if (ret) {
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "Unable to get brick");
            goto out;
        }

        ret = glusterd_volume_brickinfo_get_by_brick(brick, volinfo, &brickinfo,
                                                     _gf_false);

        if (ret)
            goto out;

        if (glusterd_is_brick_started(brickinfo)) {
            pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                     gf_gld_mt_pending_node_t);
            if (!pending_node) {
                ret = -1;
                goto out;
            } else {
                pending_node->node = brickinfo;
                pending_node->type = GD_NODE_BRICK;
                cds_list_add_tail(&pending_node->list, selected);
                pending_node = NULL;
            }
            /*
             * This is not really the right place to do it, but
             * it's the most convenient.
             * TBD: move this to *after* the RPC
             */
            brickinfo->status = GF_BRICK_STOPPED;
        }
        i++;
    }

out:
    return ret;
}

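/* Select the nodes a 'volume profile' request must be forwarded to, based
 * on the profile sub-command: the NFS server for the nfs variant (when
 * built with gNFS), otherwise the volume's running bricks. */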
static int
glusterd_bricks_select_profile_volume(dict_t *dict, char **op_errstr,
                                      struct cds_list_head *selected)
{
    int ret = -1;
    char *volname = NULL;
    char msg[2048] = {
        0,
    };
    glusterd_conf_t *priv = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    xlator_t *this = THIS;
    int32_t stats_op = GF_CLI_STATS_NONE;
    glusterd_brickinfo_t *brickinfo = NULL;
    glusterd_pending_node_t *pending_node = NULL;
    char *brick = NULL;
    int32_t pid = -1;
    char pidfile[PATH_MAX] = {0};

    priv = this->private;
    GF_ASSERT(priv);

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "volume name get failed");
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);

        *op_errstr = gf_strdup(msg);
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
        goto out;
    }

    ret = dict_get_int32(dict, "op", &stats_op);
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "volume profile op get failed");
        goto out;
    }

    switch (stats_op) {
        case GF_CLI_STATS_START:
        case GF_CLI_STATS_STOP:
            goto out;
            break;
        case GF_CLI_STATS_INFO:
#ifdef BUILD_GNFS
            ret = dict_get_str_boolean(dict, "nfs", _gf_false);
            if (ret) {
                if (!priv->nfs_svc.online) {
                    ret = -1;
                    gf_msg(this->name, GF_LOG_ERROR, 0,
                           GD_MSG_NFS_SERVER_NOT_RUNNING,
                           "NFS server is not running");
                    goto out;
                }
                pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                         gf_gld_mt_pending_node_t);
                if (!pending_node) {
                    ret = -1;
                    goto out;
                }
                pending_node->node = &(priv->nfs_svc);
                pending_node->type = GD_NODE_NFS;
                cds_list_add_tail(&pending_node->list, selected);
                pending_node = NULL;

                ret = 0;
                goto out;
            }
#endif
            cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
            {
                if (glusterd_is_brick_started(brickinfo)) {
                    /*
                     * In normal use, glusterd_is_brick_started
                     * will give us the answer we need.  However,
                     * in our tests the brick gets detached behind
                     * our back, so we need to double-check this
                     * way.
                     */
                    GLUSTERD_GET_BRICK_PIDFILE(pidfile, volinfo, brickinfo,
                                               priv);
                    if (!gf_is_service_running(pidfile, &pid)) {
                        continue;
                    }
                    pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                             gf_gld_mt_pending_node_t);
                    if (!pending_node) {
                        ret = -1;
                        goto out;
                    } else {
                        pending_node->node = brickinfo;
                        pending_node->type = GD_NODE_BRICK;
                        cds_list_add_tail(&pending_node->list, selected);
                        pending_node = NULL;
                    }
                }
            }
            break;

        case GF_CLI_STATS_TOP:
#ifdef BUILD_GNFS
            ret = dict_get_str_boolean(dict, "nfs", _gf_false);
            if (ret) {
                if (!priv->nfs_svc.online) {
                    ret = -1;
                    gf_msg(this->name, GF_LOG_ERROR, 0,
                           GD_MSG_NFS_SERVER_NOT_RUNNING,
                           "NFS server is not running");
                    goto out;
                }
                pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                         gf_gld_mt_pending_node_t);
                if (!pending_node) {
                    ret = -1;
                    goto out;
                }
                pending_node->node = &(priv->nfs_svc);
                pending_node->type = GD_NODE_NFS;
                cds_list_add_tail(&pending_node->list, selected);
                pending_node = NULL;

                ret = 0;
                goto out;
            }
#endif
            ret = dict_get_str(dict, "brick", &brick);
            if (!ret) {
                ret = glusterd_volume_brickinfo_get_by_brick(
                    brick, volinfo, &brickinfo, _gf_true);
                if (ret)
                    goto out;

                if (!glusterd_is_brick_started(brickinfo))
                    goto out;

                pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                         gf_gld_mt_pending_node_t);
                if (!pending_node) {
                    ret = -1;
                    goto out;
                } else {
                    pending_node->node = brickinfo;
                    pending_node->type = GD_NODE_BRICK;
                    cds_list_add_tail(&pending_node->list, selected);
                    pending_node = NULL;
                    goto out;
                }
            }
            ret = 0;
            cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
            {
                if (glusterd_is_brick_started(brickinfo)) {
                    pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                             gf_gld_mt_pending_node_t);
                    if (!pending_node) {
                        ret = -1;
                        goto out;
                    } else {
                        pending_node->node = brickinfo;
                        pending_node->type = GD_NODE_BRICK;
                        cds_list_add_tail(&pending_node->list, selected);
                        pending_node = NULL;
                    }
                }
            }
            break;

        default:
            GF_ASSERT(0);
            gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_INVALID_ENTRY,
                   "Invalid profile op: %d", stats_op);
            ret = -1;
            goto out;
            break;
    }

out:
    gf_msg_debug("glusterd", 0, "Returning %d", ret);

    return ret;
}

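/* The helpers below map bricks to their heal xlator ("hxlator"), i.e. the
 * replicate/disperse subvolume a brick belongs to.  With C children per
 * subvolume (replica_count or disperse_count), bricks are grouped C at a
 * time, so the 1-based brick index i maps to hxlator (i - 1) / C. */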
int
_get_hxl_children_count(glusterd_volinfo_t *volinfo)
{
    if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
        return volinfo->disperse_count;
    } else {
        return volinfo->replica_count;
    }
}

static int
_add_hxlator_to_dict(dict_t *dict, glusterd_volinfo_t *volinfo, int index,
                     int count)
{
    int ret = -1;
    char key[64] = {
        0,
    };
    int keylen;
    char *xname = NULL;
    char *xl_type = NULL;

    if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
        xl_type = "disperse";
    } else {
        xl_type = "replicate";
    }
    keylen = snprintf(key, sizeof(key), "xl-%d", count);
    ret = gf_asprintf(&xname, "%s-%s-%d", volinfo->volname, xl_type, index);
    if (ret == -1)
        goto out;

    ret = dict_set_dynstrn(dict, key, keylen, xname);
    if (ret)
        goto out;

    ret = dict_set_int32(dict, xname, index);
out:
    return ret;
}

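/* Look up the brick named by "per-replica-cmd-hostname" and
 * "per-replica-cmd-path" in dict and return the replica group it belongs
 * to (its brick position divided by replica_count), or -1 if the brick
 * cannot be found. */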
int
get_replica_index_for_per_replica_cmd(glusterd_volinfo_t *volinfo, dict_t *dict)
{
    int ret = 0;
    char *hostname = NULL;
    char *path = NULL;
    int index = 0;
    glusterd_brickinfo_t *brickinfo = NULL;
    int cmd_replica_index = -1;
    int replica_count = -1;

    if (!dict) {
        ret = -1;
        goto out;
    }

    ret = dict_get_str(dict, "per-replica-cmd-hostname", &hostname);
    if (ret)
        goto out;
    ret = dict_get_str(dict, "per-replica-cmd-path", &path);
    if (ret)
        goto out;

    replica_count = volinfo->replica_count;

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
    {
        if (gf_uuid_is_null(brickinfo->uuid))
            (void)glusterd_resolve_brick(brickinfo);
        if (!strcmp(brickinfo->path, path) &&
            !strcmp(brickinfo->hostname, hostname)) {
            cmd_replica_index = index / (replica_count);
            goto out;
        }
        index++;
    }

out:
    if (ret)
        cmd_replica_index = -1;

    return cmd_replica_index;
}

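/* Select the one hxlator that contains the local brick matching
 * "per-replica-cmd-path".  Returns 1 if a matching hxlator was added to
 * dict, 0 if no local brick matched, and -1 if the path is missing. */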
int
_select_hxlator_with_matching_brick(xlator_t *this, glusterd_volinfo_t *volinfo,
                                    dict_t *dict, int *index)
{
    char *path = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    int hxl_children = 0;

    if (!dict || dict_get_str(dict, "per-replica-cmd-path", &path))
        return -1;

    hxl_children = _get_hxl_children_count(volinfo);
    if ((*index) == 0)
        (*index)++;

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
    {
        if (gf_uuid_is_null(brickinfo->uuid))
            (void)glusterd_resolve_brick(brickinfo);

        if ((!gf_uuid_compare(MY_UUID, brickinfo->uuid)) &&
            (!strncmp(brickinfo->path, path, strlen(path)))) {
            _add_hxlator_to_dict(dict, volinfo, ((*index) - 1) / hxl_children,
                                 0);
            return 1;
        }
        (*index)++;
    }

    return 0;
}
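
/* Add every hxlator that has at least one brick on this node: "add" is
 * latched whenever a local brick is seen and consumed at each subvolume
 * boundary, i.e. when (*index) is a multiple of hxl_children. */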
void
_select_hxlators_with_local_bricks(xlator_t *this, glusterd_volinfo_t *volinfo,
                                   dict_t *dict, int *index, int *hxlator_count)
{
    glusterd_brickinfo_t *brickinfo = NULL;
    int hxl_children = 0;
    gf_boolean_t add = _gf_false;

    hxl_children = _get_hxl_children_count(volinfo);

    if ((*index) == 0)
        (*index)++;

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
    {
        if (gf_uuid_is_null(brickinfo->uuid))
            (void)glusterd_resolve_brick(brickinfo);

        if (!gf_uuid_compare(MY_UUID, brickinfo->uuid))
            add = _gf_true;

        if ((*index) % hxl_children == 0) {
            if (add) {
                _add_hxlator_to_dict(dict, volinfo,
                                     ((*index) - 1) / hxl_children,
                                     (*hxlator_count));
                (*hxlator_count)++;
            }
            add = _gf_false;
        }

        (*index)++;
    }
}

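/* For a full self-heal only one node per subvolume should trigger the
 * crawl.  The first pass computes candidate_max, the largest UUID among
 * this node and the connected peers.  The second pass elects one deciding
 * brick per group of hxl_children bricks (the group's last brick, shifted
 * back one position for each successive group via delta): if the deciding
 * brick is local, this node claims the hxlator; if its owner is a
 * connected peer, that peer claims it; if the owner is down, this node
 * claims it only when it holds candidate_max.  E.g. with replica 3 and 6
 * bricks, bricks 1-3 decide hxlator 0 and bricks 4-6 decide hxlator 1. */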
int
_select_hxlators_for_full_self_heal(xlator_t *this, glusterd_volinfo_t *volinfo,
                                    dict_t *dict, int *index,
                                    int *hxlator_count)
{
    glusterd_brickinfo_t *brickinfo = NULL;
    int hxl_children = 0;
    uuid_t candidate = {0};
    int brick_index = 0;
    glusterd_peerinfo_t *peerinfo = NULL;
    int delta = 0;
    uuid_t candidate_max = {0};

    if ((*index) == 0)
        (*index)++;
    if (volinfo->type == GF_CLUSTER_TYPE_DISPERSE) {
        hxl_children = volinfo->disperse_count;
    } else {
        hxl_children = volinfo->replica_count;
    }

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
    {
        if (gf_uuid_compare(brickinfo->uuid, candidate_max) > 0) {
            if (!gf_uuid_compare(MY_UUID, brickinfo->uuid)) {
                gf_uuid_copy(candidate_max, brickinfo->uuid);
            } else {
                peerinfo = glusterd_peerinfo_find(brickinfo->uuid, NULL);
                if (peerinfo && peerinfo->connected) {
                    gf_uuid_copy(candidate_max, brickinfo->uuid);
                }
            }
        }
    }

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
    {
        if (gf_uuid_is_null(brickinfo->uuid))
            (void)glusterd_resolve_brick(brickinfo);

        delta %= hxl_children;
        if ((*index + delta) == (brick_index + hxl_children)) {
            if (!gf_uuid_compare(MY_UUID, brickinfo->uuid)) {
                gf_uuid_copy(candidate, brickinfo->uuid);
            } else {
                peerinfo = glusterd_peerinfo_find(brickinfo->uuid, NULL);
                if (peerinfo && peerinfo->connected) {
                    gf_uuid_copy(candidate, brickinfo->uuid);
                } else if (peerinfo &&
                           (!gf_uuid_compare(candidate_max, MY_UUID))) {
                    _add_hxlator_to_dict(dict, volinfo,
                                         ((*index) - 1) / hxl_children,
                                         (*hxlator_count));
                    (*hxlator_count)++;
                }
            }

            if (!gf_uuid_compare(MY_UUID, candidate)) {
                _add_hxlator_to_dict(dict, volinfo,
                                     ((*index) - 1) / hxl_children,
                                     (*hxlator_count));
                (*hxlator_count)++;
            }
            gf_uuid_clear(candidate);
            brick_index += hxl_children;
            delta++;
        }

        (*index)++;
    }
    return *hxlator_count;
}

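/* Snapshot brick ops go to every started brick of the volume that lives
 * on this node; pending_node->index records the brick's position so the
 * response can be matched back to it. */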
static int
glusterd_bricks_select_snap(dict_t *dict, char **op_errstr,
                            struct cds_list_head *selected)
{
    int ret = -1;
    xlator_t *this = THIS;
    glusterd_pending_node_t *pending_node = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    char *volname = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    int brick_index = -1;

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volname");
        goto out;
    }
    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret)
        goto out;

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
    {
        brick_index++;
        if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
            !glusterd_is_brick_started(brickinfo)) {
            continue;
        }
        pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                 gf_gld_mt_pending_node_t);
        if (!pending_node) {
            ret = -1;
            goto out;
        }
        pending_node->node = brickinfo;
        pending_node->type = GD_NODE_BRICK;
        pending_node->index = brick_index;
        cds_list_add_tail(&pending_node->list, selected);
        pending_node = NULL;
    }

    ret = 0;

out:
    gf_msg_debug(this->name, 0, "Returning ret %d", ret);
    return ret;
}

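/* When the local self-heal daemon is down, pre-fill "<index>-status" and
 * "<index>-shd-status" entries for the local bricks so the CLI can still
 * report why heal information is missing.  For PER_HEAL_XL only the
 * bricks of the replica addressed by req_dict are filled in. */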
static int
fill_shd_status_for_local_bricks(dict_t *dict, glusterd_volinfo_t *volinfo,
                                 cli_cmd_type type, int *index,
                                 dict_t *req_dict)
{
    glusterd_brickinfo_t *brickinfo = NULL;
    static char *msg = "self-heal-daemon is not running on";
    char key[32] = {
        0,
    };
    int keylen;
    char value[128] = {
        0,
    };
    int ret = 0;
    xlator_t *this = THIS;
    int cmd_replica_index = -1;

    if (type == PER_HEAL_XL) {
        cmd_replica_index = get_replica_index_for_per_replica_cmd(volinfo,
                                                                  req_dict);
        if (cmd_replica_index == -1) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REPLICA_INDEX_GET_FAIL,
                   "Could not find the replica index for per replica type "
                   "command");
            ret = -1;
            goto out;
        }
    }

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
    {
        if (gf_uuid_is_null(brickinfo->uuid))
            (void)glusterd_resolve_brick(brickinfo);

        if (gf_uuid_compare(MY_UUID, brickinfo->uuid)) {
            (*index)++;
            continue;
        }

        if (type == PER_HEAL_XL) {
            if (cmd_replica_index != ((*index) / volinfo->replica_count)) {
                (*index)++;
                continue;
            }
        }
        keylen = snprintf(key, sizeof(key), "%d-status", (*index));
        snprintf(value, sizeof(value), "%s %s", msg, uuid_utoa(MY_UUID));
        ret = dict_set_dynstrn(dict, key, keylen, gf_strdup(value));
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                   "Unable to set the dictionary for shd status msg");
            goto out;
        }
        keylen = snprintf(key, sizeof(key), "%d-shd-status", (*index));
        ret = dict_set_nstrn(dict, key, keylen, "off", SLEN("off"));
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SET_FAILED,
                   "Unable to set dictionary for shd status msg");
            goto out;
        }

        (*index)++;
    }

out:
    return ret;
}
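
/* Choose the hxlators a heal operation applies to and record them in
 * dict.  If the self-heal daemon is offline, the summary/count ops fall
 * back to filling the status strings above instead of selecting anything.
 * On success returns the number of hxlators selected. */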
int
glusterd_shd_select_brick_xlator(dict_t *dict, gf_xl_afr_op_t heal_op,
                                 glusterd_volinfo_t *volinfo, int *index,
                                 int *hxlator_count, dict_t *rsp_dict)
{
    int ret = -1;
    xlator_t *this = THIS;
    glusterd_svc_t *svc = NULL;

    svc = &(volinfo->shd.svc);

    switch (heal_op) {
        case GF_SHD_OP_INDEX_SUMMARY:
        case GF_SHD_OP_STATISTICS_HEAL_COUNT:
            if (!svc->online) {
                if (!rsp_dict) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
                           "Received empty ctx.");
                    goto out;
                }

                ret = fill_shd_status_for_local_bricks(
                    rsp_dict, volinfo, ALL_HEAL_XL, index, dict);
                if (ret)
                    gf_msg(this->name, GF_LOG_ERROR, 0,
                           GD_MSG_SHD_STATUS_SET_FAIL,
                           "Unable to fill the shd status for the local "
                           "bricks");
                goto out;
            }
            break;

        case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
            if (!svc->online) {
                if (!rsp_dict) {
                    gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPCTX_NULL,
                           "Received empty ctx.");
                    goto out;
                }
                ret = fill_shd_status_for_local_bricks(
                    rsp_dict, volinfo, PER_HEAL_XL, index, dict);
                if (ret)
                    gf_msg(this->name, GF_LOG_ERROR, 0,
                           GD_MSG_SHD_STATUS_SET_FAIL,
                           "Unable to fill the shd status for the local "
                           "bricks.");
                goto out;
            }
            break;

        default:
            break;
    }

    switch (heal_op) {
        case GF_SHD_OP_HEAL_FULL:
            _select_hxlators_for_full_self_heal(this, volinfo, dict, index,
                                                hxlator_count);
            break;
        case GF_SHD_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
            (*hxlator_count) += _select_hxlator_with_matching_brick(
                this, volinfo, dict, index);
            break;
        default:
            _select_hxlators_with_local_bricks(this, volinfo, dict, index,
                                               hxlator_count);
            break;
    }
    ret = (*hxlator_count);
out:
    return ret;
}

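/* Heal requests are serviced by the volume's self-heal daemon rather than
 * by the bricks themselves: once the hxlators are chosen and their count
 * stored in dict, a single GD_NODE_SHD pending node is queued. */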
static int
glusterd_bricks_select_heal_volume(dict_t *dict, char **op_errstr,
                                   struct cds_list_head *selected,
                                   dict_t *rsp_dict)
{
    int ret = -1;
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    xlator_t *this = THIS;
    char msg[2048] = {
        0,
    };
    glusterd_pending_node_t *pending_node = NULL;
    gf_xl_afr_op_t heal_op = GF_SHD_OP_INVALID;
    int hxlator_count = 0;
    int index = 0;

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "volume name get failed");
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);

        *op_errstr = gf_strdup(msg);
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
        goto out;
    }

    ret = dict_get_int32(dict, "heal-op", (int32_t *)&heal_op);
    if (ret || (heal_op == GF_SHD_OP_INVALID)) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "heal op invalid");
        goto out;
    }
    ret = glusterd_shd_select_brick_xlator(dict, heal_op, volinfo, &index,
                                           &hxlator_count, rsp_dict);
    if (ret < 0) {
        goto out;
    }

    if (!hxlator_count)
        goto out;
    if (hxlator_count == -1) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_XLATOR_COUNT_GET_FAIL,
               "Could not determine the translator count");
        ret = -1;
        goto out;
    }

    ret = dict_set_int32_sizen(dict, "count", hxlator_count);
    if (ret)
        goto out;
    pending_node = GF_CALLOC(1, sizeof(*pending_node),
                             gf_gld_mt_pending_node_t);
    if (!pending_node) {
        ret = -1;
        goto out;
    } else {
        pending_node->node = &(volinfo->shd.svc);
        pending_node->type = GD_NODE_SHD;
        cds_list_add_tail(&pending_node->list, selected);
        pending_node = NULL;
    }

out:
    gf_msg_debug(this->name, 0, "Returning ret %d", ret);
    return ret;
}

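/* Rebalance/defrag brick ops target the per-volume rebalance process, so
 * the volinfo itself is queued as a single GD_NODE_REBALANCE node. */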
static int
glusterd_bricks_select_rebalance_volume(dict_t *dict, char **op_errstr,
                                        struct cds_list_head *selected)
{
    int ret = -1;
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    char msg[2048] = {
        0,
    };
    glusterd_pending_node_t *pending_node = NULL;

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "volume name get failed");
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);

        *op_errstr = gf_strdup(msg);
        gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND, "%s", msg);
        goto out;
    }
    pending_node = GF_CALLOC(1, sizeof(*pending_node),
                             gf_gld_mt_pending_node_t);
    if (!pending_node) {
        ret = -1;
        goto out;
    } else {
        pending_node->node = volinfo;
        pending_node->type = GD_NODE_REBALANCE;
        cds_list_add_tail(&pending_node->list, selected);
        pending_node = NULL;
    }

out:
    return ret;
}

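/* Route a 'volume status' detail request to its responder: the named
 * brick, one of the volume or cluster services (NFS, self-heal daemon,
 * quotad, bitd, scrubber, snapd) after checking that it is online, or by
 * default every started brick of the volume on this node. */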
static int
glusterd_bricks_select_status_volume(dict_t *dict, char **op_errstr,
                                     struct cds_list_head *selected)
{
    int ret = -1;
    int cmd = 0;
    int brick_index = -1;
    char *volname = NULL;
    char *brickname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    glusterd_pending_node_t *pending_node = NULL;
    xlator_t *this = THIS;
    glusterd_conf_t *priv = NULL;
    glusterd_svc_t *svc = NULL;

    GF_ASSERT(dict);

    priv = this->private;
    GF_ASSERT(priv);

    ret = dict_get_int32(dict, "cmd", &cmd);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get status type");
        goto out;
    }

    if (cmd & GF_CLI_STATUS_ALL)
        goto out;

    switch (cmd & GF_CLI_STATUS_MASK) {
        case GF_CLI_STATUS_MEM:
        case GF_CLI_STATUS_CLIENTS:
        case GF_CLI_STATUS_INODE:
        case GF_CLI_STATUS_FD:
        case GF_CLI_STATUS_CALLPOOL:
        case GF_CLI_STATUS_NFS:
        case GF_CLI_STATUS_SHD:
        case GF_CLI_STATUS_QUOTAD:
        case GF_CLI_STATUS_SNAPD:
        case GF_CLI_STATUS_BITD:
        case GF_CLI_STATUS_SCRUB:
        case GF_CLI_STATUS_CLIENT_LIST:
            break;
        default:
            goto out;
    }
    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volname");
        goto out;
    }
    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        goto out;
    }

    if ((cmd & GF_CLI_STATUS_BRICK) != 0) {
        ret = dict_get_str(dict, "brick", &brickname);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
                   "Unable to get brick");
            goto out;
        }
        ret = glusterd_volume_brickinfo_get_by_brick(brickname, volinfo,
                                                     &brickinfo, _gf_false);
        if (ret)
            goto out;

        if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
            !glusterd_is_brick_started(brickinfo))
            goto out;

        pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                 gf_gld_mt_pending_node_t);
        if (!pending_node) {
            ret = -1;
            goto out;
        }
        pending_node->node = brickinfo;
        pending_node->type = GD_NODE_BRICK;
        pending_node->index = 0;
        cds_list_add_tail(&pending_node->list, selected);

        ret = 0;
#ifdef BUILD_GNFS
    } else if ((cmd & GF_CLI_STATUS_NFS) != 0) {
        if (!priv->nfs_svc.online) {
            ret = -1;
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_NFS_SERVER_NOT_RUNNING,
                   "NFS server is not running");
            goto out;
        }
        pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                 gf_gld_mt_pending_node_t);
        if (!pending_node) {
            ret = -1;
            goto out;
        }
        pending_node->node = &(priv->nfs_svc);
        pending_node->type = GD_NODE_NFS;
        pending_node->index = 0;
        cds_list_add_tail(&pending_node->list, selected);

        ret = 0;
#endif
    } else if ((cmd & GF_CLI_STATUS_SHD) != 0) {
        svc = &(volinfo->shd.svc);
        if (!svc->online) {
            ret = -1;
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SELF_HEALD_DISABLED,
                   "Self-heal daemon is not running");
            goto out;
        }
        pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                 gf_gld_mt_pending_node_t);
        if (!pending_node) {
            ret = -1;
            goto out;
        }
        pending_node->node = svc;
        pending_node->type = GD_NODE_SHD;
        pending_node->index = 0;
        cds_list_add_tail(&pending_node->list, selected);

        ret = 0;
    } else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0) {
        if (!priv->quotad_svc.online) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_QUOTAD_NOT_RUNNING,
                   "Quotad is not running");
            ret = -1;
            goto out;
        }
        pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                 gf_gld_mt_pending_node_t);
        if (!pending_node) {
            ret = -1;
            goto out;
        }
        pending_node->node = &(priv->quotad_svc);
        pending_node->type = GD_NODE_QUOTAD;
        pending_node->index = 0;
        cds_list_add_tail(&pending_node->list, selected);

        ret = 0;
    } else if ((cmd & GF_CLI_STATUS_BITD) != 0) {
        if (!priv->bitd_svc.online) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BITROT_NOT_RUNNING,
                   "Bitrot is not running");
            ret = -1;
            goto out;
        }
        pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                 gf_gld_mt_pending_node_t);
        if (!pending_node) {
            ret = -1;
            goto out;
        }
        pending_node->node = &(priv->bitd_svc);
        pending_node->type = GD_NODE_BITD;
        pending_node->index = 0;
        cds_list_add_tail(&pending_node->list, selected);

        ret = 0;
    } else if ((cmd & GF_CLI_STATUS_SCRUB) != 0) {
        if (!priv->scrub_svc.online) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SCRUBBER_NOT_RUNNING,
                   "Scrubber is not running");
            ret = -1;
            goto out;
        }
        pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                 gf_gld_mt_pending_node_t);
        if (!pending_node) {
            ret = -1;
            goto out;
        }
        pending_node->node = &(priv->scrub_svc);
        pending_node->type = GD_NODE_SCRUB;
        pending_node->index = 0;
        cds_list_add_tail(&pending_node->list, selected);

        ret = 0;
    } else if ((cmd & GF_CLI_STATUS_SNAPD) != 0) {
        if (!volinfo->snapd.svc.online) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SNAPD_NOT_RUNNING,
                   "snapd is not running");
            ret = -1;
            goto out;
        }
        pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                 gf_gld_mt_pending_node_t);
        if (!pending_node) {
            gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
                   "failed to allocate memory for pending node");
            ret = -1;
            goto out;
        }

        pending_node->node = (void *)(&volinfo->snapd);
        pending_node->type = GD_NODE_SNAPD;
        pending_node->index = 0;
        cds_list_add_tail(&pending_node->list, selected);

        ret = 0;
    } else {
        cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
        {
            brick_index++;
            if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
                !glusterd_is_brick_started(brickinfo)) {
                continue;
            }
            pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                     gf_gld_mt_pending_node_t);
            if (!pending_node) {
                ret = -1;
                gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
                       "Unable to allocate memory");
                goto out;
            }
            pending_node->node = brickinfo;
            pending_node->type = GD_NODE_BRICK;
            pending_node->index = brick_index;
            cds_list_add_tail(&pending_node->list, selected);
            pending_node = NULL;
        }
    }
out:
    return ret;
}

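/* Scrub status/on-demand requests go to the scrubber daemon.  A scrubber
 * that is not running is not an error here; the request simply selects
 * nothing. */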
static int
glusterd_bricks_select_scrub(dict_t *dict, char **op_errstr,
                             struct cds_list_head *selected)
{
    int ret = -1;
    char *volname = NULL;
    char msg[2048] = {
        0,
    };
    xlator_t *this = THIS;
    glusterd_conf_t *priv = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_pending_node_t *pending_node = NULL;

    priv = this->private;
    GF_ASSERT(priv);

    GF_ASSERT(dict);

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Unable to get volname");
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        snprintf(msg, sizeof(msg), "Volume %s does not exist", volname);

        *op_errstr = gf_strdup(msg);
        gf_msg(this->name, GF_LOG_ERROR, EINVAL, GD_MSG_VOL_NOT_FOUND, "%s",
               msg);
        goto out;
    }

    if (!priv->scrub_svc.online) {
        ret = 0;
        snprintf(msg, sizeof(msg), "Scrubber daemon is not running");

        gf_msg_debug(this->name, 0, "%s", msg);
        goto out;
    }

    pending_node = GF_CALLOC(1, sizeof(*pending_node),
                             gf_gld_mt_pending_node_t);
    if (!pending_node) {
        ret = -1;
        goto out;
    }

    pending_node->node = &(priv->scrub_svc);
    pending_node->type = GD_NODE_SCRUB;
    cds_list_add_tail(&pending_node->list, selected);
    pending_node = NULL;
out:
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}
/* Select the bricks to send the barrier request to.
 * This selects the bricks of the given volume which are present on this peer
 * and are running.
 */
static int
glusterd_bricks_select_barrier(dict_t *dict, struct cds_list_head *selected)
{
    int ret = -1;
    char *volname = NULL;
    glusterd_volinfo_t *volinfo = NULL;
    glusterd_brickinfo_t *brickinfo = NULL;
    glusterd_pending_node_t *pending_node = NULL;

    GF_ASSERT(dict);

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_DICT_GET_FAILED,
               "Failed to get volname");
        goto out;
    }

    ret = glusterd_volinfo_find(volname, &volinfo);
    if (ret) {
        gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOL_NOT_FOUND,
               "Failed to find volume %s", volname);
        goto out;
    }

    cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
    {
        if (gf_uuid_compare(brickinfo->uuid, MY_UUID) ||
            !glusterd_is_brick_started(brickinfo)) {
            continue;
        }
        pending_node = GF_CALLOC(1, sizeof(*pending_node),
                                 gf_gld_mt_pending_node_t);
        if (!pending_node) {
            ret = -1;
            goto out;
        }
        pending_node->node = brickinfo;
        pending_node->type = GD_NODE_BRICK;
        cds_list_add_tail(&pending_node->list, selected);
        pending_node = NULL;
    }

out:
    gf_msg_debug(THIS->name, 0, "Returning %d", ret);
    return ret;
}

static int
glusterd_clear_pending_nodes(struct cds_list_head *list)
{
    glusterd_pending_node_t *pending_node = NULL;
    glusterd_pending_node_t *tmp = NULL;

    cds_list_for_each_entry_safe(pending_node, tmp, list, list)
    {
        cds_list_del_init(&pending_node->list);
        GF_FREE(pending_node);
    }

    return 0;
}

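/* State-machine action: build (or reuse) the request context and fan the
 * brick op out through the GLUSTERD_BRICK_OP RPC procedure.  If nothing
 * is pending afterwards, inject GD_OP_EVENT_ALL_ACK directly. */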
static int
glusterd_op_ac_send_brick_op(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = 0;
    rpc_clnt_procedure_t *proc = NULL;
    glusterd_conf_t *priv = NULL;
    xlator_t *this = THIS;
    glusterd_op_t op = GD_OP_NONE;
    glusterd_req_ctx_t *req_ctx = NULL;
    char *op_errstr = NULL;
    gf_boolean_t free_req_ctx = _gf_false;

    priv = this->private;

    if (ctx) {
        req_ctx = ctx;
    } else {
        req_ctx = GF_CALLOC(1, sizeof(*req_ctx), gf_gld_mt_op_allack_ctx_t);
        if (!req_ctx)
            goto out;
        free_req_ctx = _gf_true;
        op = glusterd_op_get_op();
        req_ctx->op = op;
        gf_uuid_copy(req_ctx->uuid, MY_UUID);
        ret = glusterd_op_build_payload(&req_ctx->dict, &op_errstr, NULL);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0,
                   GD_MSG_BRICK_OP_PAYLOAD_BUILD_FAIL, LOGSTR_BUILD_PAYLOAD,
                   gd_op_list[op]);
            if (op_errstr == NULL)
                gf_asprintf(&op_errstr, OPERRSTR_BUILD_PAYLOAD);
            opinfo.op_errstr = op_errstr;
            goto out;
        }
    }

    proc = &priv->gfs_mgmt->proctable[GLUSTERD_BRICK_OP];
    if (proc->fn) {
        ret = proc->fn(NULL, this, req_ctx);
        if (ret)
            goto out;
    }

    if (!opinfo.pending_count && !opinfo.brick_pending_count) {
        glusterd_clear_pending_nodes(&opinfo.pending_bricks);
        ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
                                          req_ctx);
    }

out:
    if (ret && free_req_ctx)
        GF_FREE(req_ctx);
    gf_msg_debug(this->name, 0, "Returning with %d", ret);

    return ret;
}

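/* State-machine action: account one brick/service response, fold it into
 * the op context via glusterd_handle_node_rsp(), and inject
 * GD_OP_EVENT_ALL_ACK once the last pending response has arrived. */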
static int
glusterd_op_ac_rcvd_brick_op_acc(glusterd_op_sm_event_t *event, void *ctx)
{
    int ret = -1;
    glusterd_op_brick_rsp_ctx_t *ev_ctx = NULL;
    char *op_errstr = NULL;
    glusterd_op_t op = GD_OP_NONE;
    gd_node_type type = GD_NODE_NONE;
    dict_t *op_ctx = NULL;
    glusterd_req_ctx_t *req_ctx = NULL;
    void *pending_entry = NULL;
    xlator_t *this = THIS;

    GF_VALIDATE_OR_GOTO(this->name, event, out);
    GF_VALIDATE_OR_GOTO(this->name, ctx, out);
    ev_ctx = ctx;
    GF_VALIDATE_OR_GOTO(this->name, ev_ctx, out);

    req_ctx = ev_ctx->commit_ctx;
    GF_VALIDATE_OR_GOTO(this->name, req_ctx, out);

    op = req_ctx->op;
    op_ctx = glusterd_op_get_ctx();
    pending_entry = ev_ctx->pending_node->node;
    type = ev_ctx->pending_node->type;

    ret = glusterd_remove_pending_entry(&opinfo.pending_bricks, pending_entry);
    if (ret) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_UNKNOWN_RESPONSE,
               "unknown response received");
        ret = -1;
        goto out;
    }

    if (opinfo.brick_pending_count > 0)
        opinfo.brick_pending_count--;

    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
    if (ret)
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
               "Unable to set transaction's opinfo");

    glusterd_handle_node_rsp(req_ctx->dict, pending_entry, op, ev_ctx->rsp_dict,
                             op_ctx, &op_errstr, type);

    if (opinfo.brick_pending_count > 0)
        goto out;

    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, &event->txn_id,
                                      ev_ctx->commit_ctx);

out:
    if (ev_ctx && ev_ctx->rsp_dict)
        dict_unref(ev_ctx->rsp_dict);
    GF_FREE(ev_ctx);
    gf_msg_debug(this->name, 0, "Returning %d", ret);
    return ret;
}

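/* Dispatch an operation to its brick-selection routine above; operations
 * without brick-side work fall through and select nothing. */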
int32_t
glusterd_op_bricks_select(glusterd_op_t op, dict_t *dict, char **op_errstr,
                          struct cds_list_head *selected, dict_t *rsp_dict)
{
    int ret = 0;

    GF_ASSERT(dict);
    GF_ASSERT(op_errstr);
    GF_ASSERT(op > GD_OP_NONE);
    GF_ASSERT(op < GD_OP_MAX);

    switch (op) {
        case GD_OP_STOP_VOLUME:
            ret = glusterd_bricks_select_stop_volume(dict, op_errstr, selected);
            break;
        case GD_OP_REMOVE_BRICK:
            ret = glusterd_bricks_select_remove_brick(dict, op_errstr,
                                                      selected);
            break;

        case GD_OP_PROFILE_VOLUME:
            ret = glusterd_bricks_select_profile_volume(dict, op_errstr,
                                                        selected);
            break;

        case GD_OP_HEAL_VOLUME:
            ret = glusterd_bricks_select_heal_volume(dict, op_errstr, selected,
                                                     rsp_dict);
            break;

        case GD_OP_STATUS_VOLUME:
            ret = glusterd_bricks_select_status_volume(dict, op_errstr,
                                                       selected);
            break;
        case GD_OP_DEFRAG_BRICK_VOLUME:
            ret = glusterd_bricks_select_rebalance_volume(dict, op_errstr,
                                                          selected);
            break;

        case GD_OP_BARRIER:
            ret = glusterd_bricks_select_barrier(dict, selected);
            break;
        case GD_OP_SNAP:
            ret = glusterd_bricks_select_snap(dict, op_errstr, selected);
            break;
        case GD_OP_SCRUB_STATUS:
        case GD_OP_SCRUB_ONDEMAND:
            ret = glusterd_bricks_select_scrub(dict, op_errstr, selected);
            break;
        default:
            break;
    }

    gf_msg_debug(THIS->name, 0, "Returning %d", ret);

    return ret;
}

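/* The op state machine proper: one table per state, one row per event,
 * each row giving the next state and the action to run.  Rows must stay
 * in event order (EVENT_NONE through EVENT_MAX), matching the trailing
 * comments. */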
glusterd_op_sm_t glusterd_op_state_default[] = {
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_none},         // EVENT_NONE
    {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_send_lock},  // EVENT_START_LOCK
    {GD_OP_STATE_LOCKED, glusterd_op_ac_lock},          // EVENT_LOCK
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_none},         // EVENT_RCVD_ACC
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_none},         // EVENT_ALL_ACC
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_none},         // EVENT_STAGE_ACC
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_none},         // EVENT_COMMIT_ACC
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_none},         // EVENT_RCVD_RJT
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_none},         // EVENT_STAGE_OP
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_none},         // EVENT_COMMIT_OP
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},       // EVENT_UNLOCK
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_none},         // EVENT_START_UNLOCK
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_none},         // EVENT_ALL_ACK
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_none},  // EVENT_LOCAL_UNLOCK_NO_RESP
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_none},  // EVENT_MAX
};

glusterd_op_sm_t glusterd_op_state_lock_sent[] = {
    {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},           // EVENT_NONE
    {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},           // EVENT_START_LOCK
    {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_lock},           // EVENT_LOCK
    {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_rcvd_lock_acc},  // EVENT_RCVD_ACC
    {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_send_stage_op},  // EVENT_ALL_ACC
    {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},  // EVENT_STAGE_ACC
    {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},  // EVENT_COMMIT_ACC
    {GD_OP_STATE_ACK_DRAIN,
     glusterd_op_ac_send_unlock_drain},            // EVENT_RCVD_RJT
    {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},  // EVENT_STAGE_OP
    {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},  // EVENT_COMMIT_OP
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},  // EVENT_UNLOCK
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},  // EVENT_START_UNLOCK
    {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},  // EVENT_ALL_ACK
    {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},  // EVENT_LOCAL_UNLOCK_NO_RESP
    {GD_OP_STATE_LOCK_SENT, glusterd_op_ac_none},  // EVENT_MAX
};

glusterd_op_sm_t glusterd_op_state_locked[] = {
    {GD_OP_STATE_LOCKED, glusterd_op_ac_none},      // EVENT_NONE
    {GD_OP_STATE_LOCKED, glusterd_op_ac_none},      // EVENT_START_LOCK
    {GD_OP_STATE_LOCKED, glusterd_op_ac_lock},      // EVENT_LOCK
    {GD_OP_STATE_LOCKED, glusterd_op_ac_none},      // EVENT_RCVD_ACC
    {GD_OP_STATE_LOCKED, glusterd_op_ac_none},      // EVENT_ALL_ACC
    {GD_OP_STATE_LOCKED, glusterd_op_ac_none},      // EVENT_STAGE_ACC
    {GD_OP_STATE_LOCKED, glusterd_op_ac_none},      // EVENT_COMMIT_ACC
    {GD_OP_STATE_LOCKED, glusterd_op_ac_none},      // EVENT_RCVD_RJT
    {GD_OP_STATE_STAGED, glusterd_op_ac_stage_op},  // EVENT_STAGE_OP
    {GD_OP_STATE_LOCKED, glusterd_op_ac_none},      // EVENT_COMMIT_OP
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},   // EVENT_UNLOCK
    {GD_OP_STATE_LOCKED, glusterd_op_ac_none},      // EVENT_START_UNLOCK
    {GD_OP_STATE_LOCKED, glusterd_op_ac_none},      // EVENT_ALL_ACK
    {GD_OP_STATE_DEFAULT,
     glusterd_op_ac_local_unlock},              // EVENT_LOCAL_UNLOCK_NO_RESP
    {GD_OP_STATE_LOCKED, glusterd_op_ac_none},  // EVENT_MAX
};

glusterd_op_sm_t glusterd_op_state_stage_op_sent[] = {
    {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},  // EVENT_NONE
    {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},  // EVENT_START_LOCK
    {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_lock},  // EVENT_LOCK
    {GD_OP_STATE_STAGE_OP_SENT,
     glusterd_op_ac_rcvd_stage_op_acc},  // EVENT_RCVD_ACC
    {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_send_brick_op},  // EVENT_ALL_ACC
    {GD_OP_STATE_BRICK_OP_SENT,
     glusterd_op_ac_send_brick_op},                    // EVENT_STAGE_ACC
    {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},  // EVENT_COMMIT_ACC
    {GD_OP_STATE_STAGE_OP_FAILED,
     glusterd_op_ac_stage_op_failed},                  // EVENT_RCVD_RJT
    {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},  // EVENT_STAGE_OP
    {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},  // EVENT_COMMIT_OP
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},      // EVENT_UNLOCK
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},      // EVENT_START_UNLOCK
    {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},  // EVENT_ALL_ACK
    {GD_OP_STATE_STAGE_OP_SENT,
     glusterd_op_ac_none},  // EVENT_LOCAL_UNLOCK_NO_RESP
    {GD_OP_STATE_STAGE_OP_SENT, glusterd_op_ac_none},  // EVENT_MAX
};

glusterd_op_sm_t glusterd_op_state_stage_op_failed[] = {
    {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},  // EVENT_NONE
    {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},  // EVENT_START_LOCK
    {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_lock},  // EVENT_LOCK
    {GD_OP_STATE_STAGE_OP_FAILED,
     glusterd_op_ac_stage_op_failed},                    // EVENT_RCVD_ACC
    {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},  // EVENT_ALL_ACC
    {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},  // EVENT_STAGE_ACC
    {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},  // EVENT_COMMIT_ACC
    {GD_OP_STATE_STAGE_OP_FAILED,
     glusterd_op_ac_stage_op_failed},                    // EVENT_RCVD_RJT
    {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},  // EVENT_STAGE_OP
    {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},  // EVENT_COMMIT_OP
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},        // EVENT_UNLOCK
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},        // EVENT_START_UNLOCK
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock},  // EVENT_ALL_ACK
    {GD_OP_STATE_STAGE_OP_FAILED,
     glusterd_op_ac_none},  // EVENT_LOCAL_UNLOCK_NO_RESP
    {GD_OP_STATE_STAGE_OP_FAILED, glusterd_op_ac_none},  // EVENT_MAX
};

glusterd_op_sm_t glusterd_op_state_staged[] = {
    {GD_OP_STATE_STAGED, glusterd_op_ac_none},  // EVENT_NONE
    {GD_OP_STATE_STAGED, glusterd_op_ac_none},  // EVENT_START_LOCK
    {GD_OP_STATE_STAGED, glusterd_op_ac_lock},  // EVENT_LOCK
    {GD_OP_STATE_STAGED, glusterd_op_ac_none},  // EVENT_RCVD_ACC
    {GD_OP_STATE_STAGED, glusterd_op_ac_none},  // EVENT_ALL_ACC
    {GD_OP_STATE_STAGED, glusterd_op_ac_none},  // EVENT_STAGE_ACC
    {GD_OP_STATE_STAGED, glusterd_op_ac_none},  // EVENT_COMMIT_ACC
    {GD_OP_STATE_STAGED, glusterd_op_ac_none},  // EVENT_RCVD_RJT
    {GD_OP_STATE_STAGED, glusterd_op_ac_none},  // EVENT_STAGE_OP
    {GD_OP_STATE_BRICK_COMMITTED,
     glusterd_op_ac_send_brick_op},                // EVENT_COMMIT_OP
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},  // EVENT_UNLOCK
    {GD_OP_STATE_STAGED, glusterd_op_ac_none},     // EVENT_START_UNLOCK
    {GD_OP_STATE_STAGED, glusterd_op_ac_none},     // EVENT_ALL_ACK
    {GD_OP_STATE_DEFAULT,
     glusterd_op_ac_local_unlock},              // EVENT_LOCAL_UNLOCK_NO_RESP
    {GD_OP_STATE_STAGED, glusterd_op_ac_none},  // EVENT_MAX
};

glusterd_op_sm_t glusterd_op_state_brick_op_sent[] = {
    {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},  // EVENT_NONE
    {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},  // EVENT_START_LOCK
    {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_lock},  // EVENT_LOCK
    {GD_OP_STATE_BRICK_OP_SENT,
     glusterd_op_ac_rcvd_brick_op_acc},                // EVENT_RCVD_ACC
    {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},  // EVENT_ALL_ACC
    {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},  // EVENT_STAGE_ACC
    {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},  // EVENT_COMMIT_ACC
    {GD_OP_STATE_BRICK_OP_FAILED,
     glusterd_op_ac_brick_op_failed},                  // EVENT_RCVD_RJT
    {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},  // EVENT_STAGE_OP
    {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},  // EVENT_COMMIT_OP
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},      // EVENT_UNLOCK
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},      // EVENT_START_UNLOCK
    {GD_OP_STATE_COMMIT_OP_SENT,
     glusterd_op_ac_send_commit_op},  // EVENT_ALL_ACK
    {GD_OP_STATE_BRICK_OP_SENT,
     glusterd_op_ac_none},  // EVENT_LOCAL_UNLOCK_NO_RESP
    {GD_OP_STATE_BRICK_OP_SENT, glusterd_op_ac_none},  // EVENT_MAX
};

glusterd_op_sm_t glusterd_op_state_brick_op_failed[] = {
    {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},  // EVENT_NONE
    {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},  // EVENT_START_LOCK
    {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_lock},  // EVENT_LOCK
    {GD_OP_STATE_BRICK_OP_FAILED,
     glusterd_op_ac_brick_op_failed},                    // EVENT_RCVD_ACC
    {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},  // EVENT_ALL_ACC
    {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},  // EVENT_STAGE_ACC
    {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},  // EVENT_COMMIT_ACC
    {GD_OP_STATE_BRICK_OP_FAILED,
     glusterd_op_ac_brick_op_failed},                    // EVENT_RCVD_RJT
    {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},  // EVENT_STAGE_OP
    {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},  // EVENT_COMMIT_OP
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},        // EVENT_UNLOCK
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},        // EVENT_START_UNLOCK
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock},  // EVENT_ALL_ACK
    {GD_OP_STATE_BRICK_OP_FAILED,
     glusterd_op_ac_none},  // EVENT_LOCAL_UNLOCK_NO_RESP
    {GD_OP_STATE_BRICK_OP_FAILED, glusterd_op_ac_none},  // EVENT_MAX
};

glusterd_op_sm_t glusterd_op_state_brick_committed[] = {
    {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},  // EVENT_NONE
    {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},  // EVENT_START_LOCK
    {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_lock},  // EVENT_LOCK
    {GD_OP_STATE_BRICK_COMMITTED,
     glusterd_op_ac_rcvd_brick_op_acc},                  // EVENT_RCVD_ACC
    {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},  // EVENT_ALL_ACC
    {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},  // EVENT_STAGE_ACC
    {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},  // EVENT_COMMIT_ACC
    {GD_OP_STATE_BRICK_COMMIT_FAILED,
     glusterd_op_ac_brick_op_failed},                    // EVENT_RCVD_RJT
    {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},  // EVENT_STAGE_OP
    {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},  // EVENT_COMMIT_OP
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},        // EVENT_UNLOCK
    {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},  // EVENT_START_UNLOCK
    {GD_OP_STATE_COMMITED, glusterd_op_ac_commit_op},    // EVENT_ALL_ACK
    {GD_OP_STATE_DEFAULT,
     glusterd_op_ac_local_unlock},  // EVENT_LOCAL_UNLOCK_NO_RESP
    {GD_OP_STATE_BRICK_COMMITTED, glusterd_op_ac_none},  // EVENT_MAX
};

glusterd_op_sm_t glusterd_op_state_brick_commit_failed[] = {
7879
    {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},  // EVENT_NONE
7880
    {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},  // EVENT_START_LOCK
7881
    {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_lock},  // EVENT_LOCK
7882
    {GD_OP_STATE_BRICK_COMMIT_FAILED,
7883
     glusterd_op_ac_brick_op_failed},                        // EVENT_RCVD_ACC
7884
    {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},  // EVENT_ALL_ACC
7885
    {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},  // EVENT_STAGE_ACC
7886
    {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},  // EVENT_COMMIT_ACC
7887
    {GD_OP_STATE_BRICK_COMMIT_FAILED,
7888
     glusterd_op_ac_brick_op_failed},                        // EVENT_RCVD_RJT
7889
    {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},  // EVENT_STAGE_OP
7890
    {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},  // EVENT_COMMIT_OP
7891
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},            // EVENT_UNLOCK
7892
    {GD_OP_STATE_BRICK_COMMIT_FAILED,
7893
     glusterd_op_ac_none},  // EVENT_START_UNLOCK
7894
    {GD_OP_STATE_BRICK_COMMIT_FAILED,
7895
     glusterd_op_ac_send_commit_failed},  // EVENT_ALL_ACK
7896
    {GD_OP_STATE_DEFAULT,
7897
     glusterd_op_ac_local_unlock},  // EVENT_LOCAL_UNLOCK_NO_RESP
7898
    {GD_OP_STATE_BRICK_COMMIT_FAILED, glusterd_op_ac_none},  // EVENT_MAX
7899
};
7900

7901
glusterd_op_sm_t glusterd_op_state_commit_op_failed[] = {
7902
    {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},  // EVENT_NONE
7903
    {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},  // EVENT_START_LOCK
7904
    {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_lock},  // EVENT_LOCK
7905
    {GD_OP_STATE_COMMIT_OP_FAILED,
7906
     glusterd_op_ac_commit_op_failed},                    // EVENT_RCVD_ACC
7907
    {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},  // EVENT_ALL_ACC
7908
    {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},  // EVENT_STAGE_ACC
7909
    {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},  // EVENT_COMMIT_ACC
7910
    {GD_OP_STATE_COMMIT_OP_FAILED,
7911
     glusterd_op_ac_commit_op_failed},                    // EVENT_RCVD_RJT
7912
    {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},  // EVENT_STAGE_OP
7913
    {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},  // EVENT_COMMIT_OP
7914
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},         // EVENT_UNLOCK
7915
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},         // EVENT_START_UNLOCK
7916
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock},  // EVENT_ALL_ACK
7917
    {GD_OP_STATE_COMMIT_OP_FAILED,
7918
     glusterd_op_ac_none},  // EVENT_LOCAL_UNLOCK_NO_RESP
7919
    {GD_OP_STATE_COMMIT_OP_FAILED, glusterd_op_ac_none},  // EVENT_MAX
7920
};
7921

7922
glusterd_op_sm_t glusterd_op_state_commit_op_sent[] = {
7923
    {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},  // EVENT_NONE
7924
    {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},  // EVENT_START_LOCK
7925
    {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_lock},  // EVENT_LOCK
7926
    {GD_OP_STATE_COMMIT_OP_SENT,
7927
     glusterd_op_ac_rcvd_commit_op_acc},                    // EVENT_RCVD_ACC
7928
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock},  // EVENT_ALL_ACC
7929
    {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},      // EVENT_STAGE_ACC
7930
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock},  // EVENT_COMMIT_ACC
7931
    {GD_OP_STATE_COMMIT_OP_FAILED,
7932
     glusterd_op_ac_commit_op_failed},                  // EVENT_RCVD_RJT
7933
    {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},  // EVENT_STAGE_OP
7934
    {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},  // EVENT_COMMIT_OP
7935
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},       // EVENT_UNLOCK
7936
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},       // EVENT_START_UNLOCK
7937
    {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},  // EVENT_ALL_ACK
7938
    {GD_OP_STATE_COMMIT_OP_SENT,
7939
     glusterd_op_ac_none},  // EVENT_LOCAL_UNLOCK_NO_RESP
7940
    {GD_OP_STATE_COMMIT_OP_SENT, glusterd_op_ac_none},  // EVENT_MAX
7941
};
7942

7943
glusterd_op_sm_t glusterd_op_state_committed[] = {
7944
    {GD_OP_STATE_COMMITED, glusterd_op_ac_none},   // EVENT_NONE
7945
    {GD_OP_STATE_COMMITED, glusterd_op_ac_none},   // EVENT_START_LOCK
7946
    {GD_OP_STATE_COMMITED, glusterd_op_ac_lock},   // EVENT_LOCK
7947
    {GD_OP_STATE_COMMITED, glusterd_op_ac_none},   // EVENT_RCVD_ACC
7948
    {GD_OP_STATE_COMMITED, glusterd_op_ac_none},   // EVENT_ALL_ACC
7949
    {GD_OP_STATE_COMMITED, glusterd_op_ac_none},   // EVENT_STAGE_ACC
7950
    {GD_OP_STATE_COMMITED, glusterd_op_ac_none},   // EVENT_COMMIT_ACC
7951
    {GD_OP_STATE_COMMITED, glusterd_op_ac_none},   // EVENT_RCVD_RJT
7952
    {GD_OP_STATE_COMMITED, glusterd_op_ac_none},   // EVENT_STAGE_OP
7953
    {GD_OP_STATE_COMMITED, glusterd_op_ac_none},   // EVENT_COMMIT_OP
7954
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},  // EVENT_UNLOCK
7955
    {GD_OP_STATE_COMMITED, glusterd_op_ac_none},   // EVENT_START_UNLOCK
7956
    {GD_OP_STATE_COMMITED, glusterd_op_ac_none},   // EVENT_ALL_ACK
7957
    {GD_OP_STATE_DEFAULT,
7958
     glusterd_op_ac_local_unlock},                // EVENT_LOCAL_UNLOCK_NO_RESP
7959
    {GD_OP_STATE_COMMITED, glusterd_op_ac_none},  // EVENT_MAX
7960
};
7961

7962
glusterd_op_sm_t glusterd_op_state_unlock_sent[] = {
7963
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},  // EVENT_NONE
7964
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},  // EVENT_START_LOCK
7965
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_lock},  // EVENT_LOCK
7966
    {GD_OP_STATE_UNLOCK_SENT,
7967
     glusterd_op_ac_rcvd_unlock_acc},                    // EVENT_RCVD_ACC
7968
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlocked_all},  // EVENT_ALL_ACC
7969
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},      // EVENT_STAGE_ACC
7970
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},      // EVENT_COMMIT_ACC
7971
    {GD_OP_STATE_UNLOCK_SENT,
7972
     glusterd_op_ac_rcvd_unlock_acc},                // EVENT_RCVD_RJT
7973
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},  // EVENT_STAGE_OP
7974
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},  // EVENT_COMMIT_OP
7975
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},    // EVENT_UNLOCK
7976
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},    // EVENT_START_UNLOCK
7977
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},  // EVENT_ALL_ACK
7978
    {GD_OP_STATE_UNLOCK_SENT,
7979
     glusterd_op_ac_none},  // EVENT_LOCAL_UNLOCK_NO_RESP
7980
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_none},  // EVENT_MAX
7981
};
7982

7983
glusterd_op_sm_t glusterd_op_state_ack_drain[] = {
7984
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},  // EVENT_NONE
7985
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},  // EVENT_START_LOCK
7986
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_lock},  // EVENT_LOCK
7987
    {GD_OP_STATE_ACK_DRAIN,
7988
     glusterd_op_ac_send_unlock_drain},            // EVENT_RCVD_ACC
7989
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},  // EVENT_ALL_ACC
7990
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},  // EVENT_STAGE_ACC
7991
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},  // EVENT_COMMIT_ACC
7992
    {GD_OP_STATE_ACK_DRAIN,
7993
     glusterd_op_ac_send_unlock_drain},            // EVENT_RCVD_RJT
7994
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},  // EVENT_STAGE_OP
7995
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},  // EVENT_COMMIT_OP
7996
    {GD_OP_STATE_DEFAULT, glusterd_op_ac_unlock},  // EVENT_UNLOCK
7997
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},  // EVENT_START_UNLOCK
7998
    {GD_OP_STATE_UNLOCK_SENT, glusterd_op_ac_send_unlock},  // EVENT_ALL_ACK
7999
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},  // EVENT_LOCAL_UNLOCK_NO_RESP
8000
    {GD_OP_STATE_ACK_DRAIN, glusterd_op_ac_none},  // EVENT_MAX
8001
};
8002

8003
glusterd_op_sm_t *glusterd_op_state_table[] = {
8004
    glusterd_op_state_default,         glusterd_op_state_lock_sent,
8005
    glusterd_op_state_locked,          glusterd_op_state_stage_op_sent,
8006
    glusterd_op_state_staged,          glusterd_op_state_commit_op_sent,
8007
    glusterd_op_state_committed,       glusterd_op_state_unlock_sent,
8008
    glusterd_op_state_stage_op_failed, glusterd_op_state_commit_op_failed,
8009
    glusterd_op_state_brick_op_sent,   glusterd_op_state_brick_op_failed,
8010
    glusterd_op_state_brick_committed, glusterd_op_state_brick_commit_failed,
8011
    glusterd_op_state_ack_drain};
8012

8013
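/*
 * Editor's note: an illustrative sketch, not part of the upstream file.
 * It shows how the tables above are meant to be consumed: the outer
 * glusterd_op_state_table is indexed by the current state, and each row
 * is indexed by the event type; this is exactly what glusterd_op_sm()
 * does below with opinfo.state and event->event. The helper name is
 * hypothetical.
 */
static glusterd_op_sm_ac_fn
sketch_lookup_handler(int state, int event_type)
{
    glusterd_op_sm_t *row = glusterd_op_state_table[state];

    /* Each cell carries the action callback and the next state. */
    return row[event_type].handler;
}
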
int
glusterd_op_sm_new_event(glusterd_op_sm_event_type_t event_type,
                         glusterd_op_sm_event_t **new_event)
{
    glusterd_op_sm_event_t *event = NULL;

    GF_ASSERT(new_event);
    GF_ASSERT(GD_OP_EVENT_NONE <= event_type && GD_OP_EVENT_MAX > event_type);

    event = GF_CALLOC(1, sizeof(*event), gf_gld_mt_op_sm_event_t);

    if (!event)
        return -1;

    *new_event = event;
    event->event = event_type;
    CDS_INIT_LIST_HEAD(&event->list);

    return 0;
}

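/*
 * Editor's note: a hypothetical usage sketch, not part of the upstream
 * file. It allocates a free-standing event and releases it; real callers
 * normally go through glusterd_op_sm_inject_event() below, which also
 * queues the event.
 */
static int
sketch_new_event_usage(void)
{
    glusterd_op_sm_event_t *event = NULL;

    if (glusterd_op_sm_new_event(GD_OP_EVENT_NONE, &event))
        return -1; /* allocation failed */

    /* event->event is set and event->list is an empty list head here. */
    GF_FREE(event);
    return 0;
}
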
int
glusterd_op_sm_inject_event(glusterd_op_sm_event_type_t event_type,
                            uuid_t *txn_id, void *ctx)
{
    int32_t ret = -1;
    glusterd_op_sm_event_t *event = NULL;

    GF_ASSERT(event_type < GD_OP_EVENT_MAX && event_type >= GD_OP_EVENT_NONE);

    ret = glusterd_op_sm_new_event(event_type, &event);

    if (ret)
        goto out;

    event->ctx = ctx;

    if (txn_id)
        gf_uuid_copy(event->txn_id, *txn_id);

    gf_msg_debug(THIS->name, 0, "Enqueue event: '%s'",
                 glusterd_op_sm_event_name_get(event->event));
    cds_list_add_tail(&event->list, &gd_op_sm_queue);

out:
    return ret;
}

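/*
 * Editor's note: a hypothetical sketch, not part of the upstream file.
 * It injects an ALL_ACK event for an existing transaction and drains the
 * queue. The txn_id must refer to a transaction whose opinfo was stored
 * via glusterd_set_txn_opinfo(); otherwise glusterd_op_sm() logs an error
 * and drops the event.
 */
static int
sketch_inject_and_run(uuid_t *txn_id)
{
    int ret = glusterd_op_sm_inject_event(GD_OP_EVENT_ALL_ACK, txn_id, NULL);
    if (ret)
        return ret;

    /* Dequeues the event, dispatches its handler, transitions state. */
    return glusterd_op_sm();
}
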
void
glusterd_destroy_req_ctx(glusterd_req_ctx_t *ctx)
{
    if (!ctx)
        return;
    if (ctx->dict)
        dict_unref(ctx->dict);
    GF_FREE(ctx);
}

void
glusterd_destroy_local_unlock_ctx(uuid_t *ctx)
{
    if (!ctx)
        return;
    GF_FREE(ctx);
}

void
glusterd_destroy_op_event_ctx(glusterd_op_sm_event_t *event)
{
    if (!event)
        return;

    switch (event->event) {
        case GD_OP_EVENT_LOCK:
        case GD_OP_EVENT_UNLOCK:
            glusterd_destroy_lock_ctx(event->ctx);
            break;
        case GD_OP_EVENT_STAGE_OP:
        case GD_OP_EVENT_ALL_ACK:
            glusterd_destroy_req_ctx(event->ctx);
            break;
        case GD_OP_EVENT_LOCAL_UNLOCK_NO_RESP:
            glusterd_destroy_local_unlock_ctx(event->ctx);
            break;
        default:
            break;
    }
}

int
glusterd_op_sm(void)
{
    glusterd_op_sm_event_t *event = NULL;
    glusterd_op_sm_event_t *tmp = NULL;
    int ret = -1;
    int lock_err = 0;
    glusterd_op_sm_ac_fn handler = NULL;
    glusterd_op_sm_t *state = NULL;
    glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
    xlator_t *this = THIS;
    glusterd_op_info_t txn_op_info;
    glusterd_conf_t *priv = NULL;

    priv = this->private;
    GF_ASSERT(priv);

    ret = synclock_trylock(&gd_op_sm_lock);
    if (ret) {
        lock_err = errno;
        gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_LOCK_FAIL,
               "lock failed due to %s", strerror(lock_err));
        goto lock_failed;
    }

    while (!cds_list_empty(&gd_op_sm_queue)) {
        cds_list_for_each_entry_safe(event, tmp, &gd_op_sm_queue, list)
        {
            cds_list_del_init(&event->list);
            event_type = event->event;
            gf_msg_debug(this->name, 0, "Dequeued event of type: '%s'",
                         glusterd_op_sm_event_name_get(event_type));

            gf_msg_debug(this->name, 0, "transaction ID = %s",
                         uuid_utoa(event->txn_id));

            ret = glusterd_get_txn_opinfo(&event->txn_id, &txn_op_info);
            if (ret) {
                gf_msg_callingfn(this->name, GF_LOG_ERROR, 0,
                                 GD_MSG_TRANS_OPINFO_GET_FAIL,
                                 "Unable to get transaction "
8145
                                 "opinfo for transaction ID :"
8146
                                 "%s",
8147
                                 uuid_utoa(event->txn_id));
                glusterd_destroy_op_event_ctx(event);
                GF_FREE(event);
                continue;
            } else
                opinfo = txn_op_info;

            state = glusterd_op_state_table[opinfo.state];

            GF_ASSERT(state);

            handler = state[event_type].handler;
            GF_ASSERT(handler);

            ret = handler(event, event->ctx);

            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_HANDLER_RETURNED,
                       "handler returned: %d", ret);
                glusterd_destroy_op_event_ctx(event);
                GF_FREE(event);
                continue;
            }

            ret = glusterd_op_sm_transition_state(&opinfo, state, event_type);

            if (ret) {
                gf_msg(this->name, GF_LOG_ERROR, 0,
                       GD_MSG_EVENT_STATE_TRANSITION_FAIL,
                       "Unable to transition"
8177
                       "state from '%s' to '%s'",
8178
                       glusterd_op_sm_state_name_get(opinfo.state),
                       glusterd_op_sm_state_name_get(
                           state[event_type].next_state));
                (void)synclock_unlock(&gd_op_sm_lock);
                return ret;
            }

            if ((state[event_type].next_state == GD_OP_STATE_DEFAULT) &&
                (event_type == GD_OP_EVENT_UNLOCK)) {
                /* Clearing the transaction opinfo */
                ret = glusterd_clear_txn_opinfo(&event->txn_id);
                if (ret)
                    gf_msg(this->name, GF_LOG_ERROR, 0,
                           GD_MSG_TRANS_OPINFO_CLEAR_FAIL,
                           "Unable to clear transaction's opinfo");
            } else {
                if ((priv->op_version < GD_OP_VERSION_6_0) ||
                    !(event_type == GD_OP_EVENT_STAGE_OP &&
                      opinfo.state == GD_OP_STATE_STAGED &&
                      opinfo.skip_locking)) {
                    ret = glusterd_set_txn_opinfo(&event->txn_id, &opinfo);
                    if (ret)
                        gf_msg(this->name, GF_LOG_ERROR, 0,
                               GD_MSG_TRANS_OPINFO_SET_FAIL,
                               "Unable to set transaction's opinfo");
                }
            }

            glusterd_destroy_op_event_ctx(event);
            GF_FREE(event);
        }
    }

    (void)synclock_unlock(&gd_op_sm_lock);
    ret = 0;

lock_failed:

    return ret;
}

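/*
 * Editor's note on the dispatch loop above: each iteration (a) dequeues
 * an event, (b) loads the per-transaction opinfo keyed by event->txn_id
 * into the global opinfo, (c) runs the handler found at
 * glusterd_op_state_table[opinfo.state][event->event], (d) transitions
 * opinfo.state to that cell's next_state, and (e) either clears the
 * stored opinfo (on EVENT_UNLOCK back to GD_OP_STATE_DEFAULT) or writes
 * the updated opinfo back for the transaction's next event.
 */
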
int32_t
glusterd_op_set_op(glusterd_op_t op)
{
    GF_ASSERT(op < GD_OP_MAX);
    GF_ASSERT(op > GD_OP_NONE);

    opinfo.op = op;

    return 0;
}

int32_t
glusterd_op_get_op(void)
{
    return opinfo.op;
}

int32_t
glusterd_op_set_req(rpcsvc_request_t *req)
{
    GF_ASSERT(req);
    opinfo.req = req;
    return 0;
}

int32_t
glusterd_op_clear_op(void)
{
    opinfo.op = GD_OP_NONE;

    return 0;
}

int32_t
glusterd_op_free_ctx(glusterd_op_t op, void *ctx)
{
    if (ctx) {
        switch (op) {
            case GD_OP_CREATE_VOLUME:
            case GD_OP_DELETE_VOLUME:
            case GD_OP_STOP_VOLUME:
            case GD_OP_ADD_BRICK:
            case GD_OP_REMOVE_BRICK:
            case GD_OP_REPLACE_BRICK:
            case GD_OP_LOG_ROTATE:
            case GD_OP_SYNC_VOLUME:
            case GD_OP_SET_VOLUME:
            case GD_OP_START_VOLUME:
            case GD_OP_RESET_VOLUME:
            case GD_OP_GSYNC_SET:
            case GD_OP_QUOTA:
            case GD_OP_PROFILE_VOLUME:
            case GD_OP_STATUS_VOLUME:
            case GD_OP_REBALANCE:
            case GD_OP_HEAL_VOLUME:
            case GD_OP_STATEDUMP_VOLUME:
            case GD_OP_CLEARLOCKS_VOLUME:
            case GD_OP_DEFRAG_BRICK_VOLUME:
            case GD_OP_MAX_OPVERSION:
                dict_unref(ctx);
                break;
            default:
                GF_ASSERT(0);
                break;
        }
    }

    glusterd_op_reset_ctx();
    return 0;
}

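/*
 * Editor's note: a hypothetical sketch, not part of the upstream file.
 * For every op listed in the switch above, the ctx is a dict_t, so
 * glusterd_op_free_ctx() drops one reference to it.
 */
static void
sketch_ctx_lifecycle(void)
{
    dict_t *ctx = dict_new(); /* created with a single reference */

    if (!ctx)
        return;

    /* Hands the reference over; glusterd_op_free_ctx() unrefs the dict. */
    glusterd_op_free_ctx(GD_OP_STATUS_VOLUME, ctx);
}
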
void *
glusterd_op_get_ctx(void)
{
    return opinfo.op_ctx;
}

int
glusterd_op_sm_init(void)
{
    CDS_INIT_LIST_HEAD(&gd_op_sm_queue);
    synclock_init(&gd_op_sm_lock, SYNC_LOCK_DEFAULT);
    return 0;
}

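/*
 * Editor's note: a hypothetical startup sketch, not part of the upstream
 * file. glusterd_op_sm_init() must run once, before any event is injected,
 * so that the queue and its synclock exist.
 */
static int
sketch_op_sm_startup(void)
{
    if (glusterd_op_sm_init())
        return -1;

    /* With an empty queue this is a no-op that returns 0. */
    return glusterd_op_sm();
}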