glusterfsd-mgmt.c — 3041 lines · 82.4 KB
/*
   Copyright (c) 2007-2012 Red Hat, Inc. <http://www.redhat.com>
   This file is part of GlusterFS.

   This file is licensed to you under your choice of the GNU Lesser
   General Public License, version 3 or any later version (LGPLv3 or
   later), or the GNU General Public License, version 2 (GPLv2), in all
   cases as published by the Free Software Foundation.
*/
#include <stdio.h>
#include <stdlib.h>

#include <glusterfs/statedump.h>
#include <glusterfs/syscall.h>
#include <glusterfs/monitoring.h>
#include "glusterd1-xdr.h"
#include "rpc-clnt.h"
#include "glusterfsd-messages.h"
#include "glusterfs3.h"
#include "portmap-xdr.h"
#include "glusterfsd.h"
#include "cli1-xdr.h"
#include "server.h"

static gf_boolean_t is_mgmt_rpc_reconnect = _gf_false;

static gf_boolean_t need_emancipate = _gf_false;

static int
mgmt_cbk_spec(struct rpc_clnt *rpc, void *mydata, void *data)
{
    glusterfs_ctx_t *ctx = NULL;

    ctx = glusterfsd_ctx;
    gf_log("mgmt", GF_LOG_INFO, "Volume file changed");

    glusterfs_volfile_fetch(ctx);
    return 0;
}
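
/* mgmt_process_volfile: handle a volfile body pushed by glusterd for a
 * (possibly multiplexed) service.  The text is written to an unlinked
 * temporary file; if no volfile with this vol_id is tracked yet, it is
 * attached as a new graph, otherwise the running graph is reconfigured
 * whenever the SHA-256 checksum has changed. */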
static int
mgmt_process_volfile(const char *volfile, ssize_t size, char *volfile_id,
                     dict_t *dict)
{
    glusterfs_ctx_t *ctx = NULL;
    int ret = 0;
    FILE *tmpfp = NULL;
    gf_volfile_t *volfile_obj = NULL;
    gf_volfile_t *volfile_tmp = NULL;
    char sha256_hash[SHA256_DIGEST_LENGTH] = {
        0,
    };
    int tmp_fd = -1;
    char template[] = "/tmp/glfs.volfile.XXXXXX";

    glusterfs_compute_sha256((const unsigned char *)volfile, size, sha256_hash);
    ctx = THIS->ctx;
    LOCK(&ctx->volfile_lock);
    {
        list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
        {
            if (!strcmp(volfile_id, volfile_obj->vol_id)) {
                if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,
                            sizeof(volfile_obj->volfile_checksum))) {
                    UNLOCK(&ctx->volfile_lock);
                    gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_40,
                            NULL);
                    goto out;
                }
                volfile_tmp = volfile_obj;
                break;
            }
        }

        /* coverity[secure_temp] mkstemp uses 0600 as the mode */
        tmp_fd = mkstemp(template);
        if (-1 == tmp_fd) {
            UNLOCK(&ctx->volfile_lock);
            gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
                    "create template=%s", template, NULL);
            ret = -1;
            goto out;
        }

        /* Calling unlink so that when the file is closed or program
         * terminates the temporary file is deleted.
         */
        ret = sys_unlink(template);
        if (ret < 0) {
            gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
                    "delete template=%s", template, NULL);
            ret = 0;
        }

        tmpfp = fdopen(tmp_fd, "w+b");
        if (!tmpfp) {
            ret = -1;
            goto unlock;
        }

        fwrite(volfile, size, 1, tmpfp);
        fflush(tmpfp);
        if (ferror(tmpfp)) {
            ret = -1;
            goto unlock;
        }

        if (!volfile_tmp) {
            /* There is no checksum in the list, which means we simply
             * attach the volfile.
             */
            ret = glusterfs_process_svc_attach_volfp(ctx, tmpfp, volfile_id,
                                                     sha256_hash, dict);
            goto unlock;
        }
        ret = glusterfs_mux_volfile_reconfigure(tmpfp, ctx, volfile_obj,
                                                sha256_hash, dict);
        if (ret < 0) {
            gf_msg_debug("glusterfsd-mgmt", EINVAL, "Reconfigure failed !!");
        }
    }
unlock:
    UNLOCK(&ctx->volfile_lock);
out:
    if (tmpfp)
        fclose(tmpfp);
    else if (tmp_fd != -1)
        sys_close(tmp_fd);
    return ret;
}

static int
mgmt_cbk_event(struct rpc_clnt *rpc, void *mydata, void *data)
{
    return 0;
}
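
/* Reply helpers: glusterfs_serialize_reply() XDR-encodes a response
 * structure into a freshly allocated iobuf, and glusterfs_submit_reply()
 * wraps that iobuf in an iobref and hands it to rpcsvc for
 * transmission. */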
static struct iobuf *
glusterfs_serialize_reply(rpcsvc_request_t *req, void *arg,
                          struct iovec *outmsg, xdrproc_t xdrproc)
{
    struct iobuf *iob = NULL;
    ssize_t retlen = -1;
    ssize_t xdr_size = 0;

    /* First, get the io buffer into which the reply in arg will
     * be serialized.
     */
    xdr_size = xdr_sizeof(xdrproc, arg);
    iob = iobuf_get2(req->svc->ctx->iobuf_pool, xdr_size);
    if (!iob) {
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to get iobuf");
        goto ret;
    }

    iobuf_to_iovec(iob, outmsg);
    /* Use the given serializer to translate the given C structure in arg
     * to XDR format, which will be written into the buffer in outmsg.
     */
    /* retlen is used to receive the error since size_t is unsigned and we
     * need -1 for error notification during encoding.
     */
    retlen = xdr_serialize_generic(*outmsg, arg, xdrproc);
    if (retlen == -1) {
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to encode message");
        GF_FREE(iob);
        goto ret;
    }

    outmsg->iov_len = retlen;
ret:
    if (retlen == -1) {
        iob = NULL;
    }

    return iob;
}

static int
glusterfs_submit_reply(rpcsvc_request_t *req, void *arg, struct iovec *payload,
                       int payloadcount, struct iobref *iobref,
                       xdrproc_t xdrproc)
{
    struct iobuf *iob = NULL;
    int ret = -1;
    struct iovec rsp = {
        0,
    };
    char new_iobref = 0;

    if (!req) {
        GF_ASSERT(req);
        goto out;
    }

    if (!iobref) {
        iobref = iobref_new();
        if (!iobref) {
            gf_log(THIS->name, GF_LOG_ERROR, "out of memory");
            goto out;
        }

        new_iobref = 1;
    }

    iob = glusterfs_serialize_reply(req, arg, &rsp, xdrproc);
    if (!iob) {
        gf_log_callingfn(THIS->name, GF_LOG_ERROR, "Failed to serialize reply");
    } else {
        iobref_add(iobref, iob);
    }

    ret = rpcsvc_submit_generic(req, &rsp, 1, payload, payloadcount, iobref);

    if (ret == -1) {
        gf_log(THIS->name, GF_LOG_ERROR, "Reply submission failed");
        goto out;
    }

    ret = 0;
out:
    if (iob)
        iobuf_unref(iob);

    if (new_iobref && iobref)
        iobref_unref(iobref);

    return ret;
}

static int
glusterfs_terminate_response_send(rpcsvc_request_t *req, int op_ret)
{
    gd1_mgmt_brick_op_rsp rsp = {
        0,
    };
    dict_t *dict = NULL;
    int ret = 0;

    rsp.op_ret = op_ret;
    rsp.op_errno = 0;
    rsp.op_errstr = "";
    dict = dict_new();

    if (dict)
        ret = dict_allocate_and_serialize(dict, &rsp.output.output_val,
                                          &rsp.output.output_len);

    if (ret == 0)
        ret = glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
                                     (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);

    GF_FREE(rsp.output.output_val);
    if (dict)
        dict_unref(dict);
    return ret;
}
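
/* glusterfs_handle_terminate: detach (or fully terminate) a brick in a
 * brick-multiplexed process.  If the named brick is the last child still
 * attached and graceful cleanup is not requested, the whole process is
 * killed with SIGTERM; otherwise only the victim graph is sent
 * GF_EVENT_CLEANUP after signing out of the portmapper. */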
static int
glusterfs_handle_terminate(rpcsvc_request_t *req)
{
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    ssize_t ret;
    glusterfs_ctx_t *ctx = NULL;
    xlator_t *top = NULL;
    xlator_t *victim = NULL;
    xlator_t *tvictim = NULL;
    xlator_list_t **trav_p = NULL;
    gf_boolean_t lockflag = _gf_false;
    gf_boolean_t still_bricks_attached = _gf_false;
    dict_t *dict = NULL;
    xlator_t *this = NULL;
    char *value = NULL;
    gf_boolean_t graceful_cleanup = _gf_false;

    this = THIS;
    ret = xdr_to_generic(req->msg[0], &xlator_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        req->rpc_err = GARBAGE_ARGS;
        return -1;
    }
    ctx = glusterfsd_ctx;

    dict = dict_new();
    if (!dict) {
        return -1;
    }
    if (xlator_req.dict.dict_len) {
        ret = dict_unserialize(xlator_req.dict.dict_val,
                               xlator_req.dict.dict_len, &dict);
        if (ret < 0) {
            gf_log(this->name, GF_LOG_ERROR,
                   "Failed to unserialize "
                   "req-buffer to dictionary");
            goto err;
        }
    }

    ret = dict_get_str(dict, GLUSTER_BRICK_GRACEFUL_CLEANUP, &value);
    if (!ret) {
        ret = gf_string2boolean(value, &graceful_cleanup);
        if (ret)
            graceful_cleanup = _gf_false;
    }

    LOCK(&ctx->volfile_lock);
    {
        /* Find the xlator_list_t that points to our victim. */
        if (glusterfsd_ctx->active) {
            top = glusterfsd_ctx->active->first;
            for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
                victim = (*trav_p)->xlator;
                if (!victim->cleanup_starting &&
                    strcmp(victim->name, xlator_req.name) == 0) {
                    break;
                }
            }
        }

        if (!top)
            goto err;
    }
    if (!*trav_p) {
        gf_log(this->name, GF_LOG_ERROR, "can't terminate %s - not found",
               xlator_req.name);
        /*
         * Used to be -ENOENT.  However, the caller asked us to
         * make sure it's down and if it's already down that's
         * good enough.
         */
        glusterfs_terminate_response_send(req, 0);
        goto err;
    }

    glusterfs_terminate_response_send(req, 0);
    for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
        tvictim = (*trav_p)->xlator;
        if (!tvictim->cleanup_starting &&
            !strcmp(tvictim->name, xlator_req.name)) {
            continue;
        }
        if (!tvictim->cleanup_starting) {
            still_bricks_attached = _gf_true;
            break;
        }
    }
    /* Clean up brick resources gracefully if graceful_cleanup is enabled */
    if (!still_bricks_attached && !graceful_cleanup) {
        gf_log(this->name, GF_LOG_INFO,
               "terminating after loss of last child %s", xlator_req.name);
        rpc_clnt_mgmt_pmap_signout(glusterfsd_ctx, xlator_req.name);
        kill(getpid(), SIGTERM);
    } else {
        /* Check if the brick being detached is the last brick */
        if (!still_bricks_attached && graceful_cleanup)
            ctx->cleanup_starting = 1;
        /* TODO cleanup sequence needs to be done properly for
           Quota and Changelog
        */
        if (victim->cleanup_starting)
            goto err;

        rpc_clnt_mgmt_pmap_signout(glusterfsd_ctx, xlator_req.name);
        victim->cleanup_starting = 1;

        UNLOCK(&ctx->volfile_lock);
        lockflag = _gf_true;

        gf_log(this->name, GF_LOG_INFO,
               "detaching not-only child %s "
               " graceful_cleanup %d",
               xlator_req.name, graceful_cleanup);
        top->notify(top, GF_EVENT_CLEANUP, victim);
    }
err:
    if (!lockflag)
        UNLOCK(&ctx->volfile_lock);
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);
    if (xlator_req.input.input_val)
        free(xlator_req.input.input_val);
    if (dict)
        dict_unref(dict);
    free(xlator_req.name);
    xlator_req.name = NULL;
    return 0;
}

static int
glusterfs_translator_info_response_send(rpcsvc_request_t *req, int ret,
                                        char *msg, dict_t *output)
{
    gd1_mgmt_brick_op_rsp rsp = {
        0,
    };
    gf_boolean_t free_ptr = _gf_false;
    GF_ASSERT(req);

    rsp.op_ret = ret;
    rsp.op_errno = 0;
    if (ret && msg && msg[0])
        rsp.op_errstr = msg;
    else
        rsp.op_errstr = "";

    ret = -1;
    if (output) {
        ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
                                          &rsp.output.output_len);
    }
    if (!ret)
        free_ptr = _gf_true;

    glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
                           (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
    ret = 0;
    if (free_ptr)
        GF_FREE(rsp.output.output_val);
    return ret;
}

static int
glusterfs_xlator_op_response_send(rpcsvc_request_t *req, int op_ret, char *msg,
                                  dict_t *output)
{
    gd1_mgmt_brick_op_rsp rsp = {
        0,
    };
    int ret = -1;
    gf_boolean_t free_ptr = _gf_false;
    GF_ASSERT(req);

    rsp.op_ret = op_ret;
    rsp.op_errno = 0;
    if (op_ret && msg && msg[0])
        rsp.op_errstr = msg;
    else
        rsp.op_errstr = "";

    if (output) {
        ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
                                          &rsp.output.output_len);
    }
    if (!ret)
        free_ptr = _gf_true;

    ret = glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
                                 (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);

    if (free_ptr)
        GF_FREE(rsp.output.output_val);

    return ret;
}
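
/* glusterfs_volume_top_perf: measure raw brick throughput for the top
 * read-perf/write-perf operations (see the GF_CLI_TOP_* callers below).
 * It writes "blk-cnt" blocks of "blk-size" bytes to a scratch file under
 * the brick, optionally reads them back, and stores the elapsed "time"
 * and "throughput" back into the request dict. */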
static int
glusterfs_volume_top_perf(const char *brick_path, dict_t *dict,
                          gf_boolean_t write_test)
{
    int32_t fd = -1;
    int32_t output_fd = -1;
    char export_path[PATH_MAX] = {
        0,
    };
    char *buf = NULL;
    int32_t iter = 0;
    int32_t ret = -1;
    uint64_t total_blks = 0;
    uint32_t blk_size;
    uint32_t blk_count;
    double throughput = 0;
    double time = 0;
    struct timeval begin, end = {
        0,
    };

    GF_ASSERT(brick_path);

    ret = dict_get_uint32(dict, "blk-size", &blk_size);
    if (ret)
        goto out;
    ret = dict_get_uint32(dict, "blk-cnt", &blk_count);
    if (ret)
        goto out;

    if (!(blk_size > 0) || !(blk_count > 0))
        goto out;

    buf = GF_CALLOC(1, blk_size * sizeof(*buf), gf_common_mt_char);
    if (!buf) {
        ret = -1;
        gf_log("glusterd", GF_LOG_ERROR, "Could not allocate memory");
        goto out;
    }

    snprintf(export_path, sizeof(export_path), "%s/%s", brick_path,
             ".gf-tmp-stats-perf");
    fd = open(export_path, O_CREAT | O_RDWR, S_IRWXU);
    if (-1 == fd) {
        ret = -1;
        gf_log("glusterd", GF_LOG_ERROR, "Could not open tmp file");
        goto out;
    }

    gettimeofday(&begin, NULL);
    for (iter = 0; iter < blk_count; iter++) {
        ret = sys_write(fd, buf, blk_size);
        if (ret != blk_size) {
            ret = -1;
            goto out;
        }
        total_blks += ret;
    }
    gettimeofday(&end, NULL);
    if (total_blks != ((uint64_t)blk_size * blk_count)) {
        gf_log("glusterd", GF_LOG_WARNING, "Error in write");
        ret = -1;
        goto out;
    }

    time = gf_tvdiff(&begin, &end);
    throughput = total_blks / time;
    gf_log("glusterd", GF_LOG_INFO,
           "Throughput %.2f Mbps time %.2f secs "
           "bytes written %" PRId64,
           throughput, time, total_blks);

    /* if it's a write test, we are done. Otherwise, we continue to the read
     * part */
    if (write_test == _gf_true) {
        ret = 0;
        goto out;
    }

    ret = sys_fsync(fd);
    if (ret) {
        gf_log("glusterd", GF_LOG_ERROR, "could not flush cache");
        goto out;
    }
    ret = sys_lseek(fd, 0L, 0);
    if (ret != 0) {
        gf_log("glusterd", GF_LOG_ERROR, "could not seek back to start");
        ret = -1;
        goto out;
    }

    output_fd = open("/dev/null", O_RDWR);
    if (-1 == output_fd) {
        ret = -1;
        gf_log("glusterd", GF_LOG_ERROR, "Could not open output file");
        goto out;
    }

    total_blks = 0;

    gettimeofday(&begin, NULL);
    for (iter = 0; iter < blk_count; iter++) {
        ret = sys_read(fd, buf, blk_size);
        if (ret != blk_size) {
            ret = -1;
            goto out;
        }
        ret = sys_write(output_fd, buf, blk_size);
        if (ret != blk_size) {
            ret = -1;
            goto out;
        }
        total_blks += ret;
    }
    gettimeofday(&end, NULL);
    if (total_blks != ((uint64_t)blk_size * blk_count)) {
        ret = -1;
        gf_log("glusterd", GF_LOG_WARNING, "Error in read");
        goto out;
    }

    time = gf_tvdiff(&begin, &end);
    throughput = total_blks / time;
    gf_log("glusterd", GF_LOG_INFO,
           "Throughput %.2f Mbps time %.2f secs "
           "bytes read %" PRId64,
           throughput, time, total_blks);
    ret = 0;
out:
    if (fd >= 0)
        sys_close(fd);
    if (output_fd >= 0)
        sys_close(output_fd);
    GF_FREE(buf);
    sys_unlink(export_path);
    if (ret == 0) {
        ret = dict_set_double(dict, "time", time);
        if (ret)
            goto end;
        ret = dict_set_double(dict, "throughput", throughput);
        if (ret)
            goto end;
    }
end:
    return ret;
}

static int
glusterfs_handle_translator_info_get(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    dict_t *dict = NULL;
    xlator_t *this = NULL;
    gf1_cli_top_op top_op = 0;
    xlator_t *any = NULL;
    xlator_t *xlator = NULL;
    glusterfs_graph_t *active = NULL;
    glusterfs_ctx_t *ctx = NULL;
    char msg[2048] = {
        0,
    };
    dict_t *output = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ret = xdr_to_generic(req->msg[0], &xlator_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        // failed to decode msg;
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    dict = dict_new();
    ret = dict_unserialize(xlator_req.input.input_val,
                           xlator_req.input.input_len, &dict);
    if (ret < 0) {
        gf_log(this->name, GF_LOG_ERROR,
               "failed to "
               "unserialize req-buffer to dictionary");
        goto out;
    }

    ret = dict_get_int32(dict, "top-op", (int32_t *)&top_op);
    if (ret)
        goto cont;
    if (GF_CLI_TOP_READ_PERF == top_op) {
        ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_false);
    } else if (GF_CLI_TOP_WRITE_PERF == top_op) {
        ret = glusterfs_volume_top_perf(xlator_req.name, dict, _gf_true);
    }

cont:
    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);
    active = ctx->active;
    if (active == NULL) {
        gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
        ret = -1;
        goto out;
    }
    any = active->first;

    xlator = get_xlator_by_name(any, xlator_req.name);
    if (!xlator) {
        ret = -1;
        snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name);
        goto out;
    }

    if (strcmp(xlator->type, "debug/io-stats")) {
        xlator = get_xlator_by_type(xlator, "debug/io-stats");
        if (!xlator) {
            ret = -1;
            snprintf(msg, sizeof(msg),
                     "xlator-type debug/io-stats is not loaded");
            goto out;
        }
    }

    output = dict_new();
    ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_INFO, dict, output);

out:
    ret = glusterfs_translator_info_response_send(req, ret, msg, output);

    free(xlator_req.name);
    free(xlator_req.input.input_val);
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);
    if (output)
        dict_unref(output);
    if (dict)
        dict_unref(dict);
    return ret;
}
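
/* glusterfs_handle_translator_op: dispatch a generic translator
 * operation.  The request dictionary names "count" xlators under the
 * keys "xl-0" .. "xl-N"; all of them are validated first, then each one
 * is notified with GF_EVENT_TRANSLATOR_OP.  A failure in any single
 * notify is recorded but does not abort the loop. */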
static int
glusterfs_handle_translator_op(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    int32_t op_ret = 0;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    dict_t *input = NULL;
    xlator_t *xlator = NULL;
    xlator_t *any = NULL;
    dict_t *output = NULL;
    char key[32] = {0};
    int len;
    char *xname = NULL;
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *this = NULL;
    int i = 0;
    int count = 0;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ret = xdr_to_generic(req->msg[0], &xlator_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        // failed to decode msg;
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    ctx = glusterfsd_ctx;
    active = ctx->active;
    if (!active) {
        ret = -1;
        gf_smsg(this->name, GF_LOG_ERROR, EAGAIN, glusterfsd_msg_38,
                "brick-op_no.=%d", xlator_req.op, NULL);
        goto out;
    }
    any = active->first;
    input = dict_new();
    ret = dict_unserialize(xlator_req.input.input_val,
                           xlator_req.input.input_len, &input);
    if (ret < 0) {
        gf_log(this->name, GF_LOG_ERROR,
               "failed to "
               "unserialize req-buffer to dictionary");
        goto out;
    } else {
        input->extra_stdfree = xlator_req.input.input_val;
    }

    ret = dict_get_int32(input, "count", &count);

    output = dict_new();
    if (!output) {
        ret = -1;
        goto out;
    }

    for (i = 0; i < count; i++) {
        len = snprintf(key, sizeof(key), "xl-%d", i);
        ret = dict_get_strn(input, key, len, &xname);
        if (ret) {
            gf_log(this->name, GF_LOG_ERROR,
                   "Couldn't get "
                   "xlator %s ",
                   key);
            goto out;
        }
        xlator = xlator_search_by_name(any, xname);
        if (!xlator) {
            gf_log(this->name, GF_LOG_ERROR,
                   "xlator %s is not "
                   "loaded",
                   xname);
            goto out;
        }
    }
    for (i = 0; i < count; i++) {
        len = snprintf(key, sizeof(key), "xl-%d", i);
        ret = dict_get_strn(input, key, len, &xname);
        xlator = xlator_search_by_name(any, xname);
        XLATOR_NOTIFY(ret, xlator, GF_EVENT_TRANSLATOR_OP, input, output);
        /* If notify fails for an xlator we need to capture it but
         * continue with the loop. */
        if (ret)
            op_ret = -1;
    }
    ret = op_ret;
out:
    glusterfs_xlator_op_response_send(req, ret, "", output);
    if (input)
        dict_unref(input);
    if (output)
        dict_unref(output);
    free(xlator_req.name);  // malloced by xdr

    return 0;
}

static int
glusterfs_handle_bitrot(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    dict_t *input = NULL;
    dict_t *output = NULL;
    xlator_t *any = NULL;
    xlator_t *this = NULL;
    xlator_t *xlator = NULL;
    char msg[2048] = {
        0,
    };
    char xname[1024] = {
        0,
    };
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    char *scrub_opt = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ret = xdr_to_generic(req->msg[0], &xlator_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);

    if (ret < 0) {
        /*failed to decode msg;*/
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);

    active = ctx->active;
    if (!active) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    any = active->first;

    input = dict_new();
    if (!input)
        goto out;

    ret = dict_unserialize(xlator_req.input.input_val,
                           xlator_req.input.input_len, &input);

    if (ret < 0) {
        gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_35, NULL);
        goto out;
    }

    /* Send scrubber request to bitrot xlator */
    snprintf(xname, sizeof(xname), "%s-bit-rot-0", xlator_req.name);
    xlator = xlator_search_by_name(any, xname);
    if (!xlator) {
        snprintf(msg, sizeof(msg), "xlator %s is not loaded", xname);
        gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_36, NULL);
        goto out;
    }

    output = dict_new();
    if (!output) {
        ret = -1;
        goto out;
    }

    ret = dict_get_str(input, "scrub-value", &scrub_opt);
    if (ret) {
        snprintf(msg, sizeof(msg), "Failed to get scrub value");
        gf_smsg(this->name, GF_LOG_ERROR, 0, glusterfsd_msg_37, NULL);
        ret = -1;
        goto out;
    }

    if (!strncmp(scrub_opt, "status", SLEN("status"))) {
        ret = xlator->notify(xlator, GF_EVENT_SCRUB_STATUS, input, output);
    } else if (!strncmp(scrub_opt, "ondemand", SLEN("ondemand"))) {
        ret = xlator->notify(xlator, GF_EVENT_SCRUB_ONDEMAND, input, output);
        if (ret == -2) {
            snprintf(msg, sizeof(msg),
                     "Scrubber is in "
                     "Pause/Inactive/Running state");
            ret = -1;
            goto out;
        }
    }
out:
    glusterfs_translator_info_response_send(req, ret, msg, output);

    if (input)
        dict_unref(input);
    free(xlator_req.input.input_val); /*malloced by xdr*/
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);
    if (output)
        dict_unref(output);
    free(xlator_req.name);

    return 0;
}
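
/* glusterfs_handle_attach: attach one more brick graph to a running
 * brick-multiplexed process.  On success the new graph's top xlator is
 * sent GF_EVENT_PARENT_UP and the protocol/server RPC thread pool is
 * scaled up by one. */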
static int
glusterfs_handle_attach(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    xlator_t *this = NULL;
    xlator_t *nextchild = NULL;
    glusterfs_graph_t *newgraph = NULL;
    glusterfs_ctx_t *ctx = NULL;
    xlator_t *srv_xl = NULL;
    server_conf_t *srv_conf = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ctx = this->ctx;
    if (!ctx->cmd_args.volfile_id) {
        gf_log(THIS->name, GF_LOG_ERROR,
               "No volfile-id provided, erroring out");
        return -1;
    }

    ret = xdr_to_generic(req->msg[0], &xlator_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);

    if (ret < 0) {
        /*failed to decode msg;*/
        req->rpc_err = GARBAGE_ARGS;
        return -1;
    }
    ret = 0;

    if (!this->ctx->active) {
        gf_log(this->name, GF_LOG_WARNING,
               "got attach for %s but no active graph", xlator_req.name);
        goto post_unlock;
    }

    gf_log(this->name, GF_LOG_INFO, "got attach for %s", xlator_req.name);

    LOCK(&ctx->volfile_lock);
    {
        ret = glusterfs_graph_attach(this->ctx->active, xlator_req.name,
                                     &newgraph);
        if (!ret && (newgraph && newgraph->first)) {
            nextchild = newgraph->first;
            ret = xlator_notify(nextchild, GF_EVENT_PARENT_UP, nextchild);
            if (ret) {
                gf_smsg(this->name, GF_LOG_ERROR, 0, LG_MSG_EVENT_NOTIFY_FAILED,
                        "event=ParentUp", "name=%s", nextchild->name, NULL);
                goto unlock;
            }
            /* we need a protocol/server xlator as
             * nextchild
             */
            srv_xl = this->ctx->active->first;
            srv_conf = (server_conf_t *)srv_xl->private;
            rpcsvc_autoscale_threads(this->ctx, srv_conf->rpc, 1);
        }
        if (ret) {
            ret = -1;
        }
        ret = glusterfs_translator_info_response_send(req, ret, NULL, NULL);
        if (ret) {
            /* Response sent back to glusterd, req is already destroyed. So
             * reset ret to 0; otherwise another response would be sent from
             * rpcsvc_check_and_reply_error, which would lead to a double
             * resource leak.
             */
            ret = 0;
        }
    unlock:
        UNLOCK(&ctx->volfile_lock);
    }
post_unlock:
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);
    free(xlator_req.input.input_val);
    free(xlator_req.name);

    return ret;
}
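
/* glusterfs_handle_svc_attach: attach or update a service volfile that
 * arrives inline in the request; the parsing and graph attach or
 * reconfigure work is delegated to mgmt_process_volfile() above. */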
static int
glusterfs_handle_svc_attach(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    xlator_t *this = NULL;
    dict_t *dict = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ret = xdr_to_generic(req->msg[0], &xlator_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);

    if (ret < 0) {
        /*failed to decode msg;*/
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    gf_smsg(THIS->name, GF_LOG_INFO, 0, glusterfsd_msg_41, "volfile-id=%s",
            xlator_req.name, NULL);

    dict = dict_new();
    if (!dict) {
        ret = -1;
        errno = ENOMEM;
        goto out;
    }

    ret = dict_unserialize(xlator_req.dict.dict_val, xlator_req.dict.dict_len,
                           &dict);
    if (ret) {
        gf_smsg(this->name, GF_LOG_WARNING, EINVAL, glusterfsd_msg_42, NULL);
        goto out;
    }
    dict->extra_stdfree = xlator_req.dict.dict_val;

    ret = 0;

    ret = mgmt_process_volfile(xlator_req.input.input_val,
                               xlator_req.input.input_len, xlator_req.name,
                               dict);
out:
    if (dict)
        dict_unref(dict);
    if (xlator_req.input.input_val)
        free(xlator_req.input.input_val);
    if (xlator_req.name)
        free(xlator_req.name);
    glusterfs_translator_info_response_send(req, ret, NULL, NULL);
    return 0;
}

static int
glusterfs_handle_svc_detach(rpcsvc_request_t *req)
{
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    ssize_t ret;
    gf_volfile_t *volfile_obj = NULL;
    glusterfs_ctx_t *ctx = NULL;
    gf_volfile_t *volfile_tmp = NULL;

    ret = xdr_to_generic(req->msg[0], &xlator_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        req->rpc_err = GARBAGE_ARGS;
        return -1;
    }
    ctx = glusterfsd_ctx;

    LOCK(&ctx->volfile_lock);
    {
        list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
        {
            if (!strcmp(xlator_req.name, volfile_obj->vol_id)) {
                volfile_tmp = volfile_obj;
                break;
            }
        }

        if (!volfile_tmp) {
            UNLOCK(&ctx->volfile_lock);
            gf_smsg(THIS->name, GF_LOG_ERROR, 0, glusterfsd_msg_041, "name=%s",
                    xlator_req.name, NULL);
            /*
             * Used to be -ENOENT.  However, the caller asked us to
             * make sure it's down and if it's already down that's
             * good enough.
             */
            ret = 0;
            goto out;
        }
        /* coverity[ORDER_REVERSAL] */
        ret = glusterfs_process_svc_detach(ctx, volfile_tmp);
        if (ret) {
            UNLOCK(&ctx->volfile_lock);
            gf_smsg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL, glusterfsd_msg_042,
                    NULL);
            goto out;
        }
    }
    UNLOCK(&ctx->volfile_lock);
out:
    glusterfs_terminate_response_send(req, ret);
    free(xlator_req.name);
    xlator_req.name = NULL;

    return 0;
}
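
/* glusterfs_handle_dump_metrics: dump process metrics to a temporary
 * file via gf_monitor_metrics() and ship the whole file back to the
 * caller in the op_errstr field of the response. */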
static int
glusterfs_handle_dump_metrics(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    xlator_t *this = NULL;
    glusterfs_ctx_t *ctx = NULL;
    char *filepath = NULL;
    int fd = -1;
    struct stat statbuf = {
        0,
    };
    char *msg = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ret = xdr_to_generic(req->msg[0], &xlator_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);

    if (ret < 0) {
        /*failed to decode msg;*/
        req->rpc_err = GARBAGE_ARGS;
        return -1;
    }
    ret = -1;
    ctx = this->ctx;

    /* Infra for monitoring */
    filepath = gf_monitor_metrics(ctx);
    if (!filepath)
        goto out;

    fd = sys_open(filepath, O_RDONLY, 0);
    if (fd < 0)
        goto out;

    if (sys_fstat(fd, &statbuf) < 0)
        goto out;

    if (statbuf.st_size > GF_UNIT_MB) {
        gf_smsg(this->name, GF_LOG_WARNING, ENOMEM, LG_MSG_NO_MEMORY,
                "reconsider logic (%" PRId64 ")", statbuf.st_size, NULL);
    }
    msg = GF_CALLOC(1, (statbuf.st_size + 1), gf_common_mt_char);
    if (!msg)
        goto out;

    ret = sys_read(fd, msg, statbuf.st_size);
    if (ret < 0)
        goto out;

    /* Send all the data in errstr, instead of dictionary for now */
    glusterfs_translator_info_response_send(req, 0, msg, NULL);

    ret = 0;
out:
    if (fd >= 0)
        sys_close(fd);

    GF_FREE(msg);
    GF_FREE(filepath);
    if (xlator_req.input.input_val)
        free(xlator_req.input.input_val);
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);

    return ret;
}

static int
glusterfs_handle_defrag(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    dict_t *dict = NULL;
    xlator_t *xlator = NULL;
    xlator_t *any = NULL;
    dict_t *output = NULL;
    char msg[2048] = {0};
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *this = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);

    active = ctx->active;
    if (!active) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    any = active->first;
    ret = xdr_to_generic(req->msg[0], &xlator_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        // failed to decode msg;
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }
    dict = dict_new();
    if (!dict)
        goto out;

    ret = dict_unserialize(xlator_req.input.input_val,
                           xlator_req.input.input_len, &dict);
    if (ret < 0) {
        gf_log(this->name, GF_LOG_ERROR,
               "failed to "
               "unserialize req-buffer to dictionary");
        goto out;
    }
    xlator = xlator_search_by_name(any, xlator_req.name);
    if (!xlator) {
        snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name);
        goto out;
    }

    output = dict_new();
    if (!output) {
        ret = -1;
        goto out;
    }

    ret = xlator->notify(xlator, GF_EVENT_VOLUME_DEFRAG, dict, output);

    ret = glusterfs_translator_info_response_send(req, ret, msg, output);
out:
    if (dict)
        dict_unref(dict);
    free(xlator_req.input.input_val);  // malloced by xdr
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);
    if (output)
        dict_unref(output);
    free(xlator_req.name);  // malloced by xdr

    return ret;
}
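
/* glusterfs_handle_brick_status: answer a GF_CLI_STATUS_* query (MEM,
 * CLIENTS, CLIENT_LIST, INODE, FD or CALLPOOL) for one brick by dumping
 * the corresponding state of the brick xlator, found under the
 * protocol/server xlator, into a dict that is serialized into the
 * response. */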
static int
glusterfs_handle_brick_status(rpcsvc_request_t *req)
{
    int ret = -1;
    gd1_mgmt_brick_op_req brick_req = {
        0,
    };
    gd1_mgmt_brick_op_rsp rsp = {
        0,
    };
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *this = NULL;
    xlator_t *server_xl = NULL;
    xlator_t *brick_xl = NULL;
    dict_t *dict = NULL;
    dict_t *output = NULL;
    uint32_t cmd = 0;
    char *msg = NULL;
    char *brickname = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ret = xdr_to_generic(req->msg[0], &brick_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    dict = dict_new();
    ret = dict_unserialize(brick_req.input.input_val, brick_req.input.input_len,
                           &dict);
    if (ret < 0) {
        gf_log(this->name, GF_LOG_ERROR,
               "Failed to unserialize "
               "req-buffer to dictionary");
        goto out;
    }

    ret = dict_get_uint32(dict, "cmd", &cmd);
    if (ret) {
        gf_log(this->name, GF_LOG_ERROR, "Couldn't get status op");
        goto out;
    }

    ret = dict_get_str(dict, "brick-name", &brickname);
    if (ret) {
        gf_log(this->name, GF_LOG_ERROR,
               "Couldn't get brickname from"
               " dict");
        goto out;
    }

    ctx = glusterfsd_ctx;
    if (ctx == NULL) {
        gf_log(this->name, GF_LOG_ERROR, "ctx returned NULL");
        ret = -1;
        goto out;
    }
    if (ctx->active == NULL) {
        gf_log(this->name, GF_LOG_ERROR, "ctx->active returned NULL");
        ret = -1;
        goto out;
    }
    active = ctx->active;
    if (ctx->active->first == NULL) {
        gf_log(this->name, GF_LOG_ERROR,
               "ctx->active->first "
               "returned NULL");
        ret = -1;
        goto out;
    }
    server_xl = active->first;

    brick_xl = get_xlator_by_name(server_xl, brickname);
    if (!brick_xl) {
        gf_log(this->name, GF_LOG_ERROR, "xlator is not loaded");
        ret = -1;
        goto out;
    }

    output = dict_new();
    switch (cmd & GF_CLI_STATUS_MASK) {
        case GF_CLI_STATUS_MEM:
            ret = 0;
            gf_proc_dump_mem_info_to_dict(output);
            gf_proc_dump_mempool_info_to_dict(ctx, output);
            break;

        case GF_CLI_STATUS_CLIENTS:
        case GF_CLI_STATUS_CLIENT_LIST:
            ret = server_xl->dumpops->priv_to_dict(server_xl, output,
                                                   brickname);
            break;

        case GF_CLI_STATUS_INODE:
            ret = server_xl->dumpops->inode_to_dict(brick_xl, output);
            break;

        case GF_CLI_STATUS_FD:
            ret = server_xl->dumpops->fd_to_dict(brick_xl, output);
            break;

        case GF_CLI_STATUS_CALLPOOL:
            ret = 0;
            gf_proc_dump_pending_frames_to_dict(ctx->pool, output);
            break;

        default:
            ret = -1;
            msg = gf_strdup("Unknown status op");
            break;
    }
    rsp.op_ret = ret;
    rsp.op_errno = 0;
    if (ret && msg)
        rsp.op_errstr = msg;
    else
        rsp.op_errstr = "";

    ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
                                      &rsp.output.output_len);
    if (ret) {
        gf_log(this->name, GF_LOG_ERROR,
               "Failed to serialize output dict to rsp");
        goto out;
    }

    glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
                           (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
    ret = 0;

out:
    if (dict)
        dict_unref(dict);
    if (output)
        dict_unref(output);
    free(brick_req.input.input_val);
    if (brick_req.dict.dict_val)
        free(brick_req.dict.dict_val);
    free(brick_req.name);
    GF_FREE(msg);
    GF_FREE(rsp.output.output_val);

    return ret;
}
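
/* glusterfs_handle_node_status: the node-service counterpart of the
 * brick status handler.  It maps the cmd bits to a node xlator name
 * (glustershd, nfs-server, quotad, bitd, scrubber), locates the
 * per-volume subvolume inside it, and dumps the requested state into
 * the response dict. */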
static int
glusterfs_handle_node_status(rpcsvc_request_t *req)
{
    int ret = -1;
    gd1_mgmt_brick_op_req node_req = {
        0,
    };
    gd1_mgmt_brick_op_rsp rsp = {
        0,
    };
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *any = NULL;
    xlator_t *node = NULL;
    xlator_t *subvol = NULL;
    dict_t *dict = NULL;
    dict_t *output = NULL;
    char *volname = NULL;
    char *node_name = NULL;
    char *subvol_name = NULL;
    uint32_t cmd = 0;
    char *msg = NULL;

    GF_ASSERT(req);

    ret = xdr_to_generic(req->msg[0], &node_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    dict = dict_new();
    if (!dict) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to allocate the dictionary");
        goto out;
    }

    ret = dict_unserialize(node_req.input.input_val, node_req.input.input_len,
                           &dict);
    if (ret < 0) {
        gf_log(THIS->name, GF_LOG_ERROR,
               "Failed to unserialize "
               "req buffer to dictionary");
        goto out;
    }

    ret = dict_get_uint32(dict, "cmd", &cmd);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR, "Couldn't get status op");
        goto out;
    }

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR, "Couldn't get volname");
        goto out;
    }

    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);
    active = ctx->active;
    if (active == NULL) {
        gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
        ret = -1;
        goto out;
    }
    any = active->first;

    if ((cmd & GF_CLI_STATUS_SHD) != 0)
        ret = gf_asprintf(&node_name, "%s", "glustershd");
#ifdef BUILD_GNFS
    else if ((cmd & GF_CLI_STATUS_NFS) != 0)
        ret = gf_asprintf(&node_name, "%s", "nfs-server");
#endif
    else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
        ret = gf_asprintf(&node_name, "%s", "quotad");
    else if ((cmd & GF_CLI_STATUS_BITD) != 0)
        ret = gf_asprintf(&node_name, "%s", "bitd");
    else if ((cmd & GF_CLI_STATUS_SCRUB) != 0)
        ret = gf_asprintf(&node_name, "%s", "scrubber");

    else {
        ret = -1;
        goto out;
    }
    if (ret == -1) {
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to set node xlator name");
        goto out;
    }

    node = xlator_search_by_name(any, node_name);
    if (!node) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded", node_name);
        goto out;
    }

    if ((cmd & GF_CLI_STATUS_NFS) != 0)
        ret = gf_asprintf(&subvol_name, "%s", volname);
    else if ((cmd & GF_CLI_STATUS_SHD) != 0)
        ret = gf_asprintf(&subvol_name, "%s-replicate-0", volname);
    else if ((cmd & GF_CLI_STATUS_QUOTAD) != 0)
        ret = gf_asprintf(&subvol_name, "%s", volname);
    else if ((cmd & GF_CLI_STATUS_BITD) != 0)
        ret = gf_asprintf(&subvol_name, "%s", volname);
    else if ((cmd & GF_CLI_STATUS_SCRUB) != 0)
        ret = gf_asprintf(&subvol_name, "%s", volname);
    else {
        ret = -1;
        goto out;
    }
    if (ret == -1) {
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to set node xlator name");
        goto out;
    }

    subvol = xlator_search_by_name(node, subvol_name);
    if (!subvol) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
               subvol_name);
        goto out;
    }

    output = dict_new();
    if (!output) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to allocate the dictionary");
        goto out;
    }

    switch (cmd & GF_CLI_STATUS_MASK) {
        case GF_CLI_STATUS_MEM:
            ret = 0;
            gf_proc_dump_mem_info_to_dict(output);
            gf_proc_dump_mempool_info_to_dict(ctx, output);
            break;

        case GF_CLI_STATUS_CLIENTS:
            // clients not available for SHD
            if ((cmd & GF_CLI_STATUS_SHD) != 0)
                break;

            ret = dict_set_str(output, "volname", volname);
            if (ret) {
                gf_log(THIS->name, GF_LOG_ERROR,
                       "Error setting volname to dict");
                goto out;
            }
            ret = node->dumpops->priv_to_dict(node, output, NULL);
            break;

        case GF_CLI_STATUS_INODE:
            ret = 0;
            inode_table_dump_to_dict(subvol->itable, "conn0", output);
            ret = dict_set_int32(output, "conncount", 1);
            break;

        case GF_CLI_STATUS_FD:
            // cannot find fd-tables in nfs-server graph
            // TODO: finish once found
            break;

        case GF_CLI_STATUS_CALLPOOL:
            ret = 0;
            gf_proc_dump_pending_frames_to_dict(ctx->pool, output);
            break;

        default:
            ret = -1;
            msg = gf_strdup("Unknown status op");
            gf_log(THIS->name, GF_LOG_ERROR, "%s", msg);
            break;
    }
    rsp.op_ret = ret;
    rsp.op_errno = 0;
    if (ret && msg)
        rsp.op_errstr = msg;
    else
        rsp.op_errstr = "";

    ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
                                      &rsp.output.output_len);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR,
               "Failed to serialize output dict to rsp");
        goto out;
    }

    glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
                           (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
    ret = 0;

out:
    if (dict)
        dict_unref(dict);
    if (output)
        dict_unref(output);
    free(node_req.input.input_val);
    if (node_req.dict.dict_val)
        free(node_req.dict.dict_val);
    GF_FREE(msg);
    GF_FREE(rsp.output.output_val);
    GF_FREE(node_name);
    GF_FREE(subvol_name);

    gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
    return ret;
}

static int
glusterfs_handle_nfs_profile(rpcsvc_request_t *req)
{
    int ret = -1;
    gd1_mgmt_brick_op_req nfs_req = {
        0,
    };
    gd1_mgmt_brick_op_rsp rsp = {
        0,
    };
    dict_t *dict = NULL;
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *any = NULL;
    xlator_t *nfs = NULL;
    xlator_t *subvol = NULL;
    char *volname = NULL;
    dict_t *output = NULL;

    GF_ASSERT(req);

    ret = xdr_to_generic(req->msg[0], &nfs_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    dict = dict_new();
    ret = dict_unserialize(nfs_req.input.input_val, nfs_req.input.input_len,
                           &dict);
    if (ret < 0) {
        gf_log(THIS->name, GF_LOG_ERROR,
               "Failed to "
               "unserialize req-buffer to dict");
        goto out;
    }

    ret = dict_get_str(dict, "volname", &volname);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR, "Couldn't get volname");
        goto out;
    }

    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);

    active = ctx->active;
    if (active == NULL) {
        gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
        ret = -1;
        goto out;
    }
    any = active->first;

    // is this needed?
    // are problems possible by searching for subvol directly from "any"?
    nfs = xlator_search_by_name(any, "nfs-server");
    if (!nfs) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR,
               "xlator nfs-server is "
               "not loaded");
        goto out;
    }

    subvol = xlator_search_by_name(nfs, volname);
    if (!subvol) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "xlator %s is not loaded", volname);
        goto out;
    }

    output = dict_new();
    ret = subvol->notify(subvol, GF_EVENT_TRANSLATOR_INFO, dict, output);

    rsp.op_ret = ret;
    rsp.op_errno = 0;
    rsp.op_errstr = "";

    ret = dict_allocate_and_serialize(output, &rsp.output.output_val,
                                      &rsp.output.output_len);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR,
               "Failed to serialize output dict to rsp");
        goto out;
    }

    glusterfs_submit_reply(req, &rsp, NULL, 0, NULL,
                           (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);
    ret = 0;

out:
    free(nfs_req.input.input_val);
    if (nfs_req.dict.dict_val)
        free(nfs_req.dict.dict_val);
    if (dict)
        dict_unref(dict);
    if (output)
        dict_unref(output);
    GF_FREE(rsp.output.output_val);

    gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
    return ret;
}

static int
glusterfs_handle_volume_barrier_op(rpcsvc_request_t *req)
{
    int32_t ret = -1;
    gd1_mgmt_brick_op_req xlator_req = {
        0,
    };
    dict_t *dict = NULL;
    xlator_t *xlator = NULL;
    xlator_t *any = NULL;
    dict_t *output = NULL;
    char msg[2048] = {0};
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *this = NULL;

    GF_ASSERT(req);
    this = THIS;
    GF_ASSERT(this);

    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);

    active = ctx->active;
    if (!active) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    any = active->first;
    ret = xdr_to_generic(req->msg[0], &xlator_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        // failed to decode msg;
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }
    dict = dict_new();
    if (!dict)
        goto out;

    ret = dict_unserialize(xlator_req.input.input_val,
                           xlator_req.input.input_len, &dict);
    if (ret < 0) {
        gf_log(this->name, GF_LOG_ERROR,
               "failed to "
               "unserialize req-buffer to dictionary");
        goto out;
    }
    xlator = xlator_search_by_name(any, xlator_req.name);
    if (!xlator) {
        snprintf(msg, sizeof(msg), "xlator %s is not loaded", xlator_req.name);
        goto out;
    }

    output = dict_new();
    if (!output) {
        ret = -1;
        goto out;
    }

    ret = xlator->notify(xlator, GF_EVENT_VOLUME_BARRIER_OP, dict, output);

    ret = glusterfs_translator_info_response_send(req, ret, msg, output);
out:
    if (dict)
        dict_unref(dict);
    free(xlator_req.input.input_val);  // malloced by xdr
    if (xlator_req.dict.dict_val)
        free(xlator_req.dict.dict_val);
    if (output)
        dict_unref(output);
    free(xlator_req.name);  // malloced by xdr

    return ret;
}
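
/* glusterfs_handle_barrier: toggle the I/O barrier for a brick.  The
 * request is forwarded as GF_EVENT_TRANSLATOR_OP first to the
 * features/barrier xlator and then to features/changelog; on a failed
 * barrier *disable* the changelog notification is still attempted so
 * the two barriers do not get out of sync. */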

static int
glusterfs_handle_barrier(rpcsvc_request_t *req)
{
    int ret = -1;
    gd1_mgmt_brick_op_req brick_req = {
        0,
    };
    gd1_mgmt_brick_op_rsp brick_rsp = {
        0,
    };
    glusterfs_ctx_t *ctx = NULL;
    glusterfs_graph_t *active = NULL;
    xlator_t *top = NULL;
    xlator_t *xlator = NULL;
    xlator_t *old_THIS = NULL;
    dict_t *dict = NULL;
    gf_boolean_t barrier = _gf_true;
    xlator_list_t *trav;

    GF_ASSERT(req);

    ret = xdr_to_generic(req->msg[0], &brick_req,
                         (xdrproc_t)xdr_gd1_mgmt_brick_op_req);
    if (ret < 0) {
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    ctx = glusterfsd_ctx;
    GF_ASSERT(ctx);
    active = ctx->active;
    if (active == NULL) {
        gf_log(THIS->name, GF_LOG_ERROR, "ctx->active returned NULL");
        ret = -1;
        goto out;
    }
    top = active->first;

    for (trav = top->children; trav; trav = trav->next) {
        if (strcmp(trav->xlator->name, brick_req.name) == 0) {
            break;
        }
    }
    if (!trav) {
        ret = -1;
        goto out;
    }
    top = trav->xlator;

    dict = dict_new();
    if (!dict) {
        ret = -1;
        goto out;
    }

    ret = dict_unserialize(brick_req.input.input_val, brick_req.input.input_len,
                           &dict);
    if (ret < 0) {
        gf_log(THIS->name, GF_LOG_ERROR,
               "Failed to unserialize request dictionary");
        goto out;
    }

    brick_rsp.op_ret = 0;
    brick_rsp.op_errstr = "";  // initializing to prevent serialization failures
    old_THIS = THIS;

    /* Send barrier request to the barrier xlator */
    xlator = get_xlator_by_type(top, "features/barrier");
    if (!xlator) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
               "features/barrier");
        goto out;
    }

    THIS = xlator;
    // TODO: Extend this to accept return of errnos
    ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_OP, dict);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR, "barrier notify failed");
        brick_rsp.op_ret = ret;
        brick_rsp.op_errstr = gf_strdup("Failed to reconfigure barrier.");
        /* Still invoke the changelog barrier if a barrier *disable* failed;
         * skip it if a barrier *enable* failed.
         */
        barrier = dict_get_str_boolean(dict, "barrier", _gf_true);
        if (barrier)
            goto submit_reply;
    }

    /* Reset THIS so that we have it correct in case of an error below
     */
    THIS = old_THIS;

    /* Send barrier request to changelog as well */
    xlator = get_xlator_by_type(top, "features/changelog");
    if (!xlator) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_ERROR, "%s xlator is not loaded",
               "features/changelog");
        goto out;
    }

    THIS = xlator;
    ret = xlator->notify(xlator, GF_EVENT_TRANSLATOR_OP, dict);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR, "changelog notify failed");
        brick_rsp.op_ret = ret;
        brick_rsp.op_errstr = gf_strdup("changelog notify failed");
        goto submit_reply;
    }

submit_reply:
    THIS = old_THIS;

    ret = glusterfs_submit_reply(req, &brick_rsp, NULL, 0, NULL,
                                 (xdrproc_t)xdr_gd1_mgmt_brick_op_rsp);

out:
    if (dict)
        dict_unref(dict);
    free(brick_req.input.input_val);
    if (brick_req.dict.dict_val)
        free(brick_req.dict.dict_val);
    gf_log(THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
    return ret;
}

static int
glusterfs_handle_rpc_msg(rpcsvc_request_t *req)
{
    int ret = -1;
    /* for now, nothing */
    return ret;
}

static rpcclnt_cb_actor_t mgmt_cbk_actors[GF_CBK_MAXVALUE] = {
    [GF_CBK_FETCHSPEC] = {"FETCHSPEC", mgmt_cbk_spec, GF_CBK_FETCHSPEC},
    [GF_CBK_EVENT_NOTIFY] = {"EVENTNOTIFY", mgmt_cbk_event,
                             GF_CBK_EVENT_NOTIFY},
    [GF_CBK_STATEDUMP] = {"STATEDUMP", mgmt_cbk_event, GF_CBK_STATEDUMP},
};

static struct rpcclnt_cb_program mgmt_cbk_prog = {
    .progname = "GlusterFS Callback",
    .prognum = GLUSTER_CBK_PROGRAM,
    .progver = GLUSTER_CBK_VERSION,
    .actors = mgmt_cbk_actors,
    .numactors = GF_CBK_MAXVALUE,
};

static char *clnt_pmap_procs[GF_PMAP_MAXVALUE] = {
    [GF_PMAP_NULL] = "NULL",
    [GF_PMAP_PORTBYBRICK] = "PORTBYBRICK",
    [GF_PMAP_BRICKBYPORT] = "BRICKBYPORT",
    [GF_PMAP_SIGNIN] = "SIGNIN",
    [GF_PMAP_SIGNOUT] = "SIGNOUT",
    [GF_PMAP_SIGNUP] = "SIGNUP", /* DEPRECATED - DON'T USE! */
};

static rpc_clnt_prog_t clnt_pmap_prog = {
    .progname = "Gluster Portmap",
    .prognum = GLUSTER_PMAP_PROGRAM,
    .progver = GLUSTER_PMAP_VERSION,
    .procnames = clnt_pmap_procs,
};

static char *clnt_handshake_procs[GF_HNDSK_MAXVALUE] = {
    [GF_HNDSK_NULL] = "NULL",
    [GF_HNDSK_SETVOLUME] = "SETVOLUME",
    [GF_HNDSK_GETSPEC] = "GETSPEC",
    [GF_HNDSK_PING] = "PING",
    [GF_HNDSK_EVENT_NOTIFY] = "EVENTNOTIFY",
};

static rpc_clnt_prog_t clnt_handshake_prog = {
    .progname = "GlusterFS Handshake",
    .prognum = GLUSTER_HNDSK_PROGRAM,
    .progver = GLUSTER_HNDSK_VERSION,
    .procnames = clnt_handshake_procs,
};

static rpcsvc_actor_t glusterfs_actors[GLUSTERD_BRICK_MAXVALUE] = {
    [GLUSTERD_BRICK_NULL] = {"NULL", glusterfs_handle_rpc_msg, NULL,
                             GLUSTERD_BRICK_NULL, DRC_NA, 0},
    [GLUSTERD_BRICK_TERMINATE] = {"TERMINATE", glusterfs_handle_terminate, NULL,
                                  GLUSTERD_BRICK_TERMINATE, DRC_NA, 0},
    [GLUSTERD_BRICK_XLATOR_INFO] = {"TRANSLATOR INFO",
                                    glusterfs_handle_translator_info_get, NULL,
                                    GLUSTERD_BRICK_XLATOR_INFO, DRC_NA, 0},
    [GLUSTERD_BRICK_XLATOR_OP] = {"TRANSLATOR OP",
                                  glusterfs_handle_translator_op, NULL,
                                  GLUSTERD_BRICK_XLATOR_OP, DRC_NA, 0},
    [GLUSTERD_BRICK_STATUS] = {"STATUS", glusterfs_handle_brick_status, NULL,
                               GLUSTERD_BRICK_STATUS, DRC_NA, 0},
    [GLUSTERD_BRICK_XLATOR_DEFRAG] = {"TRANSLATOR DEFRAG",
                                      glusterfs_handle_defrag, NULL,
                                      GLUSTERD_BRICK_XLATOR_DEFRAG, DRC_NA, 0},
    [GLUSTERD_NODE_PROFILE] = {"NFS PROFILE", glusterfs_handle_nfs_profile,
                               NULL, GLUSTERD_NODE_PROFILE, DRC_NA, 0},
    [GLUSTERD_NODE_STATUS] = {"NFS STATUS", glusterfs_handle_node_status, NULL,
                              GLUSTERD_NODE_STATUS, DRC_NA, 0},
    [GLUSTERD_VOLUME_BARRIER_OP] = {"VOLUME BARRIER OP",
                                    glusterfs_handle_volume_barrier_op, NULL,
                                    GLUSTERD_VOLUME_BARRIER_OP, DRC_NA, 0},
    [GLUSTERD_BRICK_BARRIER] = {"BARRIER", glusterfs_handle_barrier, NULL,
                                GLUSTERD_BRICK_BARRIER, DRC_NA, 0},
    [GLUSTERD_NODE_BITROT] = {"BITROT", glusterfs_handle_bitrot, NULL,
                              GLUSTERD_NODE_BITROT, DRC_NA, 0},
    [GLUSTERD_BRICK_ATTACH] = {"ATTACH", glusterfs_handle_attach, NULL,
                               GLUSTERD_BRICK_ATTACH, DRC_NA, 0},

    [GLUSTERD_DUMP_METRICS] = {"DUMP METRICS", glusterfs_handle_dump_metrics,
                               NULL, GLUSTERD_DUMP_METRICS, DRC_NA, 0},

    [GLUSTERD_SVC_ATTACH] = {"ATTACH CLIENT", glusterfs_handle_svc_attach, NULL,
                             GLUSTERD_SVC_ATTACH, DRC_NA, 0},

    [GLUSTERD_SVC_DETACH] = {"DETACH CLIENT", glusterfs_handle_svc_detach, NULL,
                             GLUSTERD_SVC_DETACH, DRC_NA, 0},

};

static struct rpcsvc_program glusterfs_mop_prog = {
    .progname = "Gluster Brick operations",
    .prognum = GD_BRICK_PROGRAM,
    .progver = GD_BRICK_VERSION,
    .actors = glusterfs_actors,
    .numactors = GLUSTERD_BRICK_MAXVALUE,
    .synctask = _gf_true,
};
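
/* Note (added commentary, not part of the original source): rpcsvc
 * dispatches each brick-op received on the local listener (see
 * glusterfs_listener_init() below) by procedure number into
 * glusterfs_actors[]; .synctask = _gf_true runs the actors in synctask
 * context rather than directly in the event thread. */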

int
mgmt_submit_request(void *req, call_frame_t *frame, glusterfs_ctx_t *ctx,
                    rpc_clnt_prog_t *prog, int procnum, fop_cbk_fn_t cbkfn,
                    xdrproc_t xdrproc)
{
    int ret = -1;
    int count = 0;
    struct iovec iov = {
        0,
    };
    struct iobuf *iobuf = NULL;
    struct iobref *iobref = NULL;
    ssize_t xdr_size = 0;
    gf_boolean_t frame_cleanup = _gf_true;

    iobref = iobref_new();
    if (!iobref) {
        goto out;
    }

    if (req) {
        xdr_size = xdr_sizeof(xdrproc, req);

        iobuf = iobuf_get2(ctx->iobuf_pool, xdr_size);
        if (!iobuf) {
            goto out;
        }

        iobref_add(iobref, iobuf);

        iov.iov_base = iobuf->ptr;
        iov.iov_len = iobuf_pagesize(iobuf);

        /* Create the xdr payload */
        ret = xdr_serialize_generic(iov, req, xdrproc);
        if (ret == -1) {
            gf_log(THIS->name, GF_LOG_WARNING, "failed to create XDR payload");
            goto out;
        }
        iov.iov_len = ret;
        count = 1;
    }

    /* Send the msg */
    ret = rpc_clnt_submit(ctx->mgmt, prog, procnum, cbkfn, &iov, count, NULL, 0,
                          iobref, frame, NULL, 0, NULL, 0, NULL);

    frame_cleanup = _gf_false;
out:
    if (iobref)
        iobref_unref(iobref);

    if (iobuf)
        iobuf_unref(iobuf);

    if (frame_cleanup)
        STACK_DESTROY(frame->root);

    return ret;
}
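
/* Illustrative usage sketch (added commentary, not part of the original
 * source): every management RPC in this file funnels through
 * mgmt_submit_request(). A typical call, assuming a valid ctx and frame,
 * looks like:
 *
 *     gf_getspec_req req = {0, };
 *     req.key = "myvol";    // hypothetical volfile-id, for illustration
 *     ret = mgmt_submit_request(&req, frame, ctx, &clnt_handshake_prog,
 *                               GF_HNDSK_GETSPEC, mgmt_getspec_cbk,
 *                               (xdrproc_t)xdr_gf_getspec_req);
 *
 * Once rpc_clnt_submit() is reached, ownership of the frame passes to the
 * RPC layer (frame_cleanup is cleared); on earlier failures the frame is
 * destroyed here, so callers must not touch it after this returns. */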

static int
mgmt_pmap_signin2_cbk(struct rpc_req *req, struct iovec *iov, int count,
                      void *myframe)
{
    pmap_signin_rsp rsp = {
        0,
    };
    glusterfs_ctx_t *ctx = NULL;
    call_frame_t *frame = NULL;
    int ret = 0;

    ctx = glusterfsd_ctx;
    frame = myframe;

    if (-1 == req->rpc_status) {
        ret = -1;
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_signin_rsp);
    if (ret < 0) {
        gf_log(frame->this->name, GF_LOG_ERROR, "XDR decode error");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto out;
    }

    if (-1 == rsp.op_ret) {
        gf_log(frame->this->name, GF_LOG_ERROR,
               "failed to register the port with glusterd");
        ret = -1;
        goto out;
    }

    ret = 0;
out:
    if (need_emancipate)
        emancipate(ctx, ret);

    STACK_DESTROY(frame->root);
    return 0;
}

static int
mgmt_pmap_signin_cbk(struct rpc_req *req, struct iovec *iov, int count,
                     void *myframe)
{
    pmap_signin_rsp rsp = {
        0,
    };
    call_frame_t *frame = NULL;
    int ret = 0;
    int emancipate_ret = -1;
    pmap_signin_req pmap_req = {
        0,
    };
    cmd_args_t *cmd_args = NULL;
    glusterfs_ctx_t *ctx = NULL;
    char brick_name[PATH_MAX] = {
        0,
    };
    gf_boolean_t frame_cleanup = _gf_true;

    frame = myframe;
    ctx = glusterfsd_ctx;
    cmd_args = &ctx->cmd_args;

    if (-1 == req->rpc_status) {
        ret = -1;
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_pmap_signin_rsp);
    if (ret < 0) {
        gf_log(frame->this->name, GF_LOG_ERROR, "XDR decode error");
        rsp.op_ret = -1;
        rsp.op_errno = EINVAL;
        goto out;
    }

    if (-1 == rsp.op_ret) {
        gf_log(frame->this->name, GF_LOG_ERROR,
               "failed to register the port with glusterd");
        ret = -1;
        goto out;
    }

    if (!cmd_args->brick_port2) {
        /* We are done with signin process */
        emancipate_ret = 0;
        goto out;
    }

    snprintf(brick_name, PATH_MAX, "%s.rdma", cmd_args->brick_name);
    pmap_req.port = cmd_args->brick_port2;
    pmap_req.brick = brick_name;

    ret = mgmt_submit_request(&pmap_req, frame, ctx, &clnt_pmap_prog,
                              GF_PMAP_SIGNIN, mgmt_pmap_signin2_cbk,
                              (xdrproc_t)xdr_pmap_signin_req);
    frame_cleanup = _gf_false;
    if (ret)
        goto out;

    return 0;

out:
    if (need_emancipate && (ret < 0 || !cmd_args->brick_port2))
        emancipate(ctx, emancipate_ret);

    if (frame_cleanup)
        STACK_DESTROY(frame->root);

    return 0;
}

static int
glusterfs_mgmt_pmap_signin(glusterfs_ctx_t *ctx)
{
    call_frame_t *frame = NULL;
    xlator_list_t **trav_p;
    xlator_t *top;
    pmap_signin_req req = {
        0,
    };
    int ret = -1;
    int emancipate_ret = -1;
    cmd_args_t *cmd_args = NULL;

    cmd_args = &ctx->cmd_args;

    if (!cmd_args->brick_port || !cmd_args->brick_name) {
        gf_log("fsd-mgmt", GF_LOG_DEBUG,
               "portmapper signin arguments not given");
        emancipate_ret = 0;
        goto out;
    }

    req.port = cmd_args->brick_port;
    req.pid = (int)getpid(); /* only glusterd2 consumes this */

    if (ctx->active) {
        top = ctx->active->first;
        for (trav_p = &top->children; *trav_p; trav_p = &(*trav_p)->next) {
            frame = create_frame(THIS, ctx->pool);
            req.brick = (*trav_p)->xlator->name;
            ret = mgmt_submit_request(&req, frame, ctx, &clnt_pmap_prog,
                                      GF_PMAP_SIGNIN, mgmt_pmap_signin_cbk,
                                      (xdrproc_t)xdr_pmap_signin_req);
            if (ret < 0) {
                gf_log(THIS->name, GF_LOG_WARNING,
                       "failed to send sign in request; brick = %s", req.brick);
            }
        }
    }

    /* unfortunately, the caller doesn't care about the returned value */

out:
    if (need_emancipate && ret < 0)
        emancipate(ctx, emancipate_ret);
    return ret;
}
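
/* Note (added commentary, not part of the original source): the signin
 * sequence sends one GF_PMAP_SIGNIN per brick child of the server graph.
 * If a second transport port is configured (cmd_args->brick_port2),
 * mgmt_pmap_signin_cbk() issues a follow-up signin for "<brick>.rdma".
 * When need_emancipate is set, emancipate() releases the waiting parent
 * process once the signin chain finishes, successfully or not. */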
2273

2274
static int
2275
mgmt_getspec_cbk(struct rpc_req *req, struct iovec *iov, int count,
2276
                 void *myframe)
2277
{
2278
    gf_getspec_rsp rsp = {
2279
        0,
2280
    };
2281
    call_frame_t *frame = NULL;
2282
    glusterfs_ctx_t *ctx = NULL;
2283
    int ret = 0, locked = 0;
2284
    ssize_t size = 0;
2285
    FILE *tmpfp = NULL;
2286
    char *volfile_id = NULL;
2287
    gf_volfile_t *volfile_obj = NULL;
2288
    gf_volfile_t *volfile_tmp = NULL;
2289
    char sha256_hash[SHA256_DIGEST_LENGTH] = {
2290
        0,
2291
    };
2292
    dict_t *dict = NULL;
2293
    char *servers_list = NULL;
2294
    int tmp_fd = -1;
2295
    char template[] = "/tmp/glfs.volfile.XXXXXX";
2296

2297
    frame = myframe;
2298
    ctx = frame->this->ctx;
2299

2300
    if (-1 == req->rpc_status) {
2301
        ret = -1;
2302
        goto out;
2303
    }
2304

2305
    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_getspec_rsp);
2306
    if (ret < 0) {
2307
        gf_log(frame->this->name, GF_LOG_ERROR, "XDR decoding error");
2308
        ret = -1;
2309
        goto out;
2310
    }
2311

2312
    if (-1 == rsp.op_ret) {
2313
        gf_log(frame->this->name, GF_LOG_ERROR,
2314
               "failed to get the 'volume file' from server");
2315
        ret = rsp.op_errno;
2316
        goto out;
2317
    }
2318

2319
    if (!rsp.xdata.xdata_len) {
2320
        goto volfile;
2321
    }
2322

2323
    dict = dict_new();
2324
    if (!dict) {
2325
        ret = -1;
2326
        errno = ENOMEM;
2327
        goto out;
2328
    }
2329

2330
    ret = dict_unserialize(rsp.xdata.xdata_val, rsp.xdata.xdata_len, &dict);
2331
    if (ret) {
2332
        gf_log(frame->this->name, GF_LOG_ERROR,
2333
               "failed to unserialize xdata to dictionary");
2334
        goto out;
2335
    }
2336
    dict->extra_stdfree = rsp.xdata.xdata_val;
2337

2338
    ret = dict_get_str(dict, "servers-list", &servers_list);
2339
    if (ret) {
2340
        /* Server list is set by glusterd at the time of getspec */
2341
        ret = dict_get_str(dict, GLUSTERD_BRICK_SERVERS, &servers_list);
2342
        if (ret)
2343
            goto volfile;
2344
    }
2345

2346
    gf_log(frame->this->name, GF_LOG_INFO,
2347
           "Received list of available volfile servers: %s", servers_list);
2348

2349
    ret = gf_process_getspec_servers_list(&ctx->cmd_args, servers_list);
2350
    if (ret) {
2351
        gf_log(frame->this->name, GF_LOG_ERROR,
2352
               "Failed (%s) to process servers list: %s", strerror(errno),
2353
               servers_list);
2354
    }
2355

2356
volfile:
2357
    size = rsp.op_ret;
2358
    volfile_id = frame->local;
2359
    if (mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {
2360
        ret = mgmt_process_volfile((const char *)rsp.spec, size, volfile_id,
2361
                                   dict);
2362
        goto post_graph_mgmt;
2363
    }
2364

2365
    ret = 0;
2366
    glusterfs_compute_sha256((const unsigned char *)rsp.spec, size,
2367
                             sha256_hash);
2368

2369
    LOCK(&ctx->volfile_lock);
2370
    {
2371
        locked = 1;
2372

2373
        list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
2374
        {
2375
            if (!strcmp(volfile_id, volfile_obj->vol_id)) {
2376
                if (!memcmp(sha256_hash, volfile_obj->volfile_checksum,
2377
                            sizeof(volfile_obj->volfile_checksum))) {
2378
                    UNLOCK(&ctx->volfile_lock);
2379
                    gf_log(frame->this->name, GF_LOG_INFO,
2380
                           "No change in volfile,"
2381
                           "continuing");
2382
                    goto post_unlock;
2383
                }
2384
                volfile_tmp = volfile_obj;
2385
                break;
2386
            }
2387
        }
2388

2389
        /* coverity[secure_temp] mkstemp uses 0600 as the mode */
2390
        tmp_fd = mkstemp(template);
2391
        if (-1 == tmp_fd) {
2392
            UNLOCK(&ctx->volfile_lock);
2393
            gf_smsg(frame->this->name, GF_LOG_ERROR, 0, glusterfsd_msg_39,
2394
                    "create template=%s", template, NULL);
2395
            ret = -1;
2396
            goto post_unlock;
2397
        }
2398

2399
        /* Calling unlink so that when the file is closed or program
2400
         * terminates the temporary file is deleted.
2401
         */
2402
        ret = sys_unlink(template);
2403
        if (ret < 0) {
2404
            gf_smsg(frame->this->name, GF_LOG_INFO, 0, glusterfsd_msg_39,
2405
                    "delete template=%s", template, NULL);
2406
            ret = 0;
2407
        }
2408

2409
        tmpfp = fdopen(tmp_fd, "w+b");
2410
        if (!tmpfp) {
2411
            ret = -1;
2412
            goto out;
2413
        }
2414

2415
        fwrite(rsp.spec, size, 1, tmpfp);
2416
        fflush(tmpfp);
2417
        if (ferror(tmpfp)) {
2418
            ret = -1;
2419
            goto out;
2420
        }
2421

2422
        /*  Check if only options have changed. No need to reload the
2423
         *  volfile if topology hasn't changed.
2424
         *  glusterfs_volfile_reconfigure returns 3 possible return states
2425
         *  return 0          =======> reconfiguration of options has succeeded
2426
         *  return 1          =======> the graph has to be reconstructed and all
2427
         * the xlators should be inited return -1(or -ve) =======> Some Internal
2428
         * Error occurred during the operation
2429
         */
2430

2431
        ret = glusterfs_volfile_reconfigure(tmpfp, ctx);
2432
        if (ret == 0) {
2433
            gf_log("glusterfsd-mgmt", GF_LOG_DEBUG,
2434
                   "No need to re-load volfile, reconfigure done");
2435
            if (!volfile_tmp) {
2436
                ret = -1;
2437
                UNLOCK(&ctx->volfile_lock);
2438
                gf_log("mgmt", GF_LOG_ERROR,
2439
                       "Graph reconfigure succeeded with out having "
2440
                       "checksum.");
2441
                goto post_unlock;
2442
            }
2443
            memcpy(volfile_tmp->volfile_checksum, sha256_hash,
2444
                   sizeof(volfile_tmp->volfile_checksum));
2445
            goto out;
2446
        }
2447

2448
        if (ret < 0) {
2449
            UNLOCK(&ctx->volfile_lock);
2450
            gf_log("glusterfsd-mgmt", GF_LOG_DEBUG, "Reconfigure failed !!");
2451
            goto post_unlock;
2452
        }
2453

2454
        ret = glusterfs_process_volfp(ctx, tmpfp);
2455
        /* tmpfp closed */
2456
        tmpfp = NULL;
2457
        tmp_fd = -1;
2458
        if (ret)
2459
            goto out;
2460

2461
        if (!volfile_tmp) {
2462
            volfile_tmp = GF_CALLOC(1, sizeof(gf_volfile_t),
2463
                                    gf_common_volfile_t);
2464
            if (!volfile_tmp) {
2465
                ret = -1;
2466
                goto out;
2467
            }
2468

2469
            INIT_LIST_HEAD(&volfile_tmp->volfile_list);
2470
            volfile_tmp->graph = ctx->active;
2471
            list_add(&volfile_tmp->volfile_list, &ctx->volfile_list);
2472
            snprintf(volfile_tmp->vol_id, sizeof(volfile_tmp->vol_id), "%s",
2473
                     volfile_id);
2474
        }
2475
        memcpy(volfile_tmp->volfile_checksum, sha256_hash,
2476
               sizeof(volfile_tmp->volfile_checksum));
2477
    }
2478
    UNLOCK(&ctx->volfile_lock);
2479

2480
    locked = 0;
2481

2482
post_graph_mgmt:
2483
    if (!is_mgmt_rpc_reconnect) {
2484
        need_emancipate = _gf_true;
2485
        glusterfs_mgmt_pmap_signin(ctx);
2486
        is_mgmt_rpc_reconnect = _gf_true;
2487
    }
2488

2489
out:
2490

2491
    if (locked)
2492
        UNLOCK(&ctx->volfile_lock);
2493
post_unlock:
2494
    GF_FREE(frame->local);
2495
    frame->local = NULL;
2496
    STACK_DESTROY(frame->root);
2497
    free(rsp.spec);
2498

2499
    if (dict)
2500
        dict_unref(dict);
2501

2502
    // Stop if server is running at an unsupported op-version
2503
    if (ENOTSUP == ret) {
2504
        gf_log("mgmt", GF_LOG_ERROR,
2505
               "Server is operating at an "
2506
               "op-version which is not supported");
2507
        cleanup_and_exit(0);
2508
    }
2509

2510
    if (ret && ctx && !ctx->active) {
2511
        /* Do it only for the first time */
2512
        /* Failed to get the volume file, something wrong,
2513
           restart the process */
2514
        gf_log("mgmt", GF_LOG_ERROR, "failed to fetch volume file (key:%s)",
2515
               ctx->cmd_args.volfile_id);
2516
        emancipate(ctx, ret);
2517
        cleanup_and_exit(0);
2518
    }
2519

2520
    if (tmpfp)
2521
        fclose(tmpfp);
2522
    else if (tmp_fd != -1)
2523
        sys_close(tmp_fd);
2524

2525
    return 0;
2526
}
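
/* Note (added commentary, not part of the original source): the decision
 * tree above is the heart of volfile handling. In short:
 *   - identical SHA-256 checksum          -> nothing to do;
 *   - glusterfs_volfile_reconfigure() == 0 -> options-only change, record
 *     the new checksum;
 *   - reconfigure() == 1                  -> topology changed, rebuild the
 *     graph via glusterfs_process_volfp();
 *   - first successful fetch              -> kick off the portmap signin
 *     and emancipate the parent process. */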

static int
glusterfs_volfile_fetch_one(glusterfs_ctx_t *ctx, char *volfile_id)
{
    cmd_args_t *cmd_args = NULL;
    gf_getspec_req req = {
        0,
    };
    int ret = 0;
    call_frame_t *frame = NULL;
    dict_t *dict = NULL;

    cmd_args = &ctx->cmd_args;
    if (!volfile_id) {
        volfile_id = ctx->cmd_args.volfile_id;
        if (!volfile_id) {
            gf_log(THIS->name, GF_LOG_ERROR,
                   "No volfile-id provided, erroring out");
            return -1;
        }
    }

    frame = create_frame(THIS, ctx->pool);
    if (!frame) {
        ret = -1;
        goto out;
    }

    req.key = volfile_id;
    req.flags = 0;
    /*
     * We are only storing one variable in local, hence using the same
     * variable. If multiple local variables are required, create a struct.
     */
    frame->local = gf_strdup(volfile_id);
    if (!frame->local) {
        ret = -1;
        goto out;
    }

    dict = dict_new();
    if (!dict) {
        ret = -1;
        goto out;
    }

    // Set the supported min and max op-versions, so glusterd can make a
    // decision
    ret = dict_set_int32(dict, "min-op-version", GD_OP_VERSION_MIN);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR,
               "Failed to set min-op-version"
               " in request dict");
        goto out;
    }

    ret = dict_set_int32(dict, "max-op-version", GD_OP_VERSION_MAX);
    if (ret) {
        gf_log(THIS->name, GF_LOG_ERROR,
               "Failed to set max-op-version"
               " in request dict");
        goto out;
    }

    /* Ask for a list of volfile (glusterd2 only) servers */
    if (GF_CLIENT_PROCESS == ctx->process_mode) {
        req.flags = req.flags | GF_GETSPEC_FLAG_SERVERS_LIST;
    }

    if (cmd_args->brick_name) {
        ret = dict_set_dynstr_with_alloc(dict, "brick_name",
                                         cmd_args->brick_name);
        if (ret) {
            gf_log(THIS->name, GF_LOG_ERROR,
                   "Failed to set brick_name in request dict");
            goto out;
        }
    }

    ret = dict_allocate_and_serialize(dict, &req.xdata.xdata_val,
                                      &req.xdata.xdata_len);
    if (ret < 0) {
        gf_log(THIS->name, GF_LOG_ERROR, "Failed to serialize dictionary");
        goto out;
    }

    ret = mgmt_submit_request(&req, frame, ctx, &clnt_handshake_prog,
                              GF_HNDSK_GETSPEC, mgmt_getspec_cbk,
                              (xdrproc_t)xdr_gf_getspec_req);

    /* In case of error the frame will be destroyed by rpc_clnt_submit */
    frame = NULL;
out:
    GF_FREE(req.xdata.xdata_val);
    if (dict)
        dict_unref(dict);
    if (ret && frame) {
        /* Free frame->local with GF_FREE directly, because we have not
         * used memget */
        GF_FREE(frame->local);
        frame->local = NULL;
        STACK_DESTROY(frame->root);
    }

    return ret;
}
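
/* Note (added commentary, not part of the original source): the request
 * dict doubles as an op-version handshake -- the process advertises
 * GD_OP_VERSION_MIN/GD_OP_VERSION_MAX, and if glusterd decides the
 * versions are incompatible the fetch fails with ENOTSUP, which
 * mgmt_getspec_cbk() above treats as fatal (cleanup_and_exit). */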

int
glusterfs_volfile_fetch(glusterfs_ctx_t *ctx)
{
    xlator_t *server_xl = NULL;
    xlator_list_t *trav;
    gf_volfile_t *volfile_obj = NULL;
    int ret = 0;

    LOCK(&ctx->volfile_lock);
    {
        if (ctx->active &&
            mgmt_is_multiplexed_daemon(ctx->cmd_args.process_name)) {
            list_for_each_entry(volfile_obj, &ctx->volfile_list, volfile_list)
            {
                ret |= glusterfs_volfile_fetch_one(ctx, volfile_obj->vol_id);
            }
            UNLOCK(&ctx->volfile_lock);
            return ret;
        }

        if (ctx->active) {
            server_xl = ctx->active->first;
            if (strcmp(server_xl->type, "protocol/server") != 0) {
                server_xl = NULL;
            }
        }
        if (!server_xl) {
            /* Startup (ctx->active not set) or non-server. */
            UNLOCK(&ctx->volfile_lock);
            return glusterfs_volfile_fetch_one(ctx, ctx->cmd_args.volfile_id);
        }

        ret = 0;
        for (trav = server_xl->children; trav; trav = trav->next) {
            ret |= glusterfs_volfile_fetch_one(ctx, trav->xlator->volfile_id);
        }
    }
    UNLOCK(&ctx->volfile_lock);
    return ret;
}
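
/* Note (added commentary, not part of the original source): the three
 * branches above cover (a) multiplexed daemons, which re-fetch every
 * volfile on ctx->volfile_list, (b) plain clients and startup, which
 * fetch the single configured volfile-id, and (c) brick servers, which
 * fetch one volfile per child of the protocol/server xlator. Return
 * codes are OR-ed, so one failed fetch marks the whole refresh failed. */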

static int32_t
glusterfs_rebalance_event_notify_cbk(struct rpc_req *req, struct iovec *iov,
                                     int count, void *myframe)
{
    gf_event_notify_rsp rsp = {
        0,
    };
    call_frame_t *frame = NULL;
    int ret = 0;

    frame = myframe;

    if (-1 == req->rpc_status) {
        gf_log(frame->this->name, GF_LOG_ERROR,
               "failed to get the rsp from server");
        ret = -1;
        goto out;
    }

    ret = xdr_to_generic(*iov, &rsp, (xdrproc_t)xdr_gf_event_notify_rsp);
    if (ret < 0) {
        gf_log(frame->this->name, GF_LOG_ERROR, "XDR decoding error");
        ret = -1;
        goto out;
    }

    if (-1 == rsp.op_ret) {
        gf_log(frame->this->name, GF_LOG_ERROR,
               "Received error (%s) from server", strerror(rsp.op_errno));
        ret = -1;
        goto out;
    }
out:
    free(rsp.dict.dict_val);  // malloced by xdr

    if (frame) {
        STACK_DESTROY(frame->root);
    }

    return ret;
}

static int32_t
glusterfs_rebalance_event_notify(dict_t *dict)
{
    glusterfs_ctx_t *ctx = NULL;
    gf_event_notify_req req = {
        0,
    };
    int32_t ret = -1;
    cmd_args_t *cmd_args = NULL;
    call_frame_t *frame = NULL;

    ctx = glusterfsd_ctx;
    cmd_args = &ctx->cmd_args;

    frame = create_frame(THIS, ctx->pool);

    req.op = GF_EN_DEFRAG_STATUS;

    if (dict) {
        ret = dict_set_str(dict, "volname", cmd_args->volfile_id);
        if (ret) {
            gf_log("", GF_LOG_ERROR, "failed to set volname");
        }
        ret = dict_allocate_and_serialize(dict, &req.dict.dict_val,
                                          &req.dict.dict_len);
        if (ret) {
            gf_log("", GF_LOG_ERROR, "failed to serialize dict");
        }
    }

    ret = mgmt_submit_request(&req, frame, ctx, &clnt_handshake_prog,
                              GF_HNDSK_EVENT_NOTIFY,
                              glusterfs_rebalance_event_notify_cbk,
                              (xdrproc_t)xdr_gf_event_notify_req);

    GF_FREE(req.dict.dict_val);
    return ret;
}

static int
mgmt_rpc_notify(struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
                void *data)
{
    xlator_t *this = NULL;
    glusterfs_ctx_t *ctx = NULL;
    int ret = 0;
    server_cmdline_t *server = NULL;
    rpc_transport_t *rpc_trans = NULL;
    int need_term = 0;
    int emval = 0;
    static int log_ctr1;
    static int log_ctr2;
    struct dnscache6 *dnscache = NULL;

    this = mydata;
    rpc_trans = rpc->conn.trans;
    ctx = this->ctx;

    switch (event) {
        case RPC_CLNT_DISCONNECT:
            if (rpc_trans->connect_failed) {
                GF_LOG_OCCASIONALLY(log_ctr1, "glusterfsd-mgmt", GF_LOG_ERROR,
                                    "failed to connect to remote-host: %s",
                                    ctx->cmd_args.volfile_server);
            } else {
                GF_LOG_OCCASIONALLY(log_ctr1, "glusterfsd-mgmt", GF_LOG_INFO,
                                    "disconnected from remote-host: %s",
                                    ctx->cmd_args.volfile_server);
            }

            if (!rpc->disabled) {
                /*
                 * Check if dnscache is exhausted for the current server
                 * and continue until the cache is exhausted
                 */
                dnscache = rpc_trans->dnscache;
                if (dnscache && dnscache->next) {
                    break;
                }
            }
            server = ctx->cmd_args.curr_server;

            if (ctx->cmd_args.brick_port && ctx->cmd_args.brick_name) {
                /* This process requires a portmap signin with glusterd.
                 * Currently the glusterd portmaps are local to each glusterd.
                 * Hence connecting the process to a different volfile server
                 * won't work well with such a process, so don't try to
                 * connect to a backup volfile server here.
                 */
                if (!ctx->active) {
                    need_term = 1;
                }
                emval = ENOTCONN;
                GF_LOG_OCCASIONALLY(log_ctr2, "glusterfsd-mgmt", GF_LOG_INFO,
                                    "Port-mapper is active, giving up on the "
                                    "backup volfile servers");
                break;
            }
            if (server->list.next == &ctx->cmd_args.volfile_servers) {
                if (!ctx->active) {
                    need_term = 1;
                    gf_log("glusterfsd-mgmt", GF_LOG_INFO,
                           "Exhausted all volfile servers, Exiting");
                    emval = ENOTCONN;
                    break;
                } else {
                    server = list_first_entry(&ctx->cmd_args.volfile_servers,
                                              typeof(*server), list);
                    emval = ENOTCONN;
                    GF_LOG_OCCASIONALLY(
                        log_ctr2, "glusterfsd-mgmt", GF_LOG_INFO,
                        "Exhausted all volfile servers, retrying again!");
                }
            } else {
                server = list_entry(server->list.next, typeof(*server), list);
            }
            ctx->cmd_args.curr_server = server;
            ctx->cmd_args.volfile_server = server->volfile_server;

            ret = dict_set_str(rpc_trans->options, "remote-host",
                               server->volfile_server);
            if (ret != 0) {
                gf_log("glusterfsd-mgmt", GF_LOG_ERROR,
                       "failed to set remote-host: %s", server->volfile_server);
                if (!ctx->active) {
                    need_term = 1;
                }
                emval = ENOTCONN;
                break;
            }
            gf_log("glusterfsd-mgmt", GF_LOG_INFO,
                   "connecting to next volfile server %s",
                   server->volfile_server);
            break;
        case RPC_CLNT_CONNECT:
            ret = glusterfs_volfile_fetch(ctx);
            if (ret) {
                emval = ret;
                if (!ctx->active) {
                    need_term = 1;
                    gf_log("glusterfsd-mgmt", GF_LOG_ERROR,
                           "failed to fetch volume file (key:%s)",
                           ctx->cmd_args.volfile_id);
                    break;
                }
            }

            if (is_mgmt_rpc_reconnect)
                glusterfs_mgmt_pmap_signin(ctx);

            break;
        default:
            break;
    }

    if (need_term) {
        emancipate(ctx, emval);
        cleanup_and_exit(1);
    }

    return 0;
}
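
/* Note (added commentary, not part of the original source): on
 * RPC_CLNT_DISCONNECT the client first works through any remaining DNS
 * entries for the current server, then rotates through
 * cmd_args.volfile_servers; processes that did a portmap signin (bricks)
 * never fail over, because glusterd portmaps are local to each glusterd.
 * On RPC_CLNT_CONNECT the volfile is (re-)fetched, and a reconnect also
 * repeats the portmap signin. */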

static int
glusterfs_rpcsvc_notify(rpcsvc_t *rpc, void *xl, rpcsvc_event_t event,
                        void *data)
{
    return 0;
}

int
glusterfs_listener_init(glusterfs_ctx_t *ctx)
{
    cmd_args_t *cmd_args = NULL;
    rpcsvc_t *rpc = NULL;
    dict_t *options = NULL;
    int ret = -1;

    cmd_args = &ctx->cmd_args;

    if (ctx->listener)
        return 0;

    if (!cmd_args->sock_file)
        return 0;

    options = dict_new();
    if (!options)
        goto out;

    ret = rpcsvc_transport_unix_options_build(options, cmd_args->sock_file);
    if (ret)
        goto out;

    rpc = rpcsvc_init(THIS, ctx, options, 8);
    if (rpc == NULL) {
        goto out;
    }

    ret = rpcsvc_register_notify(rpc, glusterfs_rpcsvc_notify, THIS);
    if (ret) {
        goto out;
    }

    ret = rpcsvc_create_listeners(rpc, options, "glusterfsd");
    if (ret < 1) {
        goto out;
    }

    ret = rpcsvc_program_register(rpc, &glusterfs_mop_prog, _gf_false);
    if (ret) {
        goto out;
    }

    ctx->listener = rpc;

out:
    if (options)
        dict_unref(options);
    return ret;
}
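
/* Note (added commentary, not part of the original source): this listener
 * is the unix-domain socket (cmd_args->sock_file) on which glusterd sends
 * the brick operations registered above in glusterfs_mop_prog; note that
 * rpcsvc_create_listeners() returns the number of listeners created,
 * hence the `ret < 1` error check. */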

int
glusterfs_mgmt_notify(int32_t op, void *data, ...)
{
    int ret = 0;
    switch (op) {
        case GF_EN_DEFRAG_STATUS:
            ret = glusterfs_rebalance_event_notify((dict_t *)data);
            break;

        default:
            gf_log("", GF_LOG_ERROR, "Invalid op");
            break;
    }

    return ret;
}

int
glusterfs_mgmt_init(glusterfs_ctx_t *ctx)
{
    cmd_args_t *cmd_args = NULL;
    struct rpc_clnt *rpc = NULL;
    dict_t *options = NULL;
    int ret = -1;
    int port = GF_DEFAULT_BASE_PORT;
    char *host = NULL;
    xlator_cmdline_option_t *opt = NULL;

    cmd_args = &ctx->cmd_args;
    GF_VALIDATE_OR_GOTO(THIS->name, cmd_args->volfile_server, out);

    if (ctx->mgmt)
        return 0;

    options = dict_new();
    if (!options)
        goto out;

    if (cmd_args->volfile_server_port)
        port = cmd_args->volfile_server_port;

    host = cmd_args->volfile_server;

    if (cmd_args->volfile_server_transport &&
        !strcmp(cmd_args->volfile_server_transport, "unix")) {
        ret = rpc_transport_unix_options_build(options, host, 0);
    } else {
        opt = find_xlator_option_in_cmd_args_t("address-family", cmd_args);
        ret = rpc_transport_inet_options_build(options, host, port,
                                               (opt ? opt->value : NULL));
    }
    if (ret)
        goto out;

    /* Explicitly turn on encrypted transport. */
    if (ctx->secure_mgmt) {
        ret = dict_set_dynstr_with_alloc(options,
                                         "transport.socket.ssl-enabled", "yes");
        if (ret) {
            gf_log(THIS->name, GF_LOG_ERROR,
                   "failed to set 'transport.socket.ssl-enabled' "
                   "in options dict");
            goto out;
        }

        ctx->ssl_cert_depth = glusterfs_read_secure_access_file();
    }

    rpc = rpc_clnt_new(options, THIS, THIS->name, 8);
    if (!rpc) {
        ret = -1;
        gf_log(THIS->name, GF_LOG_WARNING, "failed to create rpc clnt");
        goto out;
    }

    ret = rpc_clnt_register_notify(rpc, mgmt_rpc_notify, THIS);
    if (ret) {
        gf_log(THIS->name, GF_LOG_WARNING,
               "failed to register notify function");
        goto out;
    }

    ret = rpcclnt_cbk_program_register(rpc, &mgmt_cbk_prog, THIS);
    if (ret) {
        gf_log(THIS->name, GF_LOG_WARNING,
               "failed to register callback function");
        goto out;
    }

    ctx->notify = glusterfs_mgmt_notify;

    /* This value should be set before doing the 'rpc_clnt_start()' as
       the notify function uses this variable */
    ctx->mgmt = rpc;

    ret = rpc_clnt_start(rpc);
out:
    if (options)
        dict_unref(options);
    return ret;
}
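
/* Illustrative boot sequence (added commentary, not part of the original
 * source): a daemon typically calls glusterfs_mgmt_init(ctx) once at
 * startup. The function builds the transport options (unix or inet,
 * optionally SSL-enabled for secure_mgmt), registers mgmt_rpc_notify()
 * and the callback program, publishes ctx->mgmt *before* rpc_clnt_start()
 * because the notify path dereferences it, and then connects. The
 * resulting RPC_CLNT_CONNECT event drives glusterfs_volfile_fetch(),
 * which in turn sets up the graph and the portmap signin. */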