/*
   Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
   This file is part of GlusterFS.

   This file is licensed to you under your choice of the GNU Lesser
   General Public License, version 3 or any later version (LGPLv3 or
   later), or the GNU General Public License, version 2 (GPLv2), in all
   cases as published by the Free Software Foundation.
*/

#include "glusterd-utils.h"
#include "glusterd-locks.h"
#include "glusterd-mgmt.h"
#include "glusterd-op-sm.h"
#include "glusterd-messages.h"

static int
18
glusterd_mgmt_v3_null(rpcsvc_request_t *req)
19
{
20
    return 0;
21
}
22

23
static int
24
glusterd_mgmt_v3_lock_send_resp(rpcsvc_request_t *req, int32_t status,
25
                                uint32_t op_errno)
26
{
27
    gd1_mgmt_v3_lock_rsp rsp = {
28
        {0},
29
    };
30
    int ret = -1;
31

32
    GF_ASSERT(req);
33

34
    rsp.op_ret = status;
35
    if (rsp.op_ret)
36
        rsp.op_errno = op_errno;
37

38
    glusterd_get_uuid(&rsp.uuid);
39

40
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
41
                                (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
42

43
    gf_msg_debug(THIS->name, 0, "Responded to mgmt_v3 lock, ret: %d", ret);
44

45
    return ret;
46
}
47

48
static int
49
glusterd_synctasked_mgmt_v3_lock(rpcsvc_request_t *req,
50
                                 gd1_mgmt_v3_lock_req *lock_req,
51
                                 glusterd_op_lock_ctx_t *ctx)
52
{
53
    int32_t ret = -1;
54
    xlator_t *this = THIS;
55
    uint32_t op_errno = 0;
56

57
    GF_ASSERT(req);
58
    GF_ASSERT(ctx);
59
    GF_ASSERT(ctx->dict);
60

61
    /* Trying to acquire multiple mgmt_v3 locks */
62
    ret = glusterd_multiple_mgmt_v3_lock(ctx->dict, ctx->uuid, &op_errno);
63
    if (ret)
64
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
65
               "Failed to acquire mgmt_v3 locks for %s", uuid_utoa(ctx->uuid));
66

67
    ret = glusterd_mgmt_v3_lock_send_resp(req, ret, op_errno);
68

69
    gf_msg_trace(this->name, 0, "Returning %d", ret);
70
    return ret;
71
}
72

73
static int
74
glusterd_op_state_machine_mgmt_v3_lock(rpcsvc_request_t *req,
75
                                       gd1_mgmt_v3_lock_req *lock_req,
76
                                       glusterd_op_lock_ctx_t *ctx)
77
{
78
    int32_t ret = -1;
79
    xlator_t *this = THIS;
80
    glusterd_op_info_t txn_op_info = {
81
        GD_OP_STATE_DEFAULT,
82
    };
83

84
    GF_ASSERT(req);
85

86
    glusterd_txn_opinfo_init(&txn_op_info, 0, &lock_req->op, ctx->dict, req);
87

88
    ret = glusterd_set_txn_opinfo(&lock_req->txn_id, &txn_op_info);
89
    if (ret) {
90
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OPINFO_SET_FAIL,
91
               "Unable to set transaction's opinfo");
92
        goto out;
93
    }
94

95
    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_LOCK, &lock_req->txn_id, ctx);
96
    if (ret)
97
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_EVENT_LOCK_FAIL,
98
               "Failed to inject event GD_OP_EVENT_LOCK");
99

100
out:
101
    glusterd_friend_sm();
102
    glusterd_op_sm();
103

104
    gf_msg_trace(this->name, 0, "Returning %d", ret);
105
    return ret;
106
}
107

108
/* Handler body for GLUSTERD_MGMT_V3_LOCK.  Decodes the request,
 * verifies the sender is a cluster peer, builds a lock ctx and then
 * dispatches to either the synctask path (responds inline, ctx stays
 * ours to free) or the op state machine (takes ownership of ctx on
 * success).  Return-code protocol: returning non-zero tells the RPC
 * layer the handler did not consume req. */
static int
glusterd_handle_mgmt_v3_lock_fn(rpcsvc_request_t *req)
{
    gd1_mgmt_v3_lock_req lock_req = {
        {0},
    };
    int32_t ret = -1;
    glusterd_op_lock_ctx_t *ctx = NULL;
    xlator_t *this = THIS;
    gf_boolean_t is_synctasked = _gf_false;
    /* set when the synctask path is taken: that path never owns ctx */
    gf_boolean_t free_ctx = _gf_false;
    glusterd_conf_t *conf = NULL;
    time_t timeout = 0;

    conf = this->private;
    GF_ASSERT(conf);
    GF_ASSERT(req);

    ret = xdr_to_generic(req->msg[0], &lock_req,
                         (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
               "Failed to decode lock "
               "request received from peer");
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    gf_msg_debug(this->name, 0,
                 "Received mgmt_v3 lock req "
                 "from uuid: %s",
                 uuid_utoa(lock_req.uuid));

    /* Only nodes already in the cluster may request locks. */
    if (glusterd_peerinfo_find_by_uuid(lock_req.uuid) == NULL) {
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
               "%s doesn't "
               "belong to the cluster. Ignoring request.",
               uuid_utoa(lock_req.uuid));
        ret = -1;
        goto out;
    }

    ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t);
    if (!ctx) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
        ret = -1;
        goto out;
    }

    gf_uuid_copy(ctx->uuid, lock_req.uuid);
    ctx->req = req;

    ctx->dict = dict_new();
    if (!ctx->dict) {
        gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
        ret = -1;
        goto out;
    }

    ret = dict_unserialize(lock_req.dict.dict_val, lock_req.dict.dict_len,
                           &ctx->dict);
    if (ret) {
        gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                NULL);
        goto out;
    }

    /* Cli will add timeout key to dict if the default timeout is
     * other than 2 minutes. Here we use this value to check whether
     * mgmt_v3_lock_timeout should be set to default value or we
     * need to change the value according to timeout value
     * i.e, timeout + 120 seconds. */
    ret = dict_get_time(ctx->dict, "timeout", &timeout);
    if (!ret)
        conf->mgmt_v3_lock_timeout = timeout + 120;

    is_synctasked = dict_get_str_boolean(ctx->dict, "is_synctasked", _gf_false);
    if (is_synctasked) {
        ret = glusterd_synctasked_mgmt_v3_lock(req, &lock_req, ctx);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
                   "Failed to acquire mgmt_v3_locks");
            /* Ignore the return code, as it shouldn't be propagated
             * from the handler function so as to avoid double
             * deletion of the req
             */
            ret = 0;
        }

        /* The above function does not take ownership of ctx.
         * Therefore we need to free the ctx explicitly. */
        free_ctx = _gf_true;
    } else {
        /* Shouldn't ignore the return code here, and it should
         * be propagated from the handler function as in failure
         * case it doesn't delete the req object
         */
        ret = glusterd_op_state_machine_mgmt_v3_lock(req, &lock_req, ctx);
        if (ret)
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_LOCK_GET_FAIL,
                   "Failed to acquire mgmt_v3_locks");
    }

out:

    /* Free ctx on any failure, or on the synctask path which never
     * hands ctx off to the state machine. */
    if (ctx && (ret || free_ctx)) {
        if (ctx->dict)
            dict_unref(ctx->dict);

        GF_FREE(ctx);
    }

    /* dict_val was allocated by the XDR layer during decode. */
    free(lock_req.dict.dict_val);

    gf_msg_trace(this->name, 0, "Returning %d", ret);
    return ret;
}

static int
227
glusterd_mgmt_v3_pre_validate_send_resp(rpcsvc_request_t *req, int32_t op,
228
                                        int32_t status, char *op_errstr,
229
                                        dict_t *rsp_dict, uint32_t op_errno)
230
{
231
    gd1_mgmt_v3_pre_val_rsp rsp = {
232
        {0},
233
    };
234
    int ret = -1;
235
    xlator_t *this = THIS;
236

237
    GF_ASSERT(req);
238

239
    rsp.op_ret = status;
240
    glusterd_get_uuid(&rsp.uuid);
241
    rsp.op = op;
242
    rsp.op_errno = op_errno;
243
    if (op_errstr)
244
        rsp.op_errstr = op_errstr;
245
    else
246
        rsp.op_errstr = "";
247

248
    ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
249
                                      &rsp.dict.dict_len);
250
    if (ret < 0) {
251
        gf_smsg(this->name, GF_LOG_ERROR, 0,
252
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
253
        goto out;
254
    }
255

256
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
257
                                (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_rsp);
258

259
    GF_FREE(rsp.dict.dict_val);
260
out:
261
    gf_msg_debug(this->name, 0, "Responded to pre validation, ret: %d", ret);
262
    return ret;
263
}
264

265
static int
266
glusterd_handle_pre_validate_fn(rpcsvc_request_t *req)
267
{
268
    int32_t ret = -1;
269
    gd1_mgmt_v3_pre_val_req op_req = {
270
        {0},
271
    };
272
    xlator_t *this = THIS;
273
    char *op_errstr = NULL;
274
    dict_t *dict = NULL;
275
    dict_t *rsp_dict = NULL;
276
    uint32_t op_errno = 0;
277

278
    GF_ASSERT(req);
279

280
    ret = xdr_to_generic(req->msg[0], &op_req,
281
                         (xdrproc_t)xdr_gd1_mgmt_v3_pre_val_req);
282
    if (ret < 0) {
283
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
284
               "Failed to decode pre validation "
285
               "request received from peer");
286
        req->rpc_err = GARBAGE_ARGS;
287
        goto out;
288
    }
289

290
    if (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL) {
291
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
292
               "%s doesn't "
293
               "belong to the cluster. Ignoring request.",
294
               uuid_utoa(op_req.uuid));
295
        ret = -1;
296
        goto out;
297
    }
298

299
    dict = dict_new();
300
    if (!dict) {
301
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
302
        goto out;
303
    }
304

305
    ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
306
    if (ret) {
307
        gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
308
                NULL);
309
        goto out;
310
    }
311

312
    rsp_dict = dict_new();
313
    if (!rsp_dict) {
314
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
315
        return -1;
316
    }
317

318
    ret = gd_mgmt_v3_pre_validate_fn(op_req.op, dict, &op_errstr, rsp_dict,
319
                                     &op_errno);
320
    if (ret) {
321
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALIDATION_FAIL,
322
               "Pre Validation failed on operation %s", gd_op_list[op_req.op]);
323
    }
324

325
    ret = glusterd_mgmt_v3_pre_validate_send_resp(
326
        req, op_req.op, ret, op_errstr, rsp_dict, op_errno);
327
    if (ret) {
328
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_OP_RESP_FAIL,
329
               "Failed to send Pre Validation "
330
               "response for operation %s",
331
               gd_op_list[op_req.op]);
332
        goto out;
333
    }
334

335
out:
336
    if (op_errstr && (strcmp(op_errstr, "")))
337
        GF_FREE(op_errstr);
338

339
    free(op_req.dict.dict_val);
340

341
    if (dict)
342
        dict_unref(dict);
343

344
    if (rsp_dict)
345
        dict_unref(rsp_dict);
346

347
    /* Return 0 from handler to avoid double deletion of req obj */
348
    return 0;
349
}
350

351
static int
352
glusterd_mgmt_v3_brick_op_send_resp(rpcsvc_request_t *req, int32_t op,
353
                                    int32_t status, char *op_errstr,
354
                                    dict_t *rsp_dict)
355
{
356
    gd1_mgmt_v3_brick_op_rsp rsp = {
357
        {0},
358
    };
359
    int ret = -1;
360
    xlator_t *this = THIS;
361

362
    GF_ASSERT(req);
363

364
    rsp.op_ret = status;
365
    glusterd_get_uuid(&rsp.uuid);
366
    rsp.op = op;
367
    if (op_errstr)
368
        rsp.op_errstr = op_errstr;
369
    else
370
        rsp.op_errstr = "";
371

372
    ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
373
                                      &rsp.dict.dict_len);
374
    if (ret < 0) {
375
        gf_smsg(this->name, GF_LOG_ERROR, 0,
376
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
377
        goto out;
378
    }
379

380
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
381
                                (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_rsp);
382

383
    GF_FREE(rsp.dict.dict_val);
384
out:
385
    gf_msg_debug(this->name, 0, "Responded to brick op, ret: %d", ret);
386
    return ret;
387
}
388

389
static int
390
glusterd_handle_brick_op_fn(rpcsvc_request_t *req)
391
{
392
    int32_t ret = -1;
393
    gd1_mgmt_v3_brick_op_req op_req = {
394
        {0},
395
    };
396
    xlator_t *this = THIS;
397
    char *op_errstr = NULL;
398
    dict_t *dict = NULL;
399
    dict_t *rsp_dict = NULL;
400

401
    GF_ASSERT(req);
402

403
    ret = xdr_to_generic(req->msg[0], &op_req,
404
                         (xdrproc_t)xdr_gd1_mgmt_v3_brick_op_req);
405
    if (ret < 0) {
406
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
407
               "Failed to decode brick op "
408
               "request received from peer");
409
        req->rpc_err = GARBAGE_ARGS;
410
        goto out;
411
    }
412

413
    if (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL) {
414
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
415
               "%s doesn't "
416
               "belong to the cluster. Ignoring request.",
417
               uuid_utoa(op_req.uuid));
418
        ret = -1;
419
        goto out;
420
    }
421

422
    dict = dict_new();
423
    if (!dict) {
424
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
425
        goto out;
426
    }
427

428
    ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
429
    if (ret) {
430
        gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
431
                NULL);
432
        goto out;
433
    }
434

435
    rsp_dict = dict_new();
436
    if (!rsp_dict) {
437
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
438
        return -1;
439
    }
440

441
    ret = gd_mgmt_v3_brick_op_fn(op_req.op, dict, &op_errstr, rsp_dict);
442

443
    if (ret) {
444
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_BRICK_OP_FAIL,
445
               "Brick Op failed on operation %s", gd_op_list[op_req.op]);
446
    }
447

448
    ret = glusterd_mgmt_v3_brick_op_send_resp(req, op_req.op, ret, op_errstr,
449
                                              rsp_dict);
450
    if (ret) {
451
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_PRE_VALD_RESP_FAIL,
452
               "Failed to send brick op "
453
               "response for operation %s",
454
               gd_op_list[op_req.op]);
455
        goto out;
456
    }
457

458
out:
459
    if (op_errstr && (strcmp(op_errstr, "")))
460
        GF_FREE(op_errstr);
461

462
    free(op_req.dict.dict_val);
463

464
    if (dict)
465
        dict_unref(dict);
466

467
    if (rsp_dict)
468
        dict_unref(rsp_dict);
469

470
    /* Return 0 from handler to avoid double deletion of req obj */
471
    return 0;
472
}
473

474
static int
475
glusterd_mgmt_v3_commit_send_resp(rpcsvc_request_t *req, int32_t op,
476
                                  int32_t status, char *op_errstr,
477
                                  uint32_t op_errno, dict_t *rsp_dict)
478
{
479
    gd1_mgmt_v3_commit_rsp rsp = {
480
        {0},
481
    };
482
    int ret = -1;
483
    xlator_t *this = THIS;
484

485
    GF_ASSERT(req);
486

487
    rsp.op_ret = status;
488
    glusterd_get_uuid(&rsp.uuid);
489
    rsp.op = op;
490
    rsp.op_errno = op_errno;
491
    if (op_errstr)
492
        rsp.op_errstr = op_errstr;
493
    else
494
        rsp.op_errstr = "";
495

496
    ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
497
                                      &rsp.dict.dict_len);
498
    if (ret < 0) {
499
        gf_smsg(this->name, GF_LOG_ERROR, 0,
500
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
501
        goto out;
502
    }
503

504
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
505
                                (xdrproc_t)xdr_gd1_mgmt_v3_commit_rsp);
506

507
    GF_FREE(rsp.dict.dict_val);
508
out:
509
    gf_msg_debug(this->name, 0, "Responded to commit, ret: %d", ret);
510
    return ret;
511
}
512

513
static int
514
glusterd_handle_commit_fn(rpcsvc_request_t *req)
515
{
516
    int32_t ret = -1;
517
    gd1_mgmt_v3_commit_req op_req = {
518
        {0},
519
    };
520
    xlator_t *this = THIS;
521
    char *op_errstr = NULL;
522
    dict_t *dict = NULL;
523
    dict_t *rsp_dict = NULL;
524
    uint32_t op_errno = 0;
525

526
    GF_ASSERT(req);
527

528
    ret = xdr_to_generic(req->msg[0], &op_req,
529
                         (xdrproc_t)xdr_gd1_mgmt_v3_commit_req);
530
    if (ret < 0) {
531
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
532
               "Failed to decode commit "
533
               "request received from peer");
534
        req->rpc_err = GARBAGE_ARGS;
535
        goto out;
536
    }
537

538
    if (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL) {
539
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
540
               "%s doesn't "
541
               "belong to the cluster. Ignoring request.",
542
               uuid_utoa(op_req.uuid));
543
        ret = -1;
544
        goto out;
545
    }
546

547
    dict = dict_new();
548
    if (!dict) {
549
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
550
        goto out;
551
    }
552

553
    ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
554
    if (ret) {
555
        gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
556
                NULL);
557
        goto out;
558
    }
559

560
    rsp_dict = dict_new();
561
    if (!rsp_dict) {
562
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
563
        return -1;
564
    }
565

566
    ret = gd_mgmt_v3_commit_fn(op_req.op, dict, &op_errstr, &op_errno,
567
                               rsp_dict);
568

569
    if (ret) {
570
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_COMMIT_OP_FAIL,
571
               "commit failed on operation %s", gd_op_list[op_req.op]);
572
    }
573

574
    ret = glusterd_mgmt_v3_commit_send_resp(req, op_req.op, ret, op_errstr,
575
                                            op_errno, rsp_dict);
576
    if (ret) {
577
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_OP_RESP_FAIL,
578
               "Failed to send commit "
579
               "response for operation %s",
580
               gd_op_list[op_req.op]);
581
        goto out;
582
    }
583

584
out:
585
    if (op_errstr && (strcmp(op_errstr, "")))
586
        GF_FREE(op_errstr);
587

588
    free(op_req.dict.dict_val);
589

590
    if (dict)
591
        dict_unref(dict);
592

593
    if (rsp_dict)
594
        dict_unref(rsp_dict);
595

596
    /* Return 0 from handler to avoid double deletion of req obj */
597
    return 0;
598
}
599

600
static int
601
glusterd_mgmt_v3_post_commit_send_resp(rpcsvc_request_t *req, int32_t op,
602
                                       int32_t status, char *op_errstr,
603
                                       uint32_t op_errno, dict_t *rsp_dict)
604
{
605
    gd1_mgmt_v3_post_commit_rsp rsp = {
606
        {0},
607
    };
608
    int ret = -1;
609
    xlator_t *this = THIS;
610

611
    GF_ASSERT(req);
612

613
    rsp.op_ret = status;
614
    glusterd_get_uuid(&rsp.uuid);
615
    rsp.op = op;
616
    rsp.op_errno = op_errno;
617
    if (op_errstr)
618
        rsp.op_errstr = op_errstr;
619
    else
620
        rsp.op_errstr = "";
621

622
    ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
623
                                      &rsp.dict.dict_len);
624
    if (ret < 0) {
625
        gf_smsg(this->name, GF_LOG_ERROR, 0,
626
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
627
        goto out;
628
    }
629

630
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
631
                                (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_rsp);
632

633
    GF_FREE(rsp.dict.dict_val);
634
out:
635
    gf_msg_debug(this->name, 0, "Responded to post commit, ret: %d", ret);
636
    return ret;
637
}
638

639
static int
640
glusterd_handle_post_commit_fn(rpcsvc_request_t *req)
641
{
642
    int32_t ret = -1;
643
    gd1_mgmt_v3_post_commit_req op_req = {
644
        {0},
645
    };
646
    xlator_t *this = THIS;
647
    char *op_errstr = NULL;
648
    dict_t *dict = NULL;
649
    dict_t *rsp_dict = NULL;
650
    uint32_t op_errno = 0;
651

652
    GF_ASSERT(req);
653

654
    ret = xdr_to_generic(req->msg[0], &op_req,
655
                         (xdrproc_t)xdr_gd1_mgmt_v3_post_commit_req);
656
    if (ret < 0) {
657
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
658
               "Failed to decode post commit "
659
               "request received from peer");
660
        req->rpc_err = GARBAGE_ARGS;
661
        goto out;
662
    }
663

664
    if (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL) {
665
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
666
               "%s doesn't "
667
               "belong to the cluster. Ignoring request.",
668
               uuid_utoa(op_req.uuid));
669
        ret = -1;
670
        goto out;
671
    }
672

673
    dict = dict_new();
674
    if (!dict) {
675
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
676
        goto out;
677
    }
678

679
    ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
680
    if (ret) {
681
        gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
682
                NULL);
683
        goto out;
684
    }
685

686
    rsp_dict = dict_new();
687
    if (!rsp_dict) {
688
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
689
        return -1;
690
    }
691

692
    ret = gd_mgmt_v3_post_commit_fn(op_req.op, dict, &op_errstr, &op_errno,
693
                                    rsp_dict);
694

695
    if (ret) {
696
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_COMMIT_OP_FAIL,
697
               "post commit failed on operation %s", gd_op_list[op_req.op]);
698
    }
699

700
    ret = glusterd_mgmt_v3_post_commit_send_resp(req, op_req.op, ret, op_errstr,
701
                                                 op_errno, rsp_dict);
702
    if (ret) {
703
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_OP_RESP_FAIL,
704
               "Failed to send post commit "
705
               "response for operation %s",
706
               gd_op_list[op_req.op]);
707
        goto out;
708
    }
709

710
out:
711
    if (op_errstr && (strcmp(op_errstr, "")))
712
        GF_FREE(op_errstr);
713

714
    free(op_req.dict.dict_val);
715

716
    if (dict)
717
        dict_unref(dict);
718

719
    if (rsp_dict)
720
        dict_unref(rsp_dict);
721

722
    /* Return 0 from handler to avoid double deletion of req obj */
723
    return 0;
724
}
725

726
static int
727
glusterd_mgmt_v3_post_validate_send_resp(rpcsvc_request_t *req, int32_t op,
728
                                         int32_t status, char *op_errstr,
729
                                         dict_t *rsp_dict)
730
{
731
    gd1_mgmt_v3_post_val_rsp rsp = {
732
        {0},
733
    };
734
    int ret = -1;
735
    xlator_t *this = THIS;
736

737
    GF_ASSERT(req);
738

739
    rsp.op_ret = status;
740
    glusterd_get_uuid(&rsp.uuid);
741
    rsp.op = op;
742
    if (op_errstr)
743
        rsp.op_errstr = op_errstr;
744
    else
745
        rsp.op_errstr = "";
746

747
    ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
748
                                      &rsp.dict.dict_len);
749
    if (ret < 0) {
750
        gf_smsg(this->name, GF_LOG_ERROR, 0,
751
                GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
752
        goto out;
753
    }
754

755
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
756
                                (xdrproc_t)xdr_gd1_mgmt_v3_post_val_rsp);
757

758
    GF_FREE(rsp.dict.dict_val);
759
out:
760
    gf_msg_debug(this->name, 0, "Responded to post validation, ret: %d", ret);
761
    return ret;
762
}
763

764
static int
765
glusterd_handle_post_validate_fn(rpcsvc_request_t *req)
766
{
767
    int32_t ret = -1;
768
    gd1_mgmt_v3_post_val_req op_req = {
769
        {0},
770
    };
771
    xlator_t *this = THIS;
772
    char *op_errstr = NULL;
773
    dict_t *dict = NULL;
774
    dict_t *rsp_dict = NULL;
775

776
    GF_ASSERT(req);
777

778
    ret = xdr_to_generic(req->msg[0], &op_req,
779
                         (xdrproc_t)xdr_gd1_mgmt_v3_post_val_req);
780
    if (ret < 0) {
781
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
782
               "Failed to decode post validation "
783
               "request received from peer");
784
        req->rpc_err = GARBAGE_ARGS;
785
        goto out;
786
    }
787

788
    if (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL) {
789
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
790
               "%s doesn't "
791
               "belong to the cluster. Ignoring request.",
792
               uuid_utoa(op_req.uuid));
793
        ret = -1;
794
        goto out;
795
    }
796

797
    dict = dict_new();
798
    if (!dict) {
799
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
800
        goto out;
801
    }
802

803
    ret = dict_unserialize(op_req.dict.dict_val, op_req.dict.dict_len, &dict);
804
    if (ret) {
805
        gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
806
                NULL);
807
        goto out;
808
    }
809

810
    rsp_dict = dict_new();
811
    if (!rsp_dict) {
812
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
813
        return -1;
814
    }
815

816
    ret = gd_mgmt_v3_post_validate_fn(op_req.op, op_req.op_ret, dict,
817
                                      &op_errstr, rsp_dict);
818

819
    if (ret) {
820
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_POST_VALIDATION_FAIL,
821
               "Post Validation failed on operation %s", gd_op_list[op_req.op]);
822
    }
823

824
    ret = glusterd_mgmt_v3_post_validate_send_resp(req, op_req.op, ret,
825
                                                   op_errstr, rsp_dict);
826
    if (ret) {
827
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_OP_RESP_FAIL,
828
               "Failed to send Post Validation "
829
               "response for operation %s",
830
               gd_op_list[op_req.op]);
831
        goto out;
832
    }
833

834
out:
835
    if (op_errstr && (strcmp(op_errstr, "")))
836
        GF_FREE(op_errstr);
837

838
    free(op_req.dict.dict_val);
839

840
    if (dict)
841
        dict_unref(dict);
842

843
    if (rsp_dict)
844
        dict_unref(rsp_dict);
845

846
    /* Return 0 from handler to avoid double deletion of req obj */
847
    return 0;
848
}
849

850
static int
851
glusterd_mgmt_v3_unlock_send_resp(rpcsvc_request_t *req, int32_t status)
852
{
853
    gd1_mgmt_v3_unlock_rsp rsp = {
854
        {0},
855
    };
856
    int ret = -1;
857

858
    GF_ASSERT(req);
859

860
    rsp.op_ret = status;
861
    if (rsp.op_ret)
862
        rsp.op_errno = errno;
863

864
    glusterd_get_uuid(&rsp.uuid);
865

866
    ret = glusterd_submit_reply(req, &rsp, NULL, 0, NULL,
867
                                (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
868

869
    gf_msg_debug(THIS->name, 0, "Responded to mgmt_v3 unlock, ret: %d", ret);
870

871
    return ret;
872
}
873

874
static int
875
glusterd_syctasked_mgmt_v3_unlock(rpcsvc_request_t *req,
876
                                  gd1_mgmt_v3_unlock_req *unlock_req,
877
                                  glusterd_op_lock_ctx_t *ctx)
878
{
879
    int32_t ret = -1;
880
    xlator_t *this = THIS;
881

882
    GF_ASSERT(req);
883
    GF_ASSERT(ctx);
884

885
    /* Trying to release multiple mgmt_v3 locks */
886
    ret = glusterd_multiple_mgmt_v3_unlock(ctx->dict, ctx->uuid);
887
    if (ret) {
888
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
889
               "Failed to release mgmt_v3 locks for %s", uuid_utoa(ctx->uuid));
890
    }
891

892
    ret = glusterd_mgmt_v3_unlock_send_resp(req, ret);
893

894
    gf_msg_trace(this->name, 0, "Returning %d", ret);
895
    return ret;
896
}
897

898
static int
899
glusterd_op_state_machine_mgmt_v3_unlock(rpcsvc_request_t *req,
900
                                         gd1_mgmt_v3_unlock_req *lock_req,
901
                                         glusterd_op_lock_ctx_t *ctx)
902
{
903
    int32_t ret = -1;
904
    xlator_t *this = THIS;
905

906
    GF_ASSERT(req);
907

908
    ret = glusterd_op_sm_inject_event(GD_OP_EVENT_UNLOCK, &lock_req->txn_id,
909
                                      ctx);
910
    if (ret)
911
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_OP_EVENT_UNLOCK_FAIL,
912
               "Failed to inject event GD_OP_EVENT_UNLOCK");
913

914
    glusterd_friend_sm();
915
    glusterd_op_sm();
916

917
    gf_msg_trace(this->name, 0, "Returning %d", ret);
918
    return ret;
919
}
920

921
/* Handler body for GLUSTERD_MGMT_V3_UNLOCK.  Decodes the request,
 * verifies the sender is a cluster peer, builds an unlock ctx and
 * dispatches to either the synctask path (responds inline, ctx stays
 * ours to free) or the op state machine (takes ownership of ctx on
 * success).  Return-code protocol: returning non-zero tells the RPC
 * layer the handler did not consume req. */
static int
glusterd_handle_mgmt_v3_unlock_fn(rpcsvc_request_t *req)
{
    gd1_mgmt_v3_unlock_req lock_req = {
        {0},
    };
    int32_t ret = -1;
    glusterd_op_lock_ctx_t *ctx = NULL;
    xlator_t *this = THIS;
    gf_boolean_t is_synctasked = _gf_false;
    /* set when the synctask path is taken: that path never owns ctx */
    gf_boolean_t free_ctx = _gf_false;

    GF_ASSERT(req);

    ret = xdr_to_generic(req->msg[0], &lock_req,
                         (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL,
               "Failed to decode unlock "
               "request received from peer");
        req->rpc_err = GARBAGE_ARGS;
        goto out;
    }

    gf_msg_debug(this->name, 0,
                 "Received volume unlock req "
                 "from uuid: %s",
                 uuid_utoa(lock_req.uuid));

    /* Only nodes already in the cluster may release locks. */
    if (glusterd_peerinfo_find_by_uuid(lock_req.uuid) == NULL) {
        gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
               "%s doesn't "
               "belong to the cluster. Ignoring request.",
               uuid_utoa(lock_req.uuid));
        ret = -1;
        goto out;
    }

    ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_op_lock_ctx_t);
    if (!ctx) {
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_NO_MEMORY, NULL);
        ret = -1;
        goto out;
    }

    gf_uuid_copy(ctx->uuid, lock_req.uuid);
    ctx->req = req;

    ctx->dict = dict_new();
    if (!ctx->dict) {
        gf_smsg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_CREATE_FAIL, NULL);
        ret = -1;
        goto out;
    }

    ret = dict_unserialize(lock_req.dict.dict_val, lock_req.dict.dict_len,
                           &ctx->dict);
    if (ret) {
        gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
                NULL);
        goto out;
    }

    is_synctasked = dict_get_str_boolean(ctx->dict, "is_synctasked", _gf_false);
    if (is_synctasked) {
        ret = glusterd_syctasked_mgmt_v3_unlock(req, &lock_req, ctx);
        if (ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
                   "Failed to release mgmt_v3_locks");
            /* Ignore the return code, as it shouldn't be propagated
             * from the handler function so as to avoid double
             * deletion of the req
             */
            ret = 0;
        }

        /* The above function does not take ownership of ctx.
         * Therefore we need to free the ctx explicitly. */
        free_ctx = _gf_true;
    } else {
        /* Shouldn't ignore the return code here, and it should
         * be propagated from the handler function as in failure
         * case it doesn't delete the req object
         */
        ret = glusterd_op_state_machine_mgmt_v3_unlock(req, &lock_req, ctx);
        if (ret)
            gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_MGMTV3_UNLOCK_FAIL,
                   "Failed to release mgmt_v3_locks");
    }

out:

    /* Free ctx on any failure, or on the synctask path which never
     * hands ctx off to the state machine. */
    if (ctx && (ret || free_ctx)) {
        if (ctx->dict)
            dict_unref(ctx->dict);

        GF_FREE(ctx);
    }

    /* dict_val was allocated by the XDR layer during decode. */
    free(lock_req.dict.dict_val);

    gf_msg_trace(this->name, 0, "Returning %d", ret);
    return ret;
}

/* RPC actor: runs glusterd_handle_mgmt_v3_lock_fn under the big lock. */
int
glusterd_handle_mgmt_v3_lock(rpcsvc_request_t *req)
{
    return glusterd_big_locked_handler(req, glusterd_handle_mgmt_v3_lock_fn);
}

static int
1033
glusterd_handle_pre_validate(rpcsvc_request_t *req)
1034
{
1035
    return glusterd_big_locked_handler(req, glusterd_handle_pre_validate_fn);
1036
}
1037

1038
static int
1039
glusterd_handle_brick_op(rpcsvc_request_t *req)
1040
{
1041
    return glusterd_big_locked_handler(req, glusterd_handle_brick_op_fn);
1042
}
1043

1044
static int
1045
glusterd_handle_commit(rpcsvc_request_t *req)
1046
{
1047
    return glusterd_big_locked_handler(req, glusterd_handle_commit_fn);
1048
}
1049

1050
static int
1051
glusterd_handle_post_commit(rpcsvc_request_t *req)
1052
{
1053
    return glusterd_big_locked_handler(req, glusterd_handle_post_commit_fn);
1054
}
1055

1056
static int
1057
glusterd_handle_post_validate(rpcsvc_request_t *req)
1058
{
1059
    return glusterd_big_locked_handler(req, glusterd_handle_post_validate_fn);
1060
}
1061

1062
int
1063
glusterd_handle_mgmt_v3_unlock(rpcsvc_request_t *req)
1064
{
1065
    return glusterd_big_locked_handler(req, glusterd_handle_mgmt_v3_unlock_fn);
1066
}
1067

1068
/* Actor (dispatch) table for the mgmt v3 RPC program: one entry per
 * GLUSTERD_MGMT_V3_* procedure number, indexed by that number via
 * designated initializers.  Each entry carries the procedure name, the
 * handler invoked for incoming requests, a NULL third field (unused
 * here — presumably a vector sizer; confirm against rpcsvc_actor_t),
 * the procedure number again, DRC_NA (no duplicate-request caching),
 * and a trailing 0 flag. */
static rpcsvc_actor_t gd_svc_mgmt_v3_actors[GLUSTERD_MGMT_V3_MAXVALUE] = {
    [GLUSTERD_MGMT_V3_NULL] = {"NULL", glusterd_mgmt_v3_null, NULL,
                               GLUSTERD_MGMT_V3_NULL, DRC_NA, 0},
    [GLUSTERD_MGMT_V3_LOCK] = {"MGMT_V3_LOCK", glusterd_handle_mgmt_v3_lock,
                               NULL, GLUSTERD_MGMT_V3_LOCK, DRC_NA, 0},
    [GLUSTERD_MGMT_V3_PRE_VALIDATE] = {"PRE_VAL", glusterd_handle_pre_validate,
                                       NULL, GLUSTERD_MGMT_V3_PRE_VALIDATE,
                                       DRC_NA, 0},
    [GLUSTERD_MGMT_V3_BRICK_OP] = {"BRCK_OP", glusterd_handle_brick_op, NULL,
                                   GLUSTERD_MGMT_V3_BRICK_OP, DRC_NA, 0},
    [GLUSTERD_MGMT_V3_COMMIT] = {"COMMIT", glusterd_handle_commit, NULL,
                                 GLUSTERD_MGMT_V3_COMMIT, DRC_NA, 0},
    [GLUSTERD_MGMT_V3_POST_VALIDATE] = {"POST_VAL",
                                        glusterd_handle_post_validate, NULL,
                                        GLUSTERD_MGMT_V3_POST_VALIDATE, DRC_NA,
                                        0},
    [GLUSTERD_MGMT_V3_UNLOCK] = {"MGMT_V3_UNLOCK",
                                 glusterd_handle_mgmt_v3_unlock, NULL,
                                 GLUSTERD_MGMT_V3_UNLOCK, DRC_NA, 0},
    [GLUSTERD_MGMT_V3_POST_COMMIT] = {"POST_COMMIT",
                                      glusterd_handle_post_commit, NULL,
                                      GLUSTERD_MGMT_V3_POST_COMMIT, DRC_NA, 0},

};
1092

1093
/* RPC program descriptor registering the mgmt v3 actor table above
 * under program number GD_MGMT_PROGRAM, version GD_MGMT_V3_VERSION.
 * .synctask = _gf_true marks the program's actors for synctask
 * execution (NOTE(review): exact threading semantics are defined by
 * rpcsvc; confirm against struct rpcsvc_program). */
struct rpcsvc_program gd_svc_mgmt_v3_prog = {
    .progname = "GlusterD svc mgmt v3",
    .prognum = GD_MGMT_PROGRAM,
    .progver = GD_MGMT_V3_VERSION,
    .numactors = GLUSTERD_MGMT_V3_MAXVALUE,
    .actors = gd_svc_mgmt_v3_actors,
    .synctask = _gf_true,
};
1101

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.