glusterfs

Форк
0
/
server-handshake.c 
881 строка · 26.7 Кб
1
/*
2
  Copyright (c) 2010-2013 Red Hat, Inc. <http://www.redhat.com>
3
  This file is part of GlusterFS.
4

5
  This file is licensed to you under your choice of the GNU Lesser
6
  General Public License, version 3 or any later version (LGPLv3 or
7
  later), or the GNU General Public License, version 2 (GPLv2), in all
8
  cases as published by the Free Software Foundation.
9
*/
10

11
#include "server.h"
12
#include "server-helpers.h"
13
#include "rpc-common-xdr.h"
14
#include "glusterfs4-xdr.h"
15
#include <glusterfs/compat-errno.h>
16
#include "glusterfs3.h"
17
#include "authenticate.h"
18
#include "server-messages.h"
19
#include <glusterfs/syscall.h>
20
#include <glusterfs/events.h>
21
#include <glusterfs/syncop.h>
22

23
struct __get_xl_struct {
24
    const char *name;
25
    xlator_t *reply;
26
};
27
int
28
gf_compare_client_version(rpcsvc_request_t *req, int fop_prognum,
29
                          int mgmt_prognum)
30
{
31
    int ret = 0;
32
    /* TODO: think.. */
33

34
    return ret;
35
}
36

37
int
38
server_getspec(rpcsvc_request_t *req)
39
{
40
    int32_t ret = 0;
41
    int32_t op_errno = ENOENT;
42
    gf_getspec_req args = {
43
        0,
44
    };
45
    gf_getspec_rsp rsp = {
46
        0,
47
    };
48
    struct stat stbuf = {
49
        0,
50
    };
51
    char volpath[PATH_MAX] = {
52
        0,
53
    };
54
    int32_t spec_fd = -1;
55
    xlator_t *this = req->svc->xl;
56
    server_conf_t *conf = this->private;
57
    gf_boolean_t need_to_free_buffer = _gf_false;
58

59
    ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_getspec_req);
60
    if (ret < 0) {
61
        // failed to decode msg;
62
        req->rpc_err = GARBAGE_ARGS;
63
        op_errno = EINVAL;
64
        rsp.spec = "<this method is not in use, use glusterd for getspec>";
65
        rsp.op_errno = gf_errno_to_error(op_errno);
66
        goto out;
67
    }
68

69
    /* By default, the behavior is not to return anything if specific option is
70
     * not set */
71
    if (!conf->volfile_dir) {
72
        ret = -1;
73
        op_errno = ENOTSUP;
74
        rsp.spec = "<this method is not in use, use glusterd for getspec>";
75
        goto out;
76
    }
77
    char *volid = args.key;
78
    if (strstr(volid, "../")) {
79
        op_errno = EINVAL;
80
        rsp.spec = "having '../' in volid is not valid";
81
        rsp.op_errno = gf_errno_to_error(op_errno);
82
        ret = -1;
83
        goto out;
84
    }
85
    ret = snprintf(volpath, PATH_MAX - 1, "%s/%s.vol", conf->volfile_dir,
86
                   volid);
87
    if (ret == -1) {
88
        op_errno = ENOMEM;
89
        gf_msg(this->name, GF_LOG_ERROR, errno, 0, "failed to copy volfile");
90
        goto out;
91
    }
92

93
    ret = sys_stat(volpath, &stbuf);
94
    if (ret < 0) {
95
        op_errno = errno;
96
        goto out;
97
    }
98

99
    spec_fd = sys_open(volpath, O_RDONLY, 0);
100
    if (spec_fd < 0) {
101
        op_errno = errno;
102
        gf_msg("glusterd", GF_LOG_ERROR, errno, 0, "Unable to open %s (%s)",
103
               volpath, strerror(errno));
104
        goto out;
105
    }
106
    ret = stbuf.st_size;
107

108
    if (ret > 0) {
109
        rsp.spec = MALLOC((ret + 1) * sizeof(char));
110
        if (!rsp.spec) {
111
            gf_msg(this->name, GF_LOG_ERROR, errno, 0, "no memory");
112
            ret = -1;
113
            goto out;
114
        }
115
        need_to_free_buffer = _gf_true;
116
        ret = sys_read(spec_fd, rsp.spec, ret);
117
        if (ret <= 0) {
118
            op_errno = errno;
119
        }
120
    }
121

122
out:
123
    if (spec_fd >= 0)
124
        sys_close(spec_fd);
125

126
    rsp.op_ret = ret;
127
    if (rsp.op_ret < 0) {
128
        gf_msg(this->name, GF_LOG_ERROR, op_errno, 0,
129
               "Failed to mount the volume");
130
    }
131

132
    if (op_errno)
133
        rsp.op_errno = gf_errno_to_error(op_errno);
134

135
    if (!rsp.spec)
136
        rsp.spec = "";
137

138
    server_submit_reply(NULL, req, &rsp, NULL, 0, NULL,
139
                        (xdrproc_t)xdr_gf_getspec_rsp);
140

141
    free(args.key);
142
    if (args.xdata.xdata_val)
143
        free(args.xdata.xdata_val);
144

145
    if (need_to_free_buffer)
146
        FREE(rsp.spec);
147
    if (rsp.xdata.xdata_val)
148
        GF_FREE(rsp.xdata.xdata_val);
149

150
    return 0;
151
}
152

153
static void
154
server_first_lookup_done(rpcsvc_request_t *req, gf_setvolume_rsp *rsp)
155
{
156
    server_submit_reply(NULL, req, rsp, NULL, 0, NULL,
157
                        (xdrproc_t)xdr_gf_setvolume_rsp);
158

159
    GF_FREE(rsp->dict.dict_val);
160
    GF_FREE(rsp);
161
}
162

163
static inode_t *
164
do_path_lookup(xlator_t *xl, dict_t *dict, inode_t *parinode, char *basename)
165
{
166
    int ret = 0;
167
    loc_t loc = {
168
        0,
169
    };
170
    uuid_t gfid = {
171
        0,
172
    };
173
    struct iatt iatt = {
174
        0,
175
    };
176
    inode_t *inode = NULL;
177

178
    loc.parent = inode_ref(parinode);
179
    loc_touchup(&loc, basename);
180
    loc.inode = inode_new(xl->itable);
181

182
    gf_uuid_generate(gfid);
183
    ret = dict_set_gfuuid(dict, "gfid-req", gfid, true);
184
    if (ret) {
185
        gf_log(xl->name, GF_LOG_ERROR, "failed to set 'gfid-req' for subdir");
186
        goto out;
187
    }
188

189
    ret = syncop_lookup(xl, &loc, &iatt, NULL, dict, NULL);
190
    if (ret < 0) {
191
        gf_log(xl->name, GF_LOG_ERROR, "first lookup on subdir (%s) failed: %s",
192
               basename, strerror(errno));
193
    }
194

195
    /* Inode linking is required so that the
196
       resolution happens all fine for future fops */
197
    inode = inode_link(loc.inode, loc.parent, loc.name, &iatt);
198

199
    /* Extra ref so the pointer is valid till client is valid */
200
    /* FIXME: not a priority, but this can lead to some inode
201
       leaks if subdir is more than 1 level depth. Leak is only
202
       per subdir entry, and not dependent on number of
203
       connections, so it should be fine for now */
204
    inode_ref(inode);
205

206
out:
207
    loc_wipe(&loc);
208
    return inode;
209
}
210

211
int
212
server_first_lookup(xlator_t *this, client_t *client, dict_t *reply)
213
{
214
    loc_t loc = {
215
        0,
216
    };
217
    dict_t *dict = NULL;
218
    int ret = 0;
219
    xlator_t *xl = client->bound_xl;
220
    char *msg = NULL;
221
    inode_t *inode = NULL;
222
    char *bname = NULL;
223
    char *str = NULL;
224
    char *tmp = NULL;
225
    char *saveptr = NULL;
226

227
    loc.path = "/";
228
    loc.name = "";
229
    loc.inode = xl->itable->root;
230
    loc.parent = NULL;
231
    gf_uuid_copy(loc.gfid, loc.inode->gfid);
232

233
    ret = syncop_lookup(xl, &loc, NULL, NULL, NULL, NULL);
234
    if (ret < 0)
235
        gf_log(xl->name, GF_LOG_ERROR, "lookup on root failed: %s",
236
               strerror(errno));
237
    /* Ignore error from lookup, don't set
238
     * failure in rsp->op_ret. lookup on a snapview-server
239
     * can fail with ESTALE
240
     */
241
    /* TODO-SUBDIR-MOUNT: validate above comment with respect to subdir lookup
242
     */
243

244
    if (client->subdir_mount) {
245
        str = tmp = gf_strdup(client->subdir_mount);
246
        dict = dict_new();
247
        inode = xl->itable->root;
248
        bname = strtok_r(str, "/", &saveptr);
249
        while (bname != NULL) {
250
            inode = do_path_lookup(xl, dict, inode, bname);
251
            if (inode == NULL) {
252
                gf_log(this->name, GF_LOG_ERROR,
253
                       "first lookup on subdir (%s) failed: %s",
254
                       client->subdir_mount, strerror(errno));
255
                ret = -1;
256
                goto fail;
257
            }
258
            bname = strtok_r(NULL, "/", &saveptr);
259
        }
260

261
        /* Can be used in server_resolve() */
262
        gf_uuid_copy(client->subdir_gfid, inode->gfid);
263
        client->subdir_inode = inode;
264
    }
265

266
    ret = 0;
267
    goto out;
268

269
fail:
270
    /* we should say to client, it is not possible
271
       to connect */
272
    ret = gf_asprintf(&msg, "subdirectory for mount \"%s\" is not found",
273
                      client->subdir_mount);
274
    if (-1 == ret) {
275
        gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_ASPRINTF_FAILED,
276
               "asprintf failed while setting error msg");
277
    }
278
    ret = dict_set_dynstr(reply, "ERROR", msg);
279
    if (ret < 0)
280
        gf_msg_debug(this->name, 0,
281
                     "failed to set error "
282
                     "msg");
283

284
    ret = -1;
285
out:
286
    if (dict)
287
        dict_unref(dict);
288

289
    inode_unref(loc.inode);
290

291
    if (tmp)
292
        GF_FREE(tmp);
293

294
    return ret;
295
}
296

297
/* RPC handler for GF_HNDSK_SETVOLUME: the main client handshake.
 *
 * Decodes the client's option dictionary, locates the requested brick
 * xlator, validates server state (graph PARENT_UP, child_up, volume-id
 * match), authenticates the client, binds it to the brick xlator and
 * builds the reply dictionary.  Always sends a response (via
 * server_first_lookup_done()) and returns 0; failures are reported to the
 * client through rsp->op_ret/op_errno and the "ERROR" key in the reply. */
int
server_setvolume(rpcsvc_request_t *req)
{
    gf_setvolume_req args = {
        {
            0,
        },
    };
    gf_setvolume_rsp *rsp = NULL;
    client_t *client = NULL;
    server_ctx_t *serv_ctx = NULL;
    server_conf_t *conf = NULL;
    peer_info_t *peerinfo = NULL;
    dict_t *reply = NULL;
    dict_t *config_params = NULL;
    dict_t *params = NULL;
    char *name = NULL;
    char *volume_id = NULL;
    char *client_uid = NULL;
    char *clnt_version = NULL;
    xlator_t *xl = NULL;
    char *msg = NULL;
    xlator_t *this = NULL;
    int32_t ret = -1;
    int32_t op_ret = -1;
    int32_t op_errno = EINVAL;
    uint32_t opversion = 0;
    rpc_transport_t *xprt = NULL;
    int32_t fop_version = 0;
    int32_t mgmt_version = 0;
    glusterfs_ctx_t *ctx = NULL;
    struct _child_status *tmp = NULL;
    char *subdir_mount = NULL;
    char *client_name = NULL;
    gf_boolean_t cleanup_starting = _gf_false;
    gf_boolean_t xlator_in_graph = _gf_true;

    /* Decode the request.  If this fails, op_ret stays -1, so the 'fail'
     * path below never dereferences the still-NULL 'client'. */
    params = dict_new();
    reply = dict_new();
    ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_setvolume_req);
    if (ret < 0) {
        // failed to decode msg;
        req->rpc_err = GARBAGE_ARGS;
        goto fail;
    }
    ctx = THIS->ctx;

    this = req->svc->xl;
    /* this is to ensure config_params is populated with the first brick
     * details at first place if brick multiplexing is enabled
     */
    config_params = dict_copy_with_ref(this->options, NULL);

    /* Unpack the client's option dictionary into 'params'. */
    ret = dict_unserialize(args.dict.dict_val, args.dict.dict_len, &params);
    if (ret < 0) {
        ret = dict_set_sizen_str_sizen(reply, "ERROR",
                                       "Internal error: failed to unserialize "
                                       "request dictionary");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg \"%s\"",
                         "Internal error: failed "
                         "to unserialize request dictionary");

        op_ret = -1;
        op_errno = EINVAL;
        goto fail;
    }

    ret = dict_get_str(params, "remote-subvolume", &name);
    if (ret < 0) {
        ret = dict_set_str(reply, "ERROR",
                           "No remote-subvolume option specified");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");

        op_ret = -1;
        op_errno = EINVAL;
        goto fail;
    }

    /* Find the brick xlator the client asked for.  When it is not in the
     * graph we fall back to 'this' so the auth-reject path below can still
     * log/reply, and remember the miss in 'xlator_in_graph'. */
    LOCK(&ctx->volfile_lock);
    {
        xl = get_xlator_by_name(this, name);
        if (!xl) {
            xlator_in_graph = _gf_false;
            xl = this;
        }
        if (ctx->cleanup_starting) {
            cleanup_starting = _gf_true;
            op_ret = -1;
            op_errno = ENOENT;
        }
    }
    UNLOCK(&ctx->volfile_lock);
    if (!xl || cleanup_starting) {
        ret = gf_asprintf(&msg, "remote-subvolume \"%s\" is not found", name);
        if (-1 == ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_ASPRINTF_FAILED,
                   "asprintf failed while setting error msg");
            goto fail;
        }
        ret = dict_set_dynstr(reply, "ERROR", msg);
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");

        op_ret = -1;
        op_errno = ENOENT;
        goto fail;
    }

    /* Merge the brick's own options on top of the server defaults. */
    config_params = dict_copy_with_ref(xl->options, config_params);
    conf = this->private;

    if (conf->parent_up == _gf_false) {
        /* PARENT_UP indicates that all xlators in graph are inited
         * successfully
         */
        op_ret = -1;
        op_errno = EAGAIN;

        ret = dict_set_str(reply, "ERROR",
                           "xlator graph in server is not initialised "
                           "yet. Try again later");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error: "
                         "xlator graph in server is not "
                         "initialised yet. Try again later");
        goto fail;
    }

    /* Look up this brick's child-status entry and refuse the handshake if
     * CHILD_UP has not been received for it yet. */
    pthread_mutex_lock(&conf->mutex);
    list_for_each_entry(tmp, &conf->child_status->status_list, status_list)
    {
        if (strcmp(tmp->name, name) == 0)
            break;
    }

    /* NOTE(review): if no entry matched, 'tmp' points at the list-head
     * container and 'tmp->name' relies on that field being NULL there —
     * confirm child_status list invariants before changing this. */
    if (!tmp->name) {
        gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_CHILD_STATUS_FAILED,
               "No xlator %s is found in child status list", name);
    } else {
        ret = dict_set_int32(reply, "child_up", tmp->child_up);
        if (ret < 0)
            gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_DICT_GET_FAILED,
                   "Failed to set 'child_up' for xlator %s "
                   "in the reply dict",
                   tmp->name);
        if (!tmp->child_up) {
            ret = dict_set_str(reply, "ERROR",
                               "Not received child_up for this xlator");
            if (ret < 0)
                gf_msg_debug(this->name, 0, "failed to set error msg");

            gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_CHILD_STATUS_FAILED,
                   "Not received child_up for this xlator %s", name);
            op_ret = -1;
            op_errno = EAGAIN;
            pthread_mutex_unlock(&conf->mutex);
            goto fail;
        }
    }
    pthread_mutex_unlock(&conf->mutex);

    /* Client identity: a UUID is mandatory, subdir-mount and process-name
     * are optional. */
    ret = dict_get_str(params, "process-uuid", &client_uid);
    if (ret < 0) {
        ret = dict_set_str(reply, "ERROR", "UUID not specified");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");

        op_ret = -1;
        op_errno = EINVAL;
        goto fail;
    }

    ret = dict_get_str(params, "subdir-mount", &subdir_mount);
    if (ret < 0) {
        /* Not a problem at all as the key is optional */
    }
    ret = dict_get_str(params, "process-name", &client_name);
    if (ret < 0) {
        client_name = "unknown";
    }

    /* If any value is set, the first element will be non-0.
       It would be '0', but not '\0' :-) */
    if (xl->graph->volume_id[0]) {
        /* Guard against a brick being re-used by a different volume. */
        ret = dict_get_str_sizen(params, "volume-id", &volume_id);
        if (!ret && strcmp(xl->graph->volume_id, volume_id)) {
            ret = dict_set_str(reply, "ERROR",
                               "Volume-ID different, possible case "
                               "of same brick re-used in another volume");
            if (ret < 0)
                gf_msg_debug(this->name, 0, "failed to set error msg");

            op_ret = -1;
            op_errno = EINVAL;
            goto fail;
        }
        /* NOTE(review): uses the child-status entry found above; assumes a
         * match was found for this brick — verify when list can be empty. */
        ret = dict_set_str(reply, "volume-id", tmp->volume_id);
        if (ret)
            gf_msg_debug(this->name, 0, "failed to set 'volume-id'");
    }
    /* Create/fetch the client_t object for this connection. */
    client = gf_client_get(this, &req->cred, client_uid, subdir_mount);
    if (client == NULL) {
        op_ret = -1;
        op_errno = ENOMEM;
        goto fail;
    }

    client->client_name = gf_strdup(client_name);

    gf_msg_debug(this->name, 0, "Connected to %s", client->client_uid);

    serv_ctx = server_ctx_get(client, client->this);
    if (serv_ctx == NULL) {
        gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_SERVER_CTX_GET_FAILED,
               "server_ctx_get() "
               "failed");
        goto fail;
    }

    /* Attach the client to the transport unless the brick is tearing down. */
    pthread_mutex_lock(&conf->mutex);
    if (xl->cleanup_starting) {
        cleanup_starting = _gf_true;
    } else if (req->trans->xl_private != client) {
        req->trans->xl_private = client;
    }
    pthread_mutex_unlock(&conf->mutex);

    if (cleanup_starting) {
        op_ret = -1;
        op_errno = EAGAIN;

        ret = dict_set_str(reply, "ERROR",
                           "cleanup flag is set for xlator. "
                           " Try again later");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error: "
                         "cleanup flag is set for xlator. "
                         "Try again later");
        goto fail;
    }

    /* Populate auth inputs (username/password, optional TLS peer name). */
    auth_set_username_passwd(params, config_params, client);
    if (req->trans->ssl_name) {
        if (dict_set_str(params, "ssl-name", req->trans->ssl_name) != 0) {
            gf_msg(this->name, GF_LOG_WARNING, 0, PS_MSG_SSL_NAME_SET_FAILED,
                   "failed to set "
                   "ssl_name %s",
                   req->trans->ssl_name);
            /* Not fatal, auth will just fail. */
        }
    }

    /* Version negotiation; missing keys are noted in the reply but not
     * fatal by themselves — gf_compare_client_version() decides. */
    ret = dict_get_int32(params, "fops-version", &fop_version);
    if (ret < 0) {
        ret = dict_set_str(reply, "ERROR", "No FOP version number specified");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");
    }

    ret = dict_get_int32(params, "mgmt-version", &mgmt_version);
    if (ret < 0) {
        ret = dict_set_str(reply, "ERROR", "No MGMT version number specified");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");
    }

    ret = gf_compare_client_version(req, fop_version, mgmt_version);
    if (ret != 0) {
        ret = gf_asprintf(&msg,
                          "version mismatch: client(%d)"
                          " - client-mgmt(%d)",
                          fop_version, mgmt_version);
        /* get_supported_version (req)); */
        if (-1 == ret) {
            gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_ASPRINTF_FAILED,
                   "asprintf failed while"
                   "setting up error msg");
            goto fail;
        }
        ret = dict_set_dynstr(reply, "ERROR", msg);
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");

        op_ret = -1;
        op_errno = EINVAL;
        goto fail;
    }

    peerinfo = &req->trans->peerinfo;
    if (peerinfo) {
        ret = dict_set_static_ptr(params, "peer-info", peerinfo);
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set "
                         "peer-info");
    }

    /* Record the client's op-version on every transport from this peer. */
    ret = dict_get_uint32(params, "opversion", &opversion);
    if (ret) {
        gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_CLIENT_OPVERSION_GET_FAILED,
               "Failed to get client opversion");
    }
    client->opversion = opversion;
    /* Assign op-version value to the client */
    pthread_mutex_lock(&conf->mutex);
    list_for_each_entry(xprt, &conf->xprt_list, list)
    {
        if (strcmp(peerinfo->identifier, xprt->peerinfo.identifier))
            continue;
        xprt->peerinfo.max_op_version = opversion;
    }
    pthread_mutex_unlock(&conf->mutex);

    if (conf->auth_modules == NULL) {
        gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_AUTH_INIT_FAILED,
               "Authentication module not initialized");
    }

    ret = dict_get_str(params, "client-version", &clnt_version);
    if (ret)
        gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_CLIENT_VERSION_NOT_SET,
               "client-version not set, may be of older version");

    /* The actual authentication decision. */
    ret = gf_authenticate(params, config_params, conf->auth_modules);

    if (ret == AUTH_ACCEPT) {
        /* Store options received from client side */
        req->trans->clnt_options = dict_ref(params);

        gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_CLIENT_ACCEPTED,
               "accepted client from %s (version: %s) with subvol %s",
               client->client_uid, (clnt_version) ? clnt_version : "old", name);

        gf_event(EVENT_CLIENT_CONNECT,
                 "client_uid=%s;"
                 "client_identifier=%s;server_identifier=%s;"
                 "brick_path=%s;subdir_mount=%s",
                 client->client_uid, req->trans->peerinfo.identifier,
                 req->trans->myinfo.identifier, name, subdir_mount);

        op_ret = 0;
        client->bound_xl = xl;

        /* Don't be confused by the below line (like how ERROR can
           be Success), key checked on client is 'ERROR' and hence
           we send 'Success' in this key */
        ret = dict_set_str(reply, "ERROR", "Success");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");
    } else {
        /* Auth rejected: distinguish "brick not in graph" (ENOENT) from a
         * genuine authentication failure (EACCES). */
        op_ret = -1;
        if (!xlator_in_graph) {
            gf_msg(this->name, GF_LOG_ERROR, ENOENT, PS_MSG_AUTHENTICATE_ERROR,
                   "Cannot authenticate client"
                   " from %s %s because brick is not attached in graph",
                   client->client_uid, (clnt_version) ? clnt_version : "old");

            op_errno = ENOENT;
            ret = dict_set_str(reply, "ERROR", "Brick not found");
        } else {
            gf_event(EVENT_CLIENT_AUTH_REJECT,
                     "client_uid=%s;"
                     "client_identifier=%s;server_identifier=%s;"
                     "brick_path=%s",
                     client->client_uid, req->trans->peerinfo.identifier,
                     req->trans->myinfo.identifier, name);
            gf_msg(this->name, GF_LOG_ERROR, EACCES, PS_MSG_AUTHENTICATE_ERROR,
                   "Cannot authenticate client"
                   " from %s %s",
                   client->client_uid, (clnt_version) ? clnt_version : "old");

            op_errno = EACCES;
            ret = dict_set_str(reply, "ERROR", "Authentication failed");
        }
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");
        goto fail;
    }

    if (client->bound_xl == NULL) {
        ret = dict_set_str(reply, "ERROR",
                           "Check volfile and handshake "
                           "options in protocol/client");
        if (ret < 0)
            gf_msg_debug(this->name, 0,
                         "failed to set error "
                         "msg");

        op_ret = -1;
        op_errno = EACCES;
        goto fail;
    }

    LOCK(&conf->itable_lock);
    {
        if (client->bound_xl->itable == NULL) {
            /* create inode table for this bound_xl, if one doesn't
               already exist */

            gf_msg_trace(this->name, 0,
                         "creating inode table with"
                         " lru_limit=%" PRId32 ", xlator=%s",
                         conf->inode_lru_limit, client->bound_xl->name);

            /* TODO: what is this ? */
            client->bound_xl->itable = inode_table_new(conf->inode_lru_limit,
                                                       client->bound_xl, 0, 0);
        }
    }
    UNLOCK(&conf->itable_lock);

    /* Reply keys the client expects on success. */
    ret = dict_set_str(reply, "process-uuid", this->ctx->process_uuid);
    if (ret)
        gf_msg_debug(this->name, 0, "failed to set 'process-uuid'");

    /* Insert a dummy key value pair to avoid failure at client side for
     * clnt-lk-version with older clients.
     */
    ret = dict_set_uint32(reply, "clnt-lk-version", 0);
    if (ret) {
        gf_msg(this->name, GF_LOG_WARNING, 0, PS_MSG_CLIENT_LK_VERSION_ERROR,
               "failed to set "
               "'clnt-lk-version'");
    }

    ret = dict_set_uint64(reply, "transport-ptr", ((uint64_t)(long)req->trans));
    if (ret)
        gf_msg_debug(this->name, 0, "failed to set 'transport-ptr'");

fail:
    /* It is important to validate the lookup on '/' as part of handshake,
       because if lookup itself can't succeed, we should communicate this
       to client. Very important in case of subdirectory mounts, where if
       client is trying to mount a non-existing directory */
    /* 'client' is guaranteed non-NULL here when op_ret >= 0, because
     * op_ret is only set to 0 after gf_client_get() succeeded. */
    if (op_ret >= 0 && client->bound_xl->itable) {
        if (client->bound_xl->cleanup_starting) {
            op_ret = -1;
            op_errno = EAGAIN;
            ret = dict_set_str(reply, "ERROR",
                               "cleanup flag is set for xlator "
                               "before call first_lookup Try again later");
            /* quisce coverity about UNUSED_VALUE ret */
            (void)(ret);
        } else {
            op_ret = server_first_lookup(this, client, reply);
            if (op_ret == -1)
                op_errno = ENOENT;
        }
    }

    rsp = GF_CALLOC(1, sizeof(gf_setvolume_rsp), gf_server_mt_setvolume_rsp_t);
    GF_ASSERT(rsp);

    rsp->op_ret = 0;

    ret = dict_allocate_and_serialize(reply, (char **)&rsp->dict.dict_val,
                                      &rsp->dict.dict_len);
    if (ret != 0) {
        ret = -1;
        gf_msg_debug("server-handshake", 0, "failed to serialize reply dict");
        op_ret = -1;
        /* NOTE(review): with ret forced to -1 this yields op_errno == 1
         * (EPERM), which looks accidental — confirm intended errno. */
        op_errno = -ret;
    }

    rsp->op_ret = op_ret;
    rsp->op_errno = gf_errno_to_error(op_errno);

    /* if bound_xl is NULL or something fails, then put the connection
     * back. Otherwise the connection would have been added to the
     * list of connections the server is maintaining and might segfault
     * during statedump when bound_xl of the connection is accessed.
     */
    if (op_ret && !xl && (client != NULL)) {
        /* We would have set the xl_private of the transport to the
         * @conn. But if we have put the connection i.e shutting down
         * the connection, then we should set xl_private to NULL as it
         * would be pointing to a freed memory and would segfault when
         * accessed upon getting DISCONNECT.
         */
        gf_client_put(client, NULL);
        req->trans->xl_private = NULL;
    }

    /* Send the response properly */
    server_first_lookup_done(req, rsp);

    free(args.dict.dict_val);

    dict_unref(params);
    dict_unref(reply);
    if (config_params) {
        /*
         * This might be null if we couldn't even find the translator
         * (brick) to copy it from.
         */
        dict_unref(config_params);
    }

    return 0;
}
819

820
int
821
server_ping(rpcsvc_request_t *req)
822
{
823
    gf_common_rsp rsp = {
824
        0,
825
    };
826

827
    /* Accepted */
828
    rsp.op_ret = 0;
829

830
    server_submit_reply(NULL, req, &rsp, NULL, 0, NULL,
831
                        (xdrproc_t)xdr_gf_common_rsp);
832

833
    return 0;
834
}
835

836
int
837
server_set_lk_version(rpcsvc_request_t *req)
838
{
839
    int ret = -1;
840
    gf_set_lk_ver_req args = {
841
        0,
842
    };
843
    gf_set_lk_ver_rsp rsp = {
844
        0,
845
    };
846

847
    ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_set_lk_ver_req);
848
    if (ret < 0) {
849
        /* failed to decode msg */
850
        req->rpc_err = GARBAGE_ARGS;
851
        goto fail;
852
    }
853

854
    rsp.lk_ver = args.lk_ver;
855
fail:
856
    server_submit_reply(NULL, req, &rsp, NULL, 0, NULL,
857
                        (xdrproc_t)xdr_gf_set_lk_ver_rsp);
858

859
    free(args.uid);
860

861
    return 0;
862
}
863

864
/* Dispatch table for the GlusterFS handshake program, indexed by procedure
 * number.  None of these procedures use duplicate-request caching (DRC_NA). */
static rpcsvc_actor_t gluster_handshake_actors[GF_HNDSK_MAXVALUE] = {
    [GF_HNDSK_NULL] = {"NULL", server_null, NULL, GF_HNDSK_NULL, DRC_NA, 0},
    [GF_HNDSK_SETVOLUME] = {"SETVOLUME", server_setvolume, NULL,
                            GF_HNDSK_SETVOLUME, DRC_NA, 0},
    [GF_HNDSK_GETSPEC] = {"GETSPEC", server_getspec, NULL, GF_HNDSK_GETSPEC,
                          DRC_NA, 0},
    [GF_HNDSK_PING] = {"PING", server_ping, NULL, GF_HNDSK_PING, DRC_NA, 0},
    [GF_HNDSK_SET_LK_VER] = {"SET_LK_VER", server_set_lk_version, NULL,
                             GF_HNDSK_SET_LK_VER, DRC_NA, 0},
};
874

875
/* RPC program descriptor registered with rpcsvc so incoming handshake
 * requests are routed to the actor table above. */
struct rpcsvc_program gluster_handshake_prog = {
    .progname = "GlusterFS Handshake",
    .prognum = GLUSTER_HNDSK_PROGRAM,
    .progver = GLUSTER_HNDSK_VERSION,
    .actors = gluster_handshake_actors,
    .numactors = GF_HNDSK_MAXVALUE,
};
882

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.