/*
  Copyright (c) 2010-2013 Red Hat, Inc. <http://www.redhat.com>
  This file is part of GlusterFS.

  This file is licensed to you under your choice of the GNU Lesser
  General Public License, version 3 or any later version (LGPLv3 or
  later), or the GNU General Public License, version 2 (GPLv2), in all
  cases as published by the Free Software Foundation.
*/
12
#include "server-helpers.h"
13
#include "rpc-common-xdr.h"
14
#include "glusterfs4-xdr.h"
15
#include <glusterfs/compat-errno.h>
16
#include "glusterfs3.h"
17
#include "authenticate.h"
18
#include "server-messages.h"
19
#include <glusterfs/syscall.h>
20
#include <glusterfs/events.h>
21
#include <glusterfs/syncop.h>
23
struct __get_xl_struct {
28
gf_compare_client_version(rpcsvc_request_t *req, int fop_prognum,
38
server_getspec(rpcsvc_request_t *req)
41
int32_t op_errno = ENOENT;
42
gf_getspec_req args = {
45
gf_getspec_rsp rsp = {
51
char volpath[PATH_MAX] = {
55
xlator_t *this = req->svc->xl;
56
server_conf_t *conf = this->private;
57
gf_boolean_t need_to_free_buffer = _gf_false;
59
ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_getspec_req);
61
// failed to decode msg;
62
req->rpc_err = GARBAGE_ARGS;
64
rsp.spec = "<this method is not in use, use glusterd for getspec>";
65
rsp.op_errno = gf_errno_to_error(op_errno);
69
/* By default, the behavior is not to return anything if specific option is
71
if (!conf->volfile_dir) {
74
rsp.spec = "<this method is not in use, use glusterd for getspec>";
77
char *volid = args.key;
78
if (strstr(volid, "../")) {
80
rsp.spec = "having '../' in volid is not valid";
81
rsp.op_errno = gf_errno_to_error(op_errno);
85
ret = snprintf(volpath, PATH_MAX - 1, "%s/%s.vol", conf->volfile_dir,
89
gf_msg(this->name, GF_LOG_ERROR, errno, 0, "failed to copy volfile");
93
ret = sys_stat(volpath, &stbuf);
99
spec_fd = sys_open(volpath, O_RDONLY, 0);
102
gf_msg("glusterd", GF_LOG_ERROR, errno, 0, "Unable to open %s (%s)",
103
volpath, strerror(errno));
109
rsp.spec = MALLOC((ret + 1) * sizeof(char));
111
gf_msg(this->name, GF_LOG_ERROR, errno, 0, "no memory");
115
need_to_free_buffer = _gf_true;
116
ret = sys_read(spec_fd, rsp.spec, ret);
127
if (rsp.op_ret < 0) {
128
gf_msg(this->name, GF_LOG_ERROR, op_errno, 0,
129
"Failed to mount the volume");
133
rsp.op_errno = gf_errno_to_error(op_errno);
138
server_submit_reply(NULL, req, &rsp, NULL, 0, NULL,
139
(xdrproc_t)xdr_gf_getspec_rsp);
142
if (args.xdata.xdata_val)
143
free(args.xdata.xdata_val);
145
if (need_to_free_buffer)
147
if (rsp.xdata.xdata_val)
148
GF_FREE(rsp.xdata.xdata_val);
154
server_first_lookup_done(rpcsvc_request_t *req, gf_setvolume_rsp *rsp)
156
server_submit_reply(NULL, req, rsp, NULL, 0, NULL,
157
(xdrproc_t)xdr_gf_setvolume_rsp);
159
GF_FREE(rsp->dict.dict_val);
164
do_path_lookup(xlator_t *xl, dict_t *dict, inode_t *parinode, char *basename)
176
inode_t *inode = NULL;
178
loc.parent = inode_ref(parinode);
179
loc_touchup(&loc, basename);
180
loc.inode = inode_new(xl->itable);
182
gf_uuid_generate(gfid);
183
ret = dict_set_gfuuid(dict, "gfid-req", gfid, true);
185
gf_log(xl->name, GF_LOG_ERROR, "failed to set 'gfid-req' for subdir");
189
ret = syncop_lookup(xl, &loc, &iatt, NULL, dict, NULL);
191
gf_log(xl->name, GF_LOG_ERROR, "first lookup on subdir (%s) failed: %s",
192
basename, strerror(errno));
195
/* Inode linking is required so that the
196
resolution happens all fine for future fops */
197
inode = inode_link(loc.inode, loc.parent, loc.name, &iatt);
199
/* Extra ref so the pointer is valid till client is valid */
200
/* FIXME: not a priority, but this can lead to some inode
201
leaks if subdir is more than 1 level depth. Leak is only
202
per subdir entry, and not dependent on number of
203
connections, so it should be fine for now */
212
server_first_lookup(xlator_t *this, client_t *client, dict_t *reply)
219
xlator_t *xl = client->bound_xl;
221
inode_t *inode = NULL;
225
char *saveptr = NULL;
229
loc.inode = xl->itable->root;
231
gf_uuid_copy(loc.gfid, loc.inode->gfid);
233
ret = syncop_lookup(xl, &loc, NULL, NULL, NULL, NULL);
235
gf_log(xl->name, GF_LOG_ERROR, "lookup on root failed: %s",
237
/* Ignore error from lookup, don't set
238
* failure in rsp->op_ret. lookup on a snapview-server
239
* can fail with ESTALE
241
/* TODO-SUBDIR-MOUNT: validate above comment with respect to subdir lookup
244
if (client->subdir_mount) {
245
str = tmp = gf_strdup(client->subdir_mount);
247
inode = xl->itable->root;
248
bname = strtok_r(str, "/", &saveptr);
249
while (bname != NULL) {
250
inode = do_path_lookup(xl, dict, inode, bname);
252
gf_log(this->name, GF_LOG_ERROR,
253
"first lookup on subdir (%s) failed: %s",
254
client->subdir_mount, strerror(errno));
258
bname = strtok_r(NULL, "/", &saveptr);
261
/* Can be used in server_resolve() */
262
gf_uuid_copy(client->subdir_gfid, inode->gfid);
263
client->subdir_inode = inode;
270
/* we should say to client, it is not possible
272
ret = gf_asprintf(&msg, "subdirectory for mount \"%s\" is not found",
273
client->subdir_mount);
275
gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_ASPRINTF_FAILED,
276
"asprintf failed while setting error msg");
278
ret = dict_set_dynstr(reply, "ERROR", msg);
280
gf_msg_debug(this->name, 0,
281
"failed to set error "
289
inode_unref(loc.inode);
298
server_setvolume(rpcsvc_request_t *req)
300
gf_setvolume_req args = {
305
gf_setvolume_rsp *rsp = NULL;
306
client_t *client = NULL;
307
server_ctx_t *serv_ctx = NULL;
308
server_conf_t *conf = NULL;
309
peer_info_t *peerinfo = NULL;
310
dict_t *reply = NULL;
311
dict_t *config_params = NULL;
312
dict_t *params = NULL;
314
char *volume_id = NULL;
315
char *client_uid = NULL;
316
char *clnt_version = NULL;
319
xlator_t *this = NULL;
322
int32_t op_errno = EINVAL;
323
uint32_t opversion = 0;
324
rpc_transport_t *xprt = NULL;
325
int32_t fop_version = 0;
326
int32_t mgmt_version = 0;
327
glusterfs_ctx_t *ctx = NULL;
328
struct _child_status *tmp = NULL;
329
char *subdir_mount = NULL;
330
char *client_name = NULL;
331
gf_boolean_t cleanup_starting = _gf_false;
332
gf_boolean_t xlator_in_graph = _gf_true;
336
ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_setvolume_req);
338
// failed to decode msg;
339
req->rpc_err = GARBAGE_ARGS;
345
/* this is to ensure config_params is populated with the first brick
346
* details at first place if brick multiplexing is enabled
348
config_params = dict_copy_with_ref(this->options, NULL);
350
ret = dict_unserialize(args.dict.dict_val, args.dict.dict_len, ¶ms);
352
ret = dict_set_sizen_str_sizen(reply, "ERROR",
353
"Internal error: failed to unserialize "
354
"request dictionary");
356
gf_msg_debug(this->name, 0,
357
"failed to set error "
359
"Internal error: failed "
360
"to unserialize request dictionary");
367
ret = dict_get_str(params, "remote-subvolume", &name);
369
ret = dict_set_str(reply, "ERROR",
370
"No remote-subvolume option specified");
372
gf_msg_debug(this->name, 0,
373
"failed to set error "
381
LOCK(&ctx->volfile_lock);
383
xl = get_xlator_by_name(this, name);
385
xlator_in_graph = _gf_false;
388
if (ctx->cleanup_starting) {
389
cleanup_starting = _gf_true;
394
UNLOCK(&ctx->volfile_lock);
395
if (!xl || cleanup_starting) {
396
ret = gf_asprintf(&msg, "remote-subvolume \"%s\" is not found", name);
398
gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_ASPRINTF_FAILED,
399
"asprintf failed while setting error msg");
402
ret = dict_set_dynstr(reply, "ERROR", msg);
404
gf_msg_debug(this->name, 0,
405
"failed to set error "
413
config_params = dict_copy_with_ref(xl->options, config_params);
414
conf = this->private;
416
if (conf->parent_up == _gf_false) {
417
/* PARENT_UP indicates that all xlators in graph are inited
423
ret = dict_set_str(reply, "ERROR",
424
"xlator graph in server is not initialised "
425
"yet. Try again later");
427
gf_msg_debug(this->name, 0,
428
"failed to set error: "
429
"xlator graph in server is not "
430
"initialised yet. Try again later");
434
pthread_mutex_lock(&conf->mutex);
435
list_for_each_entry(tmp, &conf->child_status->status_list, status_list)
437
if (strcmp(tmp->name, name) == 0)
442
gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_CHILD_STATUS_FAILED,
443
"No xlator %s is found in child status list", name);
445
ret = dict_set_int32(reply, "child_up", tmp->child_up);
447
gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_DICT_GET_FAILED,
448
"Failed to set 'child_up' for xlator %s "
451
if (!tmp->child_up) {
452
ret = dict_set_str(reply, "ERROR",
453
"Not received child_up for this xlator");
455
gf_msg_debug(this->name, 0, "failed to set error msg");
457
gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_CHILD_STATUS_FAILED,
458
"Not received child_up for this xlator %s", name);
461
pthread_mutex_unlock(&conf->mutex);
465
pthread_mutex_unlock(&conf->mutex);
467
ret = dict_get_str(params, "process-uuid", &client_uid);
469
ret = dict_set_str(reply, "ERROR", "UUID not specified");
471
gf_msg_debug(this->name, 0,
472
"failed to set error "
480
ret = dict_get_str(params, "subdir-mount", &subdir_mount);
482
/* Not a problem at all as the key is optional */
484
ret = dict_get_str(params, "process-name", &client_name);
486
client_name = "unknown";
489
/* If any value is set, the first element will be non-0.
490
It would be '0', but not '\0' :-) */
491
if (xl->graph->volume_id[0]) {
492
ret = dict_get_str_sizen(params, "volume-id", &volume_id);
493
if (!ret && strcmp(xl->graph->volume_id, volume_id)) {
494
ret = dict_set_str(reply, "ERROR",
495
"Volume-ID different, possible case "
496
"of same brick re-used in another volume");
498
gf_msg_debug(this->name, 0, "failed to set error msg");
504
ret = dict_set_str(reply, "volume-id", tmp->volume_id);
506
gf_msg_debug(this->name, 0, "failed to set 'volume-id'");
508
client = gf_client_get(this, &req->cred, client_uid, subdir_mount);
509
if (client == NULL) {
515
client->client_name = gf_strdup(client_name);
517
gf_msg_debug(this->name, 0, "Connected to %s", client->client_uid);
519
serv_ctx = server_ctx_get(client, client->this);
520
if (serv_ctx == NULL) {
521
gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_SERVER_CTX_GET_FAILED,
527
pthread_mutex_lock(&conf->mutex);
528
if (xl->cleanup_starting) {
529
cleanup_starting = _gf_true;
530
} else if (req->trans->xl_private != client) {
531
req->trans->xl_private = client;
533
pthread_mutex_unlock(&conf->mutex);
535
if (cleanup_starting) {
539
ret = dict_set_str(reply, "ERROR",
540
"cleanup flag is set for xlator. "
543
gf_msg_debug(this->name, 0,
544
"failed to set error: "
545
"cleanup flag is set for xlator. "
550
auth_set_username_passwd(params, config_params, client);
551
if (req->trans->ssl_name) {
552
if (dict_set_str(params, "ssl-name", req->trans->ssl_name) != 0) {
553
gf_msg(this->name, GF_LOG_WARNING, 0, PS_MSG_SSL_NAME_SET_FAILED,
556
req->trans->ssl_name);
557
/* Not fatal, auth will just fail. */
561
ret = dict_get_int32(params, "fops-version", &fop_version);
563
ret = dict_set_str(reply, "ERROR", "No FOP version number specified");
565
gf_msg_debug(this->name, 0,
566
"failed to set error "
570
ret = dict_get_int32(params, "mgmt-version", &mgmt_version);
572
ret = dict_set_str(reply, "ERROR", "No MGMT version number specified");
574
gf_msg_debug(this->name, 0,
575
"failed to set error "
579
ret = gf_compare_client_version(req, fop_version, mgmt_version);
581
ret = gf_asprintf(&msg,
582
"version mismatch: client(%d)"
583
" - client-mgmt(%d)",
584
fop_version, mgmt_version);
585
/* get_supported_version (req)); */
587
gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_ASPRINTF_FAILED,
588
"asprintf failed while"
589
"setting up error msg");
592
ret = dict_set_dynstr(reply, "ERROR", msg);
594
gf_msg_debug(this->name, 0,
595
"failed to set error "
603
peerinfo = &req->trans->peerinfo;
605
ret = dict_set_static_ptr(params, "peer-info", peerinfo);
607
gf_msg_debug(this->name, 0,
612
ret = dict_get_uint32(params, "opversion", &opversion);
614
gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_CLIENT_OPVERSION_GET_FAILED,
615
"Failed to get client opversion");
617
client->opversion = opversion;
618
/* Assign op-version value to the client */
619
pthread_mutex_lock(&conf->mutex);
620
list_for_each_entry(xprt, &conf->xprt_list, list)
622
if (strcmp(peerinfo->identifier, xprt->peerinfo.identifier))
624
xprt->peerinfo.max_op_version = opversion;
626
pthread_mutex_unlock(&conf->mutex);
628
if (conf->auth_modules == NULL) {
629
gf_msg(this->name, GF_LOG_ERROR, 0, PS_MSG_AUTH_INIT_FAILED,
630
"Authentication module not initialized");
633
ret = dict_get_str(params, "client-version", &clnt_version);
635
gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_CLIENT_VERSION_NOT_SET,
636
"client-version not set, may be of older version");
638
ret = gf_authenticate(params, config_params, conf->auth_modules);
640
if (ret == AUTH_ACCEPT) {
641
/* Store options received from client side */
642
req->trans->clnt_options = dict_ref(params);
644
gf_msg(this->name, GF_LOG_INFO, 0, PS_MSG_CLIENT_ACCEPTED,
645
"accepted client from %s (version: %s) with subvol %s",
646
client->client_uid, (clnt_version) ? clnt_version : "old", name);
648
gf_event(EVENT_CLIENT_CONNECT,
650
"client_identifier=%s;server_identifier=%s;"
651
"brick_path=%s;subdir_mount=%s",
652
client->client_uid, req->trans->peerinfo.identifier,
653
req->trans->myinfo.identifier, name, subdir_mount);
656
client->bound_xl = xl;
658
/* Don't be confused by the below line (like how ERROR can
659
be Success), key checked on client is 'ERROR' and hence
660
we send 'Success' in this key */
661
ret = dict_set_str(reply, "ERROR", "Success");
663
gf_msg_debug(this->name, 0,
664
"failed to set error "
668
if (!xlator_in_graph) {
669
gf_msg(this->name, GF_LOG_ERROR, ENOENT, PS_MSG_AUTHENTICATE_ERROR,
670
"Cannot authenticate client"
671
" from %s %s because brick is not attached in graph",
672
client->client_uid, (clnt_version) ? clnt_version : "old");
675
ret = dict_set_str(reply, "ERROR", "Brick not found");
677
gf_event(EVENT_CLIENT_AUTH_REJECT,
679
"client_identifier=%s;server_identifier=%s;"
681
client->client_uid, req->trans->peerinfo.identifier,
682
req->trans->myinfo.identifier, name);
683
gf_msg(this->name, GF_LOG_ERROR, EACCES, PS_MSG_AUTHENTICATE_ERROR,
684
"Cannot authenticate client"
686
client->client_uid, (clnt_version) ? clnt_version : "old");
689
ret = dict_set_str(reply, "ERROR", "Authentication failed");
692
gf_msg_debug(this->name, 0,
693
"failed to set error "
698
if (client->bound_xl == NULL) {
699
ret = dict_set_str(reply, "ERROR",
700
"Check volfile and handshake "
701
"options in protocol/client");
703
gf_msg_debug(this->name, 0,
704
"failed to set error "
712
LOCK(&conf->itable_lock);
714
if (client->bound_xl->itable == NULL) {
715
/* create inode table for this bound_xl, if one doesn't
718
gf_msg_trace(this->name, 0,
719
"creating inode table with"
720
" lru_limit=%" PRId32 ", xlator=%s",
721
conf->inode_lru_limit, client->bound_xl->name);
723
/* TODO: what is this ? */
724
client->bound_xl->itable = inode_table_new(conf->inode_lru_limit,
725
client->bound_xl, 0, 0);
728
UNLOCK(&conf->itable_lock);
730
ret = dict_set_str(reply, "process-uuid", this->ctx->process_uuid);
732
gf_msg_debug(this->name, 0, "failed to set 'process-uuid'");
734
/* Insert a dummy key value pair to avoid failure at client side for
735
* clnt-lk-version with older clients.
737
ret = dict_set_uint32(reply, "clnt-lk-version", 0);
739
gf_msg(this->name, GF_LOG_WARNING, 0, PS_MSG_CLIENT_LK_VERSION_ERROR,
741
"'clnt-lk-version'");
744
ret = dict_set_uint64(reply, "transport-ptr", ((uint64_t)(long)req->trans));
746
gf_msg_debug(this->name, 0, "failed to set 'transport-ptr'");
749
/* It is important to validate the lookup on '/' as part of handshake,
750
because if lookup itself can't succeed, we should communicate this
751
to client. Very important in case of subdirectory mounts, where if
752
client is trying to mount a non-existing directory */
753
if (op_ret >= 0 && client->bound_xl->itable) {
754
if (client->bound_xl->cleanup_starting) {
757
ret = dict_set_str(reply, "ERROR",
758
"cleanup flag is set for xlator "
759
"before call first_lookup Try again later");
760
/* quisce coverity about UNUSED_VALUE ret */
763
op_ret = server_first_lookup(this, client, reply);
769
rsp = GF_CALLOC(1, sizeof(gf_setvolume_rsp), gf_server_mt_setvolume_rsp_t);
774
ret = dict_allocate_and_serialize(reply, (char **)&rsp->dict.dict_val,
775
&rsp->dict.dict_len);
778
gf_msg_debug("server-handshake", 0, "failed to serialize reply dict");
783
rsp->op_ret = op_ret;
784
rsp->op_errno = gf_errno_to_error(op_errno);
786
/* if bound_xl is NULL or something fails, then put the connection
787
* back. Otherwise the connection would have been added to the
788
* list of connections the server is maintaining and might segfault
789
* during statedump when bound_xl of the connection is accessed.
791
if (op_ret && !xl && (client != NULL)) {
792
/* We would have set the xl_private of the transport to the
793
* @conn. But if we have put the connection i.e shutting down
794
* the connection, then we should set xl_private to NULL as it
795
* would be pointing to a freed memory and would segfault when
796
* accessed upon getting DISCONNECT.
798
gf_client_put(client, NULL);
799
req->trans->xl_private = NULL;
802
/* Send the response properly */
803
server_first_lookup_done(req, rsp);
805
free(args.dict.dict_val);
811
* This might be null if we couldn't even find the translator
812
* (brick) to copy it from.
814
dict_unref(config_params);
821
server_ping(rpcsvc_request_t *req)
823
gf_common_rsp rsp = {
830
server_submit_reply(NULL, req, &rsp, NULL, 0, NULL,
831
(xdrproc_t)xdr_gf_common_rsp);
837
server_set_lk_version(rpcsvc_request_t *req)
840
gf_set_lk_ver_req args = {
843
gf_set_lk_ver_rsp rsp = {
847
ret = xdr_to_generic(req->msg[0], &args, (xdrproc_t)xdr_gf_set_lk_ver_req);
849
/* failed to decode msg */
850
req->rpc_err = GARBAGE_ARGS;
854
rsp.lk_ver = args.lk_ver;
856
server_submit_reply(NULL, req, &rsp, NULL, 0, NULL,
857
(xdrproc_t)xdr_gf_set_lk_ver_rsp);
864
static rpcsvc_actor_t gluster_handshake_actors[GF_HNDSK_MAXVALUE] = {
865
[GF_HNDSK_NULL] = {"NULL", server_null, NULL, GF_HNDSK_NULL, DRC_NA, 0},
866
[GF_HNDSK_SETVOLUME] = {"SETVOLUME", server_setvolume, NULL,
867
GF_HNDSK_SETVOLUME, DRC_NA, 0},
868
[GF_HNDSK_GETSPEC] = {"GETSPEC", server_getspec, NULL, GF_HNDSK_GETSPEC,
870
[GF_HNDSK_PING] = {"PING", server_ping, NULL, GF_HNDSK_PING, DRC_NA, 0},
871
[GF_HNDSK_SET_LK_VER] = {"SET_LK_VER", server_set_lk_version, NULL,
872
GF_HNDSK_SET_LK_VER, DRC_NA, 0},
875
struct rpcsvc_program gluster_handshake_prog = {
876
.progname = "GlusterFS Handshake",
877
.prognum = GLUSTER_HNDSK_PROGRAM,
878
.progver = GLUSTER_HNDSK_VERSION,
879
.actors = gluster_handshake_actors,
880
.numactors = GF_HNDSK_MAXVALUE,