glusterfs

Форк
0
/
server-helpers.c 
1251 строка · 32.4 Кб
1
/*
2
  Copyright (c) 2010-2013 Red Hat, Inc. <http://www.redhat.com>
3
  This file is part of GlusterFS.
4

5
  This file is licensed to you under your choice of the GNU Lesser
6
  General Public License, version 3 or any later version (LGPLv3 or
7
  later), or the GNU General Public License, version 2 (GPLv2), in all
8
  cases as published by the Free Software Foundation.
9
*/
10

11
#include "server.h"
12
#include "server-helpers.h"
13
#include "server-messages.h"
14
#include <glusterfs/syscall.h>
15
#include <glusterfs/default-args.h>
16
#include "server-common.h"
17

18
#include <fnmatch.h>
19
#include <pwd.h>
20

21
/* based on nfs_fix_aux_groups() */
22
static int
23
gid_resolve(server_conf_t *conf, call_stack_t *root)
24
{
25
    int ret = 0;
26
    struct passwd mypw;
27
    char mystrs[1024];
28
    struct passwd *result;
29
    gid_t *mygroups = NULL;
30
    gid_list_t gl;
31
    int ngroups;
32
    const gid_list_t *agl;
33

34
    agl = gid_cache_lookup(&conf->gid_cache, root->uid, 0, 0);
35
    if (agl) {
36
        root->ngrps = agl->gl_count;
37

38
        if (root->ngrps > 0) {
39
            ret = call_stack_alloc_groups(root, agl->gl_count);
40
            if (ret == 0) {
41
                memcpy(root->groups, agl->gl_list,
42
                       sizeof(gid_t) * agl->gl_count);
43
            }
44
        }
45

46
        gid_cache_release(&conf->gid_cache, agl);
47

48
        return ret;
49
    }
50

51
    ret = getpwuid_r(root->uid, &mypw, mystrs, sizeof(mystrs), &result);
52
    if (ret != 0) {
53
        gf_smsg("gid-cache", GF_LOG_ERROR, errno, PS_MSG_GET_UID_FAILED,
54
                "uid=%u", root->uid, NULL);
55
        return -1;
56
    }
57

58
    if (!result) {
59
        gf_smsg("gid-cache", GF_LOG_ERROR, 0, PS_MSG_UID_NOT_FOUND, "uid=%u",
60
                root->uid, NULL);
61
        return -1;
62
    }
63

64
    gf_msg_trace("gid-cache", 0, "mapped %u => %s", root->uid, result->pw_name);
65

66
    ngroups = gf_getgrouplist(result->pw_name, root->gid, &mygroups);
67
    if (ngroups == -1) {
68
        gf_smsg("gid-cache", GF_LOG_ERROR, 0, PS_MSG_MAPPING_ERROR,
69
                "pw_name=%s", result->pw_name, "root->ngtps=%d", root->ngrps,
70
                NULL);
71
        return -1;
72
    }
73
    root->ngrps = (uint16_t)ngroups;
74

75
    /* setup a full gid_list_t to add it to the gid_cache */
76
    gl.gl_id = root->uid;
77
    gl.gl_uid = root->uid;
78
    gl.gl_gid = root->gid;
79
    gl.gl_count = root->ngrps;
80

81
    gl.gl_list = GF_MALLOC(root->ngrps * sizeof(gid_t), gf_common_mt_groups_t);
82
    if (gl.gl_list)
83
        memcpy(gl.gl_list, mygroups, sizeof(gid_t) * root->ngrps);
84
    else {
85
        GF_FREE(mygroups);
86
        return -1;
87
    }
88

89
    if (root->ngrps > 0) {
90
        call_stack_set_groups(root, root->ngrps, &mygroups);
91
    }
92

93
    if (gid_cache_add(&conf->gid_cache, &gl) != 1)
94
        GF_FREE(gl.gl_list);
95

96
    return ret;
97
}
98

99
static int
100
server_decode_groups(call_frame_t *frame, rpcsvc_request_t *req)
101
{
102
    int i = 0;
103

104
    if (call_stack_alloc_groups(frame->root, req->auxgidcount) != 0)
105
        return -1;
106

107
    frame->root->ngrps = req->auxgidcount;
108
    if (frame->root->ngrps == 0)
109
        return 0;
110

111
    /* ngrps cannot be bigger than USHRT_MAX(65535) */
112
    if (frame->root->ngrps > GF_MAX_AUX_GROUPS)
113
        return -1;
114

115
    for (; i < frame->root->ngrps; ++i)
116
        frame->root->groups[i] = req->auxgids[i];
117

118
    return 0;
119
}
120

121
void
122
server_resolve_wipe(server_resolve_t *resolve)
123
{
124
    GF_FREE((void *)resolve->path);
125

126
    GF_FREE((void *)resolve->bname);
127

128
    loc_wipe(&resolve->resolve_loc);
129
}
130

131
void
132
free_state(server_state_t *state)
133
{
134
    if (state->fd) {
135
        fd_unref(state->fd);
136
        state->fd = NULL;
137
    }
138

139
    if (state->params) {
140
        dict_unref(state->params);
141
        state->params = NULL;
142
    }
143

144
    if (state->iobref) {
145
        iobref_unref(state->iobref);
146
        state->iobref = NULL;
147
    }
148

149
    if (state->dict) {
150
        dict_unref(state->dict);
151
        state->dict = NULL;
152
    }
153

154
    if (state->xdata) {
155
        dict_unref(state->xdata);
156
        state->xdata = NULL;
157
    }
158

159
    GF_FREE((void *)state->volume);
160

161
    GF_FREE((void *)state->name);
162

163
    loc_wipe(&state->loc);
164
    loc_wipe(&state->loc2);
165

166
    server_resolve_wipe(&state->resolve);
167
    server_resolve_wipe(&state->resolve2);
168

169
    /* Call rpc_trnasport_unref to avoid crashes at last after free
170
       all resources because of server_rpc_notify (for transport destroy)
171
       call's xlator_mem_cleanup if all xprt are destroyed that internally
172
       call's inode_table_destroy.
173
    */
174
    if (state->xprt) {
175
        rpc_transport_unref(state->xprt);
176
        state->xprt = NULL;
177
    }
178

179
    GF_FREE(state);
180
}
181

182
/* Callback for the flush wound by do_fd_cleanup().
 *
 * Drops the fd reference stashed in frame->local, decrements the
 * per-client fd counter and, when this was the last fd of a detaching
 * client, looks up the client's transport in conf->xprt_list and drops
 * one transport reference.
 *
 * `cookie` carries the "detach" flag as a plain integer (set via
 * STACK_WIND_COOKIE in do_fd_cleanup()).
 */
static int
server_connection_cleanup_flush_cbk(call_frame_t *frame, void *cookie,
                                    xlator_t *this, int32_t op_ret,
                                    int32_t op_errno, dict_t *xdata)
{
    int32_t ret = -1;
    fd_t *fd = NULL;
    client_t *client = NULL;
    uint64_t fd_cnt = 0;
    xlator_t *victim = NULL;
    server_conf_t *conf = NULL;
    xlator_t *serv_xl = NULL;
    rpc_transport_t *xprt = NULL;
    rpc_transport_t *xp_next = NULL;
    int32_t detach = (long)cookie;
    gf_boolean_t xprt_found = _gf_false;

    GF_VALIDATE_OR_GOTO("server", this, out);
    GF_VALIDATE_OR_GOTO("server", frame, out);

    fd = frame->local;
    client = frame->root->client;
    serv_xl = frame->this;
    conf = serv_xl->private;

    /* Release the fd ref taken when the flush was wound. */
    fd_unref(fd);
    frame->local = NULL;

    if (client)
        victim = client->bound_xl;

    if (victim) {
        /* When the last fd of a detaching client completes, find its
           transport (under conf->mutex) and drop one reference.
           NOTE(review): this presumably pairs with a ref held during
           cleanup -- confirm against server_connection_cleanup(). */
        fd_cnt = GF_ATOMIC_DEC(client->fd_cnt);
        if (!fd_cnt && conf && detach) {
            pthread_mutex_lock(&conf->mutex);
            {
                list_for_each_entry_safe(xprt, xp_next, &conf->xprt_list, list)
                {
                    if (!xprt->xl_private)
                        continue;
                    if (xprt->xl_private == client) {
                        xprt_found = _gf_true;
                        break;
                    }
                }
            }
            pthread_mutex_unlock(&conf->mutex);
            if (xprt_found) {
                rpc_transport_unref(xprt);
            }
        }
    }

    /* Balance the gf_client_ref() taken in do_fd_cleanup(). */
    gf_client_unref(client);
    STACK_DESTROY(frame->root);

    ret = 0;
out:
    return ret;
}
242

243
static int
244
do_fd_cleanup(xlator_t *this, client_t *client, fdentry_t *fdentries,
245
              int fd_count, int32_t detach)
246
{
247
    fd_t *fd = NULL;
248
    int i = 0, ret = -1;
249
    call_frame_t *tmp_frame = NULL;
250
    xlator_t *bound_xl = NULL;
251
    char *path = NULL;
252

253
    GF_VALIDATE_OR_GOTO("server", this, out);
254
    GF_VALIDATE_OR_GOTO("server", fdentries, out);
255

256
    bound_xl = client->bound_xl;
257

258
    for (i = 0; i < fd_count; i++) {
259
        fd = fdentries[i].fd;
260

261
        if (fd != NULL) {
262
            tmp_frame = create_frame(this, this->ctx->pool);
263
            if (tmp_frame == NULL) {
264
                goto out;
265
            }
266

267
            tmp_frame->root->type = GF_OP_TYPE_FOP;
268
            GF_ASSERT(fd->inode);
269

270
            ret = inode_path(fd->inode, NULL, &path);
271

272
            if (ret > 0) {
273
                gf_smsg(this->name, GF_LOG_INFO, 0, PS_MSG_FD_CLEANUP,
274
                        "path=%s", path, NULL);
275
                GF_FREE(path);
276
            } else {
277
                gf_smsg(this->name, GF_LOG_INFO, 0, PS_MSG_FD_CLEANUP,
278
                        "inode-gfid=%s", uuid_utoa(fd->inode->gfid), NULL);
279
            }
280

281
            tmp_frame->local = fd;
282
            tmp_frame->root->pid = 0;
283
            gf_client_ref(client);
284
            tmp_frame->root->client = client;
285
            memset(&tmp_frame->root->lk_owner, 0, sizeof(gf_lkowner_t));
286

287
            STACK_WIND_COOKIE(tmp_frame, server_connection_cleanup_flush_cbk,
288
                              (void *)(long)detach, bound_xl,
289
                              bound_xl->fops->flush, fd, NULL);
290
        }
291
    }
292

293
    GF_FREE(fdentries);
294
    ret = 0;
295

296
out:
297
    return ret;
298
}
299

300
/* Clean up the fds (and optionally internal locks) a client still holds.
 *
 * `flags` selects what to clean: POSIX_LOCKS harvests all fds from the
 * client's fdtable for flushing; INTERNAL_LOCKS additionally runs
 * gf_client_disconnect().  When `fd_exist` is non-NULL the caller is
 * server_rpc_notify() handling a DISCONNECT: *fd_exist is set when any
 * fd remains, and the flush callbacks are told to detach (drop a
 * transport ref) once the last fd completes.
 *
 * Returns 0 on success, -1 if either the disconnect or the fd cleanup
 * failed (or on invalid arguments / missing server ctx).
 */
int
server_connection_cleanup(xlator_t *this, client_t *client, int32_t flags,
                          gf_boolean_t *fd_exist)
{
    server_ctx_t *serv_ctx = NULL;
    fdentry_t *fdentries = NULL;
    uint32_t fd_count = 0;
    int cd_ret = 0;
    int ret = 0;
    xlator_t *bound_xl = NULL;
    int i = 0;
    fd_t *fd = NULL;
    uint64_t fd_cnt = 0;
    int32_t detach = 0;

    GF_VALIDATE_OR_GOTO("server", this, out);
    GF_VALIDATE_OR_GOTO(this->name, client, out);
    GF_VALIDATE_OR_GOTO(this->name, flags, out);

    serv_ctx = server_ctx_get(client, client->this);

    if (serv_ctx == NULL) {
        gf_smsg(this->name, GF_LOG_INFO, 0, PS_MSG_SERVER_CTX_GET_FAILED, NULL);
        goto out;
    }

    /* Snapshot all fds under the fdtable lock; ownership of the
       returned array passes to this function. */
    LOCK(&serv_ctx->fdtable_lock);
    {
        if (serv_ctx->fdtable && (flags & POSIX_LOCKS))
            fdentries = gf_fd_fdtable_get_all_fds(serv_ctx->fdtable, &fd_count);
    }
    UNLOCK(&serv_ctx->fdtable_lock);

    if (client->bound_xl == NULL)
        goto out;

    if (flags & INTERNAL_LOCKS) {
        cd_ret = gf_client_disconnect(client);
    }

    if (fdentries != NULL) {
        /* Loop to configure fd_count on victim brick */
        bound_xl = client->bound_xl;
        if (bound_xl) {
            for (i = 0; i < fd_count; i++) {
                fd = fdentries[i].fd;
                if (!fd)
                    continue;
                fd_cnt++;
            }
            if (fd_cnt) {
                if (fd_exist)
                    (*fd_exist) = _gf_true;
                /* The flush callbacks decrement this; the last one to
                   finish performs the detach handling. */
                GF_ATOMIC_ADD(client->fd_cnt, fd_cnt);
            }
        }

        /* If fd_exist is not NULL it means function is invoke
           by server_rpc_notify at the time of getting DISCONNECT
           notification
        */
        if (fd_exist)
            detach = 1;

        gf_msg_debug(this->name, 0,
                     "Performing cleanup on %d "
                     "fdentries",
                     fd_count);
        /* do_fd_cleanup() takes ownership of fdentries. */
        ret = do_fd_cleanup(this, client, fdentries, fd_count, detach);
    } else
        gf_smsg(this->name, GF_LOG_INFO, 0, PS_MSG_FDENTRY_NULL, NULL);

    if (cd_ret || ret)
        ret = -1;

out:
    return ret;
}
378

379
static call_frame_t *
380
server_alloc_frame(rpcsvc_request_t *req, client_t *client)
381
{
382
    call_frame_t *frame = NULL;
383
    server_state_t *state = NULL;
384

385
    frame = create_frame(client->this, req->svc->ctx->pool);
386
    if (!frame)
387
        goto out;
388

389
    state = GF_CALLOC(1, sizeof(*state), gf_server_mt_state_t);
390
    if (caa_unlikely(!state)) {
391
        STACK_DESTROY(frame->root);
392
        frame = NULL;
393
        goto out;
394
    }
395

396
    if (client->bound_xl)
397
        state->itable = client->bound_xl->itable;
398

399
    state->xprt = rpc_transport_ref(req->trans);
400
    state->resolve.fd_no = -1;
401
    state->resolve2.fd_no = -1;
402

403
    frame->root->state = state; /* which socket */
404
    frame->root->type = GF_OP_TYPE_FOP;
405

406
    frame->this = client->this;
407
out:
408
    return frame;
409
}
410

411
/* Build and populate a call frame for an incoming RPC request:
 * allocates frame+state, applies root-/all-squashing policy based on
 * the client's trust level and pid, copies the caller's credentials
 * (uid/gid/pid/lk_owner/groups) into the frame, and records the peer
 * identifier.  Returns NULL on invalid input or allocation failure.
 */
call_frame_t *
get_frame_from_request(rpcsvc_request_t *req)
{
    call_frame_t *frame = NULL;
    client_t *client = NULL;
    client_t *tmp_client = NULL;
    xlator_t *this = NULL;
    server_conf_t *priv = NULL;
    clienttable_t *clienttable = NULL;
    unsigned int i = 0;
    rpc_transport_t *trans = NULL;
    server_state_t *state = NULL;

    GF_VALIDATE_OR_GOTO("server", req, out);
    trans = req->trans;
    GF_VALIDATE_OR_GOTO("server", trans, out);
    GF_VALIDATE_OR_GOTO("server", req->svc, out);
    GF_VALIDATE_OR_GOTO("server", req->svc->ctx, out);

    client = trans->xl_private;
    GF_VALIDATE_OR_GOTO("server", client, out);

    frame = server_alloc_frame(req, client);
    if (!frame)
        goto out;

    frame->root->op = req->procnum;

    this = trans->xl;
    priv = this->private;
    clienttable = this->ctx->clienttable;

    /* Squashing policy: scan the client table to confirm this client is
       registered, then decide per request pid / auth whether to apply
       root- and all-squashing. */
    for (i = 0; i < clienttable->max_clients; i++) {
        tmp_client = clienttable->cliententries[i].client;
        if (client == tmp_client) {
            /* For nfs clients the server processes will be running
               within the trusted storage pool machines. So if we
               do not do root-squashing and all-squashing for nfs
               servers, thinking that its a trusted client, then
               root-squashing and all-squashing won't work for nfs
               clients.
            */
            if (req->pid == NFS_PID) {
                RPC_AUTH_ROOT_SQUASH(req);
                RPC_AUTH_ALL_SQUASH(req);
                goto after_squash;
            }
            /* for non trusted clients username and password
               would not have been set. So for non trusted clients
               (i.e clients not from the same machine as the brick,
               and clients from outside the storage pool)
               do the root-squashing and all-squashing.
               TODO: If any client within the storage pool (i.e
               mounting within a machine from the pool but using
               other machine's ip/hostname from the same pool)
               is present treat it as a trusted client
            */
            else if (!client->auth.username) {
                RPC_AUTH_ROOT_SQUASH(req);
                RPC_AUTH_ALL_SQUASH(req);
                goto after_squash;
            }

            /* Problem: If we just check whether the client is
               trusted client and do not do root squashing and
               all squashing for them, then for smb clients and
               UFO clients root squashing and all squashing will
               never happen as they use the fuse mounts done within
               the trusted pool (i.e they are trusted clients).
               Solution: To fix it, do root squashing and all squashing
               for trusted clients also. If one wants to have a client
               within the storage pool for which root-squashing does
               not happen, then the client has to be mounted with
               --no-root-squash option. But for defrag client and
               gsyncd client do not do root-squashing and all-squashing.
            */
            else if (req->pid != GF_CLIENT_PID_NO_ROOT_SQUASH &&
                     req->pid != GF_CLIENT_PID_GSYNCD &&
                     req->pid != GF_CLIENT_PID_DEFRAG &&
                     req->pid != GF_CLIENT_PID_SELF_HEALD &&
                     req->pid != GF_CLIENT_PID_QUOTA_MOUNT) {
                RPC_AUTH_ROOT_SQUASH(req);
                RPC_AUTH_ALL_SQUASH(req);
                goto after_squash;
            }
        }
    }

after_squash:
    /* Add a ref for this fop */
    gf_client_ref(client);

    /* Copy the (possibly squashed) caller credentials into the frame. */
    frame->root->uid = req->uid;
    frame->root->gid = req->gid;
    frame->root->pid = req->pid;
    frame->root->client = client;
    lk_owner_copy(&frame->root->lk_owner, &req->lk_owner);

    /* Groups come either from server-side resolution or from the wire. */
    if (priv->server_manage_gids)
        gid_resolve(priv, frame->root);
    else
        server_decode_groups(frame, req);

    memcpy(&frame->root->identifier, trans->peerinfo.identifier, UNIX_PATH_MAX);

    /* more fields, for the clients which are 3.x series this will be 0 */
    frame->root->flags = req->flags;
    frame->root->ctime = req->ctime;

    frame->local = req;

    state = CALL_STATE(frame);
    state->client = client;
out:
    return frame;
}
527

528
int
529
server_build_config(xlator_t *this, server_conf_t *conf)
530
{
531
    data_t *data = NULL;
532
    int ret = -1;
533
    struct stat buf = {
534
        0,
535
    };
536

537
    GF_VALIDATE_OR_GOTO("server", this, out);
538
    GF_VALIDATE_OR_GOTO("server", conf, out);
539

540
    ret = dict_get_int32(this->options, "inode-lru-limit",
541
                         &conf->inode_lru_limit);
542
    if (ret < 0) {
543
        conf->inode_lru_limit = 16384;
544
    }
545

546
    data = dict_get(this->options, "trace");
547
    if (data) {
548
        ret = gf_string2boolean(data->data, &conf->trace);
549
        if (ret != 0) {
550
            gf_smsg(this->name, GF_LOG_WARNING, EINVAL, PS_MSG_INVALID_ENTRY,
551
                    NULL);
552
        }
553
    }
554

555
    data = dict_get(this->options, "config-directory");
556
    if (data) {
557
        /* Check whether the specified directory exists,
558
           or directory specified is non standard */
559
        ret = sys_stat(data->data, &buf);
560
        if ((ret != 0) || !S_ISDIR(buf.st_mode)) {
561
            gf_smsg(this->name, GF_LOG_ERROR, 0, PS_MSG_DIR_NOT_FOUND,
562
                    "data=%s", data->data, NULL);
563
            ret = -1;
564
            goto out;
565
        }
566
        /* Make sure that conf-dir doesn't contain ".." in path */
567
        if ((gf_strstr(data->data, "/", "..")) == -1) {
568
            ret = -1;
569
            gf_smsg(this->name, GF_LOG_ERROR, 0, PS_MSG_CONF_DIR_INVALID,
570
                    "data=%s", data->data, NULL);
571
            goto out;
572
        }
573

574
        conf->conf_dir = gf_strdup(data->data);
575
    }
576
    ret = 0;
577
out:
578
    return ret;
579
}
580

581
void
582
print_caller(char *str, int size, call_frame_t *frame)
583
{
584
    server_state_t *state = NULL;
585

586
    GF_VALIDATE_OR_GOTO("server", str, out);
587
    GF_VALIDATE_OR_GOTO("server", frame, out);
588

589
    state = CALL_STATE(frame);
590

591
    snprintf(str, size, " Callid=%" PRId64 ", Client=%s", frame->root->unique,
592
             state->xprt->peerinfo.identifier);
593

594
out:
595
    return;
596
}
597

598
void
599
server_print_resolve(char *str, int size, server_resolve_t *resolve)
600
{
601
    int filled = 0;
602

603
    GF_VALIDATE_OR_GOTO("server", str, out);
604

605
    if (!resolve) {
606
        snprintf(str, size, "<nul>");
607
        return;
608
    }
609

610
    filled += snprintf(str + filled, size - filled, " Resolve={");
611
    if (resolve->fd_no != -1)
612
        filled += snprintf(str + filled, size - filled, "fd=%" PRId64 ",",
613
                           (uint64_t)resolve->fd_no);
614
    if (resolve->bname)
615
        filled += snprintf(str + filled, size - filled, "bname=%s,",
616
                           resolve->bname);
617
    if (resolve->path)
618
        filled += snprintf(str + filled, size - filled, "path=%s",
619
                           resolve->path);
620

621
    snprintf(str + filled, size - filled, "}");
622
out:
623
    return;
624
}
625

626
void
627
server_print_loc(char *str, int size, loc_t *loc)
628
{
629
    int filled = 0;
630

631
    GF_VALIDATE_OR_GOTO("server", str, out);
632

633
    if (!loc) {
634
        snprintf(str, size, "<nul>");
635
        return;
636
    }
637

638
    filled += snprintf(str + filled, size - filled, " Loc={");
639

640
    if (loc->path)
641
        filled += snprintf(str + filled, size - filled, "path=%s,", loc->path);
642
    if (loc->inode)
643
        filled += snprintf(str + filled, size - filled, "inode=%p,",
644
                           loc->inode);
645
    if (loc->parent)
646
        filled += snprintf(str + filled, size - filled, "parent=%p",
647
                           loc->parent);
648

649
    snprintf(str + filled, size - filled, "}");
650
out:
651
    return;
652
}
653

654
void
655
server_print_params(char *str, int size, server_state_t *state)
656
{
657
    int filled = 0;
658

659
    GF_VALIDATE_OR_GOTO("server", str, out);
660

661
    filled += snprintf(str + filled, size - filled, " Params={");
662

663
    if (state->fd)
664
        filled += snprintf(str + filled, size - filled, "fd=%p,", state->fd);
665
    if (state->valid)
666
        filled += snprintf(str + filled, size - filled, "valid=%d,",
667
                           state->valid);
668
    if (state->flags)
669
        filled += snprintf(str + filled, size - filled, "flags=%d,",
670
                           state->flags);
671
    if (state->size)
672
        filled += snprintf(str + filled, size - filled, "size=%zu,",
673
                           state->size);
674
    if (state->offset)
675
        filled += snprintf(str + filled, size - filled, "offset=%" PRId64 ",",
676
                           state->offset);
677
    if (state->cmd)
678
        filled += snprintf(str + filled, size - filled, "cmd=%d,", state->cmd);
679
    if (state->type)
680
        filled += snprintf(str + filled, size - filled, "type=%d,",
681
                           state->type);
682
    if (state->name)
683
        filled += snprintf(str + filled, size - filled, "name=%s,",
684
                           state->name);
685
    if (state->mask)
686
        filled += snprintf(str + filled, size - filled, "mask=%d,",
687
                           state->mask);
688
    if (state->volume)
689
        filled += snprintf(str + filled, size - filled, "volume=%s,",
690
                           state->volume);
691

692
/* FIXME
693
        snprintf (str + filled, size - filled,
694
                  "bound_xl=%s}", state->client->bound_xl->name);
695
*/
696
out:
697
    return;
698
}
699

700
int
701
server_resolve_is_empty(server_resolve_t *resolve)
702
{
703
    if (resolve->fd_no != -1)
704
        return 0;
705

706
    if (resolve->path != 0)
707
        return 0;
708

709
    if (resolve->bname != 0)
710
        return 0;
711

712
    return 1;
713
}
714

715
void
716
server_print_reply(call_frame_t *frame, int op_ret, int op_errno)
717
{
718
    server_conf_t *conf = NULL;
719
    server_state_t *state = NULL;
720
    xlator_t *this = NULL;
721
    char caller[512];
722
    char fdstr[32];
723
    char *op = "UNKNOWN";
724

725
    GF_VALIDATE_OR_GOTO("server", frame, out);
726

727
    this = frame->this;
728
    conf = this->private;
729

730
    GF_VALIDATE_OR_GOTO("server", conf, out);
731
    GF_VALIDATE_OR_GOTO("server", conf->trace, out);
732

733
    state = CALL_STATE(frame);
734

735
    print_caller(caller, 256, frame);
736

737
    switch (frame->root->type) {
738
        case GF_OP_TYPE_FOP:
739
            op = (char *)gf_fop_list[frame->root->op];
740
            break;
741
        default:
742
            op = "";
743
    }
744

745
    fdstr[0] = '\0';
746
    if (state->fd)
747
        snprintf(fdstr, 32, " fd=%p", state->fd);
748

749
    gf_smsg(this->name, GF_LOG_INFO, op_errno, PS_MSG_SERVER_MSG, "op=%s", op,
750
            "caller=%s", caller, "op_ret=%d", op_ret, "op_errno=%d", op_errno,
751
            "fdstr=%s", fdstr, NULL);
752
out:
753
    return;
754
}
755

756
void
757
server_print_request(call_frame_t *frame)
758
{
759
    server_conf_t *conf = NULL;
760
    xlator_t *this = NULL;
761
    server_state_t *state = NULL;
762
    char *op = "UNKNOWN";
763
    char resolve_vars[256];
764
    char resolve2_vars[256];
765
    char loc_vars[256];
766
    char loc2_vars[256];
767
    char other_vars[512];
768
    char caller[512];
769

770
    GF_VALIDATE_OR_GOTO("server", frame, out);
771

772
    this = frame->this;
773
    conf = this->private;
774

775
    GF_VALIDATE_OR_GOTO("server", conf, out);
776

777
    if (!conf->trace)
778
        goto out;
779

780
    state = CALL_STATE(frame);
781

782
    memset(resolve_vars, '\0', 256);
783
    memset(resolve2_vars, '\0', 256);
784
    memset(loc_vars, '\0', 256);
785
    memset(loc2_vars, '\0', 256);
786
    memset(other_vars, '\0', 256);
787

788
    print_caller(caller, 256, frame);
789

790
    if (!server_resolve_is_empty(&state->resolve)) {
791
        server_print_resolve(resolve_vars, 256, &state->resolve);
792
        server_print_loc(loc_vars, 256, &state->loc);
793
    }
794

795
    if (!server_resolve_is_empty(&state->resolve2)) {
796
        server_print_resolve(resolve2_vars, 256, &state->resolve2);
797
        server_print_loc(loc2_vars, 256, &state->loc2);
798
    }
799

800
    server_print_params(other_vars, 512, state);
801

802
    switch (frame->root->type) {
803
        case GF_OP_TYPE_FOP:
804
            op = (char *)gf_fop_list[frame->root->op];
805
            break;
806
        default:
807
            op = "";
808
            break;
809
    }
810

811
    gf_smsg(this->name, GF_LOG_INFO, 0, PS_MSG_SERVER_MSG, "op=%s", op,
812
            "caller=%s", caller, "resolve_vars=%s", resolve_vars, "loc_vars=%s",
813
            loc_vars, "resolve2_vars=%s", resolve2_vars, "loc2_vars=%s",
814
            loc2_vars, "other_vars=%s", other_vars, NULL);
815
out:
816
    return;
817
}
818

819
int
820
serialize_rsp_direntp_v2(gf_dirent_t *entries, gfx_readdirp_rsp *rsp)
821
{
822
    gf_dirent_t *entry = NULL;
823
    gfx_dirplist *trav = NULL;
824
    gfx_dirplist *prev = NULL;
825
    int ret = -1;
826

827
    GF_VALIDATE_OR_GOTO("server", entries, out);
828
    GF_VALIDATE_OR_GOTO("server", rsp, out);
829

830
    list_for_each_entry(entry, &entries->list, list)
831
    {
832
        trav = GF_CALLOC(1, sizeof(*trav), gf_server_mt_dirent_rsp_t);
833
        if (!trav)
834
            goto out;
835

836
        trav->d_ino = entry->d_ino;
837
        trav->d_off = entry->d_off;
838
        trav->d_len = entry->d_len;
839
        trav->d_type = entry->d_type;
840
        trav->name = entry->d_name;
841

842
        gfx_stat_from_iattx(&trav->stat, &entry->d_stat);
843
        dict_to_xdr(entry->dict, &trav->dict);
844

845
        if (prev)
846
            prev->nextentry = trav;
847
        else
848
            rsp->reply = trav;
849

850
        prev = trav;
851
        trav = NULL;
852
    }
853

854
    ret = 0;
855
out:
856
    GF_FREE(trav);
857

858
    return ret;
859
}
860

861
int
862
serialize_rsp_dirent_v2(gf_dirent_t *entries, gfx_readdir_rsp *rsp)
863
{
864
    gf_dirent_t *entry = NULL;
865
    gfx_dirlist *trav = NULL;
866
    gfx_dirlist *prev = NULL;
867
    int ret = -1;
868

869
    GF_VALIDATE_OR_GOTO("server", rsp, out);
870
    GF_VALIDATE_OR_GOTO("server", entries, out);
871

872
    list_for_each_entry(entry, &entries->list, list)
873
    {
874
        trav = GF_CALLOC(1, sizeof(*trav), gf_server_mt_dirent_rsp_t);
875
        if (!trav)
876
            goto out;
877
        trav->d_ino = entry->d_ino;
878
        trav->d_off = entry->d_off;
879
        trav->d_len = entry->d_len;
880
        trav->d_type = entry->d_type;
881
        trav->name = entry->d_name;
882
        if (prev)
883
            prev->nextentry = trav;
884
        else
885
            rsp->reply = trav;
886

887
        prev = trav;
888
    }
889

890
    ret = 0;
891
out:
892
    return ret;
893
}
894

895
int
896
readdir_rsp_cleanup_v2(gfx_readdir_rsp *rsp)
897
{
898
    gfx_dirlist *prev = NULL;
899
    gfx_dirlist *trav = NULL;
900

901
    trav = rsp->reply;
902
    prev = trav;
903
    while (trav) {
904
        trav = trav->nextentry;
905
        GF_FREE(prev);
906
        prev = trav;
907
    }
908

909
    return 0;
910
}
911

912
int
913
readdirp_rsp_cleanup_v2(gfx_readdirp_rsp *rsp)
914
{
915
    gfx_dirplist *prev = NULL;
916
    gfx_dirplist *trav = NULL;
917

918
    trav = rsp->reply;
919
    prev = trav;
920
    while (trav) {
921
        trav = trav->nextentry;
922
        GF_FREE(prev->dict.pairs.pairs_val);
923
        GF_FREE(prev);
924
        prev = trav;
925
    }
926

927
    return 0;
928
}
929

930
/* Build the wire-format (gfs3_locklist) chain for a getactivelk reply
 * from the in-memory lock-migration list.
 *
 * NOTE(review): this rewrites tmp->flock.l_type IN PLACE on the input
 * list (F_RDLCK -> GF_LK_F_RDLCK, ...) before converting, so the
 * caller's list is left holding protocol values, not POSIX ones —
 * confirm callers do not reuse the POSIX values afterwards.
 * NOTE(review): trav->client_uid aliases tmp->client_uid (no strdup);
 * presumably the reply is serialized before the input list is freed —
 * verify against the call sites.
 *
 * Returns 0 on success, -1 on a NULL locklist or allocation failure.
 * Nodes already linked into *reply are freed later by
 * getactivelkinfo_rsp_cleanup_v2(). */
static int
common_rsp_locklist(lock_migration_info_t *locklist, gfs3_locklist **reply)
{
    lock_migration_info_t *tmp = NULL;
    gfs3_locklist *trav = NULL;
    gfs3_locklist *prev = NULL;
    int ret = -1;

    GF_VALIDATE_OR_GOTO("server", locklist, out);

    list_for_each_entry(tmp, &locklist->list, list)
    {
        /* TODO: move to GF_MALLOC() */
        trav = GF_CALLOC(1, sizeof(*trav), gf_server_mt_lock_mig_t);
        if (!trav)
            goto out;

        /* Map POSIX lock types onto GlusterFS protocol lock types. */
        switch (tmp->flock.l_type) {
            case F_RDLCK:
                tmp->flock.l_type = GF_LK_F_RDLCK;
                break;
            case F_WRLCK:
                tmp->flock.l_type = GF_LK_F_WRLCK;
                break;
            case F_UNLCK:
                tmp->flock.l_type = GF_LK_F_UNLCK;
                break;

            default:
                /* Unknown type is logged but still serialized as-is. */
                gf_smsg(THIS->name, GF_LOG_ERROR, 0, PS_MSG_LOCK_ERROR,
                        "lock_type=%" PRId32, tmp->flock.l_type, NULL);
                break;
        }

        gf_proto_flock_from_flock(&trav->flock, &tmp->flock);

        trav->lk_flags = tmp->lk_flags;

        trav->client_uid = tmp->client_uid;

        /* Append to the reply chain. */
        if (prev)
            prev->nextentry = trav;
        else
            *reply = trav;

        prev = trav;
        trav = NULL;
    }

    ret = 0;
out:
    /* trav is NULL on every path that reaches here; kept for safety. */
    GF_FREE(trav);
    return ret;
}
984

985
int
986
serialize_rsp_locklist_v2(lock_migration_info_t *locklist,
987
                          gfx_getactivelk_rsp *rsp)
988
{
989
    int ret = 0;
990

991
    GF_VALIDATE_OR_GOTO("server", rsp, out);
992
    ret = common_rsp_locklist(locklist, &rsp->reply);
993
out:
994
    return ret;
995
}
996

997
int
998
getactivelkinfo_rsp_cleanup_v2(gfx_getactivelk_rsp *rsp)
999
{
1000
    gfs3_locklist *prev = NULL;
1001
    gfs3_locklist *trav = NULL;
1002

1003
    trav = rsp->reply;
1004
    prev = trav;
1005

1006
    while (trav) {
1007
        trav = trav->nextentry;
1008
        GF_FREE(prev);
1009
        prev = trav;
1010
    }
1011

1012
    return 0;
1013
}
1014

1015
int
1016
gf_server_check_getxattr_cmd(call_frame_t *frame, const char *key)
1017
{
1018
    server_conf_t *conf = NULL;
1019
    rpc_transport_t *xprt = NULL;
1020

1021
    conf = frame->this->private;
1022
    if (!conf)
1023
        return 0;
1024

1025
    if (fnmatch("*list*mount*point*", key, 0) == 0) {
1026
        /* list all the client protocol connecting to this process */
1027
        pthread_mutex_lock(&conf->mutex);
1028
        {
1029
            list_for_each_entry(xprt, &conf->xprt_list, list)
1030
            {
1031
                gf_smsg("mount-point-list", GF_LOG_INFO, 0,
1032
                        PS_MSG_MOUNT_PT_FAIL, "identifier=%s",
1033
                        xprt->peerinfo.identifier, NULL);
1034
            }
1035
        }
1036
        pthread_mutex_unlock(&conf->mutex);
1037
    }
1038

1039
    /* Add more options/keys here */
1040

1041
    return 0;
1042
}
1043

1044
int
1045
gf_server_check_setxattr_cmd(call_frame_t *frame, dict_t *dict)
1046
{
1047
    server_conf_t *conf = NULL;
1048
    rpc_transport_t *xprt = NULL;
1049
    uint64_t total_read = 0;
1050
    uint64_t total_write = 0;
1051

1052
    conf = frame->this->private;
1053
    if (!conf || !dict)
1054
        return 0;
1055

1056
    if (dict_foreach_fnmatch(dict, "*io*stat*dump", dict_null_foreach_fn,
1057
                             NULL) > 0) {
1058
        list_for_each_entry(xprt, &conf->xprt_list, list)
1059
        {
1060
            total_read += xprt->total_bytes_read;
1061
            total_write += xprt->total_bytes_write;
1062
        }
1063
        gf_smsg("stats", GF_LOG_INFO, 0, PS_MSG_RW_STAT, "total-read=%" PRIu64,
1064
                total_read, "total-write=%" PRIu64, total_write, NULL);
1065
    }
1066

1067
    return 0;
1068
}
1069

1070
/* Fetch the per-client server context, creating it on first use.
 *
 * Handles a create/create race: if another thread installs a context
 * between client_ctx_get() and client_ctx_set(), the freshly built one
 * is torn down and the winner is adopted.
 * NOTE(review): the losing context is released with
 * GF_FREE(ctx->fdtable), which looks like it skips freeing the
 * fdentries array gf_fd_fdtable_alloc() set up — confirm whether
 * gf_fd_fdtable_destroy() should be used here instead.
 *
 * Returns the context, or NULL on allocation failure. */
server_ctx_t *
server_ctx_get(client_t *client, xlator_t *xlator)
{
    void *tmp = NULL;
    server_ctx_t *ctx = NULL;
    server_ctx_t *setted_ctx = NULL;

    tmp = client_ctx_get(client, xlator);

    ctx = tmp;

    /* Fast path: a context already exists for this client/xlator. */
    if (ctx != NULL)
        goto out;

    ctx = GF_CALLOC(1, sizeof(server_ctx_t), gf_server_mt_server_conf_t);

    if (ctx == NULL)
        goto out;

    ctx->fdtable = gf_fd_fdtable_alloc();

    if (ctx->fdtable == NULL) {
        GF_FREE(ctx);
        ctx = NULL;
        goto out;
    }

    LOCK_INIT(&ctx->fdtable_lock);

    setted_ctx = client_ctx_set(client, xlator, ctx);
    if (ctx != setted_ctx) {
        /* Lost the race: another thread installed a context first;
         * discard ours and return the installed one. */
        LOCK_DESTROY(&ctx->fdtable_lock);
        GF_FREE(ctx->fdtable);
        GF_FREE(ctx);
        ctx = setted_ctx;
    }

out:
    return ctx;
}
1110

1111
/* Validate a client's username/password login against the volume's
 * auth.login.<brick>.allow and auth.login.<user>.password options and,
 * on a successful match, record copies of the credentials on the
 * client object.
 *
 * Returns 0 when no username is supplied (non-trusted clients are not
 * rejected here), 0 on a successful match, and non-zero on a missing
 * password/brick name, missing password option, or password mismatch.
 * NOTE(review): when an allow list exists but no pattern matches, the
 * value returned is whatever ret last held (the positive byte count
 * from the earlier gf_asprintf()) — callers appear to treat any
 * non-zero value as failure; confirm.
 */
int
auth_set_username_passwd(dict_t *input_params, dict_t *config_params,
                         client_t *client)
{
    int ret = 0;
    data_t *allow_user = NULL;
    data_t *passwd_data = NULL;
    char *username = NULL;
    char *password = NULL;
    char *brick_name = NULL;
    char *searchstr = NULL;
    char *username_str = NULL;
    char *tmp = NULL;
    char *username_cpy = NULL;

    ret = dict_get_str(input_params, "username", &username);
    if (ret) {
        gf_msg_debug("auth/login", 0,
                     "username not found, returning "
                     "DONT-CARE");
        /* For non trusted clients username and password
           will not be there. So don't reject the client.
        */
        ret = 0;
        goto out;
    }

    ret = dict_get_str(input_params, "password", &password);
    if (ret) {
        gf_smsg("auth/login", GF_LOG_WARNING, 0, PS_MSG_PASSWORD_NOT_FOUND,
                NULL);
        goto out;
    }

    ret = dict_get_str(input_params, "remote-subvolume", &brick_name);
    if (ret) {
        gf_smsg("auth/login", GF_LOG_ERROR, 0,
                PS_MSG_REMOTE_SUBVOL_NOT_SPECIFIED, NULL);
        ret = -1;
        goto out;
    }

    ret = gf_asprintf(&searchstr, "auth.login.%s.allow", brick_name);
    if (-1 == ret) {
        ret = 0;
        goto out;
    }

    allow_user = dict_get(config_params, searchstr);
    GF_FREE(searchstr);

    if (allow_user) {
        /* The allow option is a space/comma separated list of fnmatch
         * patterns; walk it looking for one matching the login name. */
        username_cpy = gf_strdup(allow_user->data);
        if (!username_cpy)
            goto out;

        username_str = strtok_r(username_cpy, " ,", &tmp);

        while (username_str) {
            if (!fnmatch(username_str, username, 0)) {
                ret = gf_asprintf(&searchstr, "auth.login.%s.password",
                                  username);
                if (-1 == ret)
                    goto out;

                passwd_data = dict_get(config_params, searchstr);
                GF_FREE(searchstr);

                if (!passwd_data) {
                    gf_smsg("auth/login", GF_LOG_ERROR, 0, PS_MSG_LOGIN_ERROR,
                            NULL);
                    ret = -1;
                    goto out;
                }

                /* ret becomes 0 only on an exact password match. */
                ret = strcmp(data_to_str(passwd_data), password);
                if (!ret) {
                    client->auth.username = gf_strdup(username);
                    client->auth.passwd = gf_strdup(password);
                } else {
                    gf_smsg("auth/login", GF_LOG_ERROR, 0, PS_MSG_LOGIN_ERROR,
                            "username=%s", username, NULL);
                }
                break;
            }
            username_str = strtok_r(NULL, " ,", &tmp);
        }
    }

out:
    GF_FREE(username_cpy);

    return ret;
}
1205

1206
inode_t *
1207
server_inode_new(inode_table_t *itable, uuid_t gfid)
1208
{
1209
    if (__is_root_gfid(gfid))
1210
        return itable->root;
1211
    else
1212
        return inode_new(itable);
1213
}
1214

1215
/* Rebuild an in-memory lock-migration list from the wire-format
 * (gfs3_locklist) chain carried by a setactivelk request, appending
 * one lock_migration_info_t per wire node to lmi->list.
 *
 * Returns 0 on success, -1 on allocation failure.
 * NOTE(review): on failure, entries already appended stay linked on
 * lmi->list — presumably the caller releases them; confirm.
 * NOTE(review): the gf_strdup() of client_uid is unchecked; a failed
 * copy would leave a NULL client_uid behind silently.
 */
int
unserialize_req_locklist_v2(gfx_setactivelk_req *req,
                            lock_migration_info_t *lmi)
{
    struct gfs3_locklist *trav = NULL;
    lock_migration_info_t *temp = NULL;
    int ret = -1;

    trav = req->request;

    INIT_LIST_HEAD(&lmi->list);

    while (trav) {
        /* TODO: move to GF_MALLOC() */
        temp = GF_CALLOC(1, sizeof(*lmi), gf_common_mt_lock_mig);
        if (temp == NULL) {
            gf_smsg(THIS->name, GF_LOG_ERROR, 0, PS_MSG_NO_MEM, NULL);
            goto out;
        }

        INIT_LIST_HEAD(&temp->list);

        /* Convert the wire flock back to a POSIX-style flock. */
        gf_proto_flock_to_flock(&trav->flock, &temp->flock);

        temp->lk_flags = trav->lk_flags;

        temp->client_uid = gf_strdup(trav->client_uid);

        list_add_tail(&temp->list, &lmi->list);

        trav = trav->nextentry;
    }

    ret = 0;
out:
    return ret;
}
1252

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.