glusterfs

Форк
0
1178 строк · 27.1 Кб
1
/*
2
  Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
3
  This file is part of GlusterFS.
4

5
  This file is licensed to you under your choice of the GNU Lesser
6
  General Public License, version 3 or any later version (LGPLv3 or
7
  later), or the GNU General Public License, version 2 (GPLv2), in all
8
  cases as published by the Free Software Foundation.
9
*/
10

11
#include "glusterfs/fd.h"
12
#include <errno.h>     // for EINVAL, errno, ENOMEM
13
#include <inttypes.h>  // for PRIu64
14
#include <stdint.h>    // for UINT32_MAX
15
#include <string.h>    // for NULL, memcpy, memset, size_t
16
#include "glusterfs/statedump.h"
17

18
static int
19
gf_fd_fdtable_expand(fdtable_t *fdtable, uint32_t nr);
20

21
fd_t *
22
__fd_ref(fd_t *fd);
23

24
static int
25
gf_fd_chain_fd_entries(fdentry_t *entries, uint32_t startidx, uint32_t endcount)
26
{
27
    uint32_t i = 0;
28

29
    if (!entries) {
30
        gf_msg_callingfn("fd", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
31
                         "!entries");
32
        return -1;
33
    }
34

35
    /* Chain only till the second to last entry because we want to
36
     * ensure that the last entry has GF_FDTABLE_END.
37
     */
38
    for (i = startidx; i < (endcount - 1); i++)
39
        entries[i].next_free = i + 1;
40

41
    /* i has already been incremented up to the last entry. */
42
    entries[i].next_free = GF_FDTABLE_END;
43

44
    return 0;
45
}
46

47
static int
48
gf_fd_fdtable_expand(fdtable_t *fdtable, uint32_t nr)
49
{
50
    fdentry_t *oldfds = NULL;
51
    uint32_t oldmax_fds = -1;
52
    int ret = -1;
53

54
    if (fdtable == NULL || nr > UINT32_MAX) {
55
        gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
56
                         "invalid argument");
57
        ret = EINVAL;
58
        goto out;
59
    }
60

61
    nr /= (1024 / sizeof(fdentry_t));
62
    nr = gf_roundup_next_power_of_two(nr + 1);
63
    nr *= (1024 / sizeof(fdentry_t));
64

65
    oldfds = fdtable->fdentries;
66
    oldmax_fds = fdtable->max_fds;
67

68
    fdtable->fdentries = GF_CALLOC(nr, sizeof(fdentry_t),
69
                                   gf_common_mt_fdentry_t);
70
    if (!fdtable->fdentries) {
71
        ret = ENOMEM;
72
        goto out;
73
    }
74
    fdtable->max_fds = nr;
75

76
    if (oldfds) {
77
        uint32_t cpy = oldmax_fds * sizeof(fdentry_t);
78
        memcpy(fdtable->fdentries, oldfds, cpy);
79
    }
80

81
    gf_fd_chain_fd_entries(fdtable->fdentries, oldmax_fds, fdtable->max_fds);
82

83
    /* Now that expansion is done, we must update the fd list
84
     * head pointer so that the fd allocation functions can continue
85
     * using the expanded table.
86
     */
87
    fdtable->first_free = oldmax_fds;
88
    GF_FREE(oldfds);
89
    ret = 0;
90
out:
91
    return ret;
92
}
93

94
fdtable_t *
95
gf_fd_fdtable_alloc(void)
96
{
97
    fdtable_t *fdtable = NULL;
98

99
    fdtable = GF_CALLOC(1, sizeof(*fdtable), gf_common_mt_fdtable_t);
100
    if (!fdtable)
101
        return NULL;
102

103
    pthread_rwlock_init(&fdtable->lock, NULL);
104

105
    pthread_rwlock_wrlock(&fdtable->lock);
106
    {
107
        gf_fd_fdtable_expand(fdtable, 0);
108
    }
109
    pthread_rwlock_unlock(&fdtable->lock);
110

111
    return fdtable;
112
}
113

114
static fdentry_t *
115
__gf_fd_fdtable_get_all_fds(fdtable_t *fdtable, uint32_t *count)
116
{
117
    fdentry_t *fdentries = NULL;
118

119
    if (count == NULL) {
120
        gf_msg_callingfn("fd", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
121
                         "!count");
122
        goto out;
123
    }
124

125
    fdentries = fdtable->fdentries;
126
    fdtable->fdentries = GF_CALLOC(fdtable->max_fds, sizeof(fdentry_t),
127
                                   gf_common_mt_fdentry_t);
128
    gf_fd_chain_fd_entries(fdtable->fdentries, 0, fdtable->max_fds);
129
    *count = fdtable->max_fds;
130

131
out:
132
    return fdentries;
133
}
134

135
fdentry_t *
136
gf_fd_fdtable_get_all_fds(fdtable_t *fdtable, uint32_t *count)
137
{
138
    fdentry_t *entries = NULL;
139

140
    if (fdtable) {
141
        pthread_rwlock_wrlock(&fdtable->lock);
142
        {
143
            entries = __gf_fd_fdtable_get_all_fds(fdtable, count);
144
        }
145
        pthread_rwlock_unlock(&fdtable->lock);
146
    }
147

148
    return entries;
149
}
150

151
static fdentry_t *
152
__gf_fd_fdtable_copy_all_fds(fdtable_t *fdtable, uint32_t *count)
153
{
154
    fdentry_t *fdentries = NULL;
155
    int i = 0;
156

157
    if (count == NULL) {
158
        gf_msg_callingfn("fd", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
159
                         "!count");
160
        goto out;
161
    }
162

163
    fdentries = GF_CALLOC(fdtable->max_fds, sizeof(fdentry_t),
164
                          gf_common_mt_fdentry_t);
165
    if (fdentries == NULL) {
166
        goto out;
167
    }
168

169
    *count = fdtable->max_fds;
170

171
    for (i = 0; i < fdtable->max_fds; i++) {
172
        if (fdtable->fdentries[i].fd != NULL) {
173
            fdentries[i].fd = fd_ref(fdtable->fdentries[i].fd);
174
        }
175
    }
176

177
out:
178
    return fdentries;
179
}
180

181
fdentry_t *
182
gf_fd_fdtable_copy_all_fds(fdtable_t *fdtable, uint32_t *count)
183
{
184
    fdentry_t *entries = NULL;
185

186
    if (fdtable) {
187
        pthread_rwlock_rdlock(&fdtable->lock);
188
        {
189
            entries = __gf_fd_fdtable_copy_all_fds(fdtable, count);
190
        }
191
        pthread_rwlock_unlock(&fdtable->lock);
192
    }
193

194
    return entries;
195
}
196

197
void
198
gf_fd_fdtable_destroy(fdtable_t *fdtable)
199
{
200
    struct list_head list = {
201
        0,
202
    };
203
    fd_t *fd = NULL;
204
    fdentry_t *fdentries = NULL;
205
    uint32_t fd_count = 0;
206
    int32_t i = 0;
207

208
    INIT_LIST_HEAD(&list);
209

210
    if (!fdtable) {
211
        gf_msg_callingfn("fd", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
212
                         "!fdtable");
213
        return;
214
    }
215

216
    pthread_rwlock_wrlock(&fdtable->lock);
217
    {
218
        fdentries = __gf_fd_fdtable_get_all_fds(fdtable, &fd_count);
219
        GF_FREE(fdtable->fdentries);
220
    }
221
    pthread_rwlock_unlock(&fdtable->lock);
222

223
    if (fdentries != NULL) {
224
        for (i = 0; i < fd_count; i++) {
225
            fd = fdentries[i].fd;
226
            if (fd != NULL) {
227
                fd_unref(fd);
228
            }
229
        }
230

231
        GF_FREE(fdentries);
232
        pthread_rwlock_destroy(&fdtable->lock);
233
        GF_FREE(fdtable);
234
    }
235
}
236

237
/*
 * Allocate an unused slot in 'fdtable' for 'fdptr' and return its index
 * (the client-visible fd number).  Expands the table when the free list
 * is empty.  Returns EINVAL on bad arguments and -1 when no slot can be
 * obtained even after expansion.  Note that the fd number and EINVAL
 * share the same return channel; callers treat negatives/EINVAL as error.
 */
int
gf_fd_unused_get(fdtable_t *fdtable, fd_t *fdptr)
{
    int32_t fd = -1;
    fdentry_t *fde = NULL;
    int error;
    int alloc_attempts = 0;

    if (fdtable == NULL || fdptr == NULL) {
        gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
                         "invalid argument");
        return EINVAL;
    }

    pthread_rwlock_wrlock(&fdtable->lock);
    {
    fd_alloc_try_again:
        if (fdtable->first_free != GF_FDTABLE_END) {
            /* Pop the head of the free list and mark the slot taken. */
            fde = &fdtable->fdentries[fdtable->first_free];
            fd = fdtable->first_free;
            fdtable->first_free = fde->next_free;
            fde->next_free = GF_FDENTRY_ALLOCATED;
            fde->fd = fdptr;
        } else {
            /* If this is true, there is something
             * seriously wrong with our data structures.
             */
            if (alloc_attempts >= 2) {
                gf_msg("fd", GF_LOG_ERROR, 0, LG_MSG_EXPAND_FD_TABLE_FAILED,
                       "multiple attempts to expand fd table"
                       " have failed.");
                goto out;
            }
            error = gf_fd_fdtable_expand(fdtable, fdtable->max_fds + 1);
            if (error) {
                gf_msg("fd", GF_LOG_ERROR, error, LG_MSG_EXPAND_FD_TABLE_FAILED,
                       "Cannot expand fdtable");
                goto out;
            }
            ++alloc_attempts;
            /* At this point, the table stands expanded
             * with the first_free referring to the first
             * free entry in the new set of fdentries that
             * have just been allocated. That means, the
             * above logic should just work.
             */
            goto fd_alloc_try_again;
        }
    }
out:
    pthread_rwlock_unlock(&fdtable->lock);

    return fd;
}
291

292
/*
 * Release table slot 'fd' in 'fdtable': detach the fd_t from the slot,
 * return the slot to the free list and drop the table's reference on
 * the fd.  GF_ANON_FD_NO is a no-op because anonymous fds never occupy
 * a table slot.
 */
void
gf_fd_put(fdtable_t *fdtable, int32_t fd)
{
    fd_t *fdptr = NULL;
    fdentry_t *fde = NULL;

    if (fd == GF_ANON_FD_NO)
        return;

    if (fdtable == NULL || fd < 0) {
        gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
                         "invalid argument");
        return;
    }

    /* Bounds check against the current table size. */
    if (!(fd < fdtable->max_fds)) {
        gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
                         "invalid argument");
        return;
    }

    pthread_rwlock_wrlock(&fdtable->lock);
    {
        fde = &fdtable->fdentries[fd];
        /* If the entry is not allocated, put operation must return
         * without doing anything.
         * This has the potential of masking out any bugs in a user of
         * fd that ends up calling gf_fd_put twice for the same fd or
         * for an unallocated fd, but it is a price we have to pay for
         * ensuring sanity of our fd-table.
         */
        if (fde->next_free != GF_FDENTRY_ALLOCATED)
            goto unlock_out;
        fdptr = fde->fd;
        fde->fd = NULL;
        fde->next_free = fdtable->first_free;
        fdtable->first_free = fd;
    }
unlock_out:
    pthread_rwlock_unlock(&fdtable->lock);

    /* Drop the reference outside the table lock: fd_unref() may trigger
     * fd_destroy(), which takes other locks. */
    if (fdptr) {
        fd_unref(fdptr);
    }
}
337

338
/*
 * Reverse-lookup variant of gf_fd_put(): find the slot holding 'fd' by
 * scanning the table (O(max_fds)), return the slot to the free list and
 * drop the table's reference on the fd.  Logs a warning when the fd is
 * not present in the table.
 */
void
gf_fdptr_put(fdtable_t *fdtable, fd_t *fd)
{
    fdentry_t *fde = NULL;
    int32_t i = 0;

    if ((fdtable == NULL) || (fd == NULL)) {
        gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
                         "invalid argument");
        return;
    }

    pthread_rwlock_wrlock(&fdtable->lock);
    {
        /* Linear scan: slot index is not stored on the fd itself. */
        for (i = 0; i < fdtable->max_fds; i++) {
            if (fdtable->fdentries[i].fd == fd) {
                fde = &fdtable->fdentries[i];
                break;
            }
        }

        if (fde == NULL) {
            gf_msg_callingfn("fd", GF_LOG_WARNING, 0,
                             LG_MSG_FD_NOT_FOUND_IN_FDTABLE,
                             "fd (%p) is not present in fdtable", fd);
            goto unlock_out;
        }

        /* If the entry is not allocated, put operation must return
         * without doing anything.
         * This has the potential of masking out any bugs in a user of
         * fd that ends up calling gf_fd_put twice for the same fd or
         * for an unallocated fd, but it is a price we have to pay for
         * ensuring sanity of our fd-table.
         */
        if (fde->next_free != GF_FDENTRY_ALLOCATED)
            goto unlock_out;
        fde->fd = NULL;
        fde->next_free = fdtable->first_free;
        fdtable->first_free = i;
    }
unlock_out:
    pthread_rwlock_unlock(&fdtable->lock);

    /* Unref outside the table lock; only when the slot was found.
     * NOTE(review): on the ALLOCATED-check early exit above, fde is
     * non-NULL and the reference is still dropped — presumably
     * intentional double-put forgiveness; confirm against callers. */
    if ((fd != NULL) && (fde != NULL)) {
        fd_unref(fd);
    }
}
386

387
fd_t *
388
gf_fd_fdptr_get(fdtable_t *fdtable, int64_t fd)
389
{
390
    fd_t *fdptr = NULL;
391

392
    if (fdtable == NULL || fd < 0) {
393
        gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
394
                         "invalid argument");
395
        errno = EINVAL;
396
        return NULL;
397
    }
398

399
    if (!(fd < fdtable->max_fds)) {
400
        gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
401
                         "invalid argument");
402
        errno = EINVAL;
403
        return NULL;
404
    }
405

406
    pthread_rwlock_rdlock(&fdtable->lock);
407
    {
408
        fdptr = fdtable->fdentries[fd].fd;
409
        if (fdptr) {
410
            __fd_ref(fdptr);
411
        }
412
    }
413
    pthread_rwlock_unlock(&fdtable->lock);
414

415
    return fdptr;
416
}
417

418
/* Unchecked refcount bump: caller guarantees 'fd' is non-NULL.  The
 * count is atomic, so no lock is required here. */
fd_t *
__fd_ref(fd_t *fd)
{
    GF_ATOMIC_INC(fd->refcount);
    return fd;
}
425

426
/* NULL-checked refcount bump.  Logs and returns NULL on a NULL fd,
 * otherwise increments the atomic refcount and returns the same fd. */
fd_t *
fd_ref(fd_t *fd)
{
    if (fd == NULL) {
        gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
                         "null fd");
        return NULL;
    }

    GF_ATOMIC_INC(fd->refcount);

    return fd;
}
439

440
/*
 * Final teardown of an fd whose refcount reached zero (called from
 * fd_unref()).  Runs each xlator's release/releasedir callback, frees
 * the per-xlator context array, optionally decrements the inode's
 * open-fd count (when 'bound'), drops the inode reference and returns
 * the fd to its memory pool.
 */
static void
fd_destroy(fd_t *fd, gf_boolean_t bound)
{
    xlator_t *xl = NULL;
    int i = 0;
    xlator_t *old_THIS = NULL;

    if (fd == NULL) {
        gf_msg_callingfn("xlator", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
                         "invalid argument");
        goto out;
    }

    if (fd->inode == NULL) {
        gf_msg_callingfn("xlator", GF_LOG_ERROR, 0, LG_MSG_FD_INODE_NULL,
                         "fd->inode is NULL");
        goto out;
    }
    /* NOTE(review): when _ctx is NULL we bail out before the
     * inode_unref()/fd_lk_ctx_unref()/mem_put() below.  fd_allocate()
     * always sets _ctx for a live fd, so this presumably never leaks —
     * confirm. */
    if (!fd->_ctx)
        goto out;

    old_THIS = THIS;
    /* Directories and files have distinct release hooks; skip xlators
     * that are already in cleanup. */
    if (IA_ISDIR(fd->inode->ia_type)) {
        for (i = 0; i < fd->xl_count; i++) {
            xl = fd->_ctx[i].xl_key;
            if (xl) {
                if (!xl->call_cleanup && xl->cbks->releasedir) {
                    THIS = xl;
                    xl->cbks->releasedir(xl, fd);
                }
            }
        }
    } else {
        for (i = 0; i < fd->xl_count; i++) {
            xl = fd->_ctx[i].xl_key;
            if (xl) {
                if (!xl->call_cleanup && xl->cbks->release) {
                    THIS = xl;
                    xl->cbks->release(xl, fd);
                }
            }
        }
    }

    /* Restore the caller's xlator context after invoking callbacks. */
    THIS = old_THIS;

    LOCK_DESTROY(&fd->lock);

    GF_FREE(fd->_ctx);
    if (bound) {
        /*Decrease the count only after close happens on file*/
        LOCK(&fd->inode->lock);
        {
            fd->inode->fd_count--;
        }
        UNLOCK(&fd->inode->lock);
    }
    inode_unref(fd->inode);
    fd->inode = NULL;
    fd_lk_ctx_unref(fd->lk_ctx);
    mem_put(fd);
out:
    return;
}
504

505
/*
 * Notify every xlator in the graph that 'fd' is being closed, via the
 * fdclose/fdclosedir hooks.  Purely a notification pass: no reference
 * is dropped here.
 * NOTE(review): fd, fd->inode and the graph chain are dereferenced
 * unchecked — callers must pass a fully bound fd.
 */
void
fd_close(fd_t *fd)
{
    xlator_t *xl, *old_THIS;

    old_THIS = THIS;

    /* Walk the whole graph, skipping xlators already in cleanup. */
    for (xl = fd->inode->table->xl->graph->first; xl != NULL; xl = xl->next) {
        if (!xl->call_cleanup) {
            THIS = xl;

            if (IA_ISDIR(fd->inode->ia_type)) {
                if (xl->cbks->fdclosedir != NULL) {
                    xl->cbks->fdclosedir(xl, fd);
                }
            } else {
                if (xl->cbks->fdclose != NULL) {
                    xl->cbks->fdclose(xl, fd);
                }
            }
        }
    }

    THIS = old_THIS;
}
530

531
/*
 * Drop one reference on 'fd'.  When the count reaches zero the fd is
 * unlinked from its inode's fd list (under the inode lock) and then
 * destroyed outside that lock.  'bound' records whether the fd was
 * still on the inode list so fd_destroy() can decrement the inode's
 * fd_count.
 */
void
fd_unref(fd_t *fd)
{
    uint32_t refcount = 0;
    gf_boolean_t bound = _gf_false;

    if (!fd) {
        gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
                         "fd is NULL");
        return;
    }

    LOCK(&fd->inode->lock);
    {
        refcount = GF_ATOMIC_DEC(fd->refcount);
        if (refcount == 0) {
            /* Unbind while still holding the inode lock so no new
             * lookup can find this dying fd. */
            if (!list_empty(&fd->inode_list)) {
                list_del_init(&fd->inode_list);
                fd->inode->active_fd_count--;
                bound = _gf_true;
            }
        }
    }
    UNLOCK(&fd->inode->lock);

    /* Destroy outside the inode lock: fd_destroy() re-acquires it. */
    if (refcount == 0) {
        fd_destroy(fd, bound);
    }

    return;
}
562

563
/* Link 'fd' onto its inode's fd list and bump both fd counters.  Caller
 * must hold the inode lock. */
static fd_t *
__fd_bind(fd_t *fd)
{
    inode_t *owner = fd->inode;

    list_del_init(&fd->inode_list);
    list_add(&fd->inode_list, &owner->fd_list);
    owner->fd_count++;
    owner->active_fd_count++;

    return fd;
}
573

574
/* Locked wrapper around __fd_bind(): attach 'fd' to its inode's fd list.
 * Returns the fd, or NULL when fd or its inode is missing. */
fd_t *
fd_bind(fd_t *fd)
{
    if (fd == NULL || fd->inode == NULL) {
        gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
                         "!fd || !fd->inode");
        return NULL;
    }

    LOCK(&fd->inode->lock);
    fd = __fd_bind(fd);
    UNLOCK(&fd->inode->lock);

    return fd;
}
591

592
/*
 * Allocate and initialize a new fd_t for 'inode' owned by 'pid' with a
 * refcount of 1.  Deliberately does NOT take an inode reference (see the
 * comment below) — the caller must do that once outside the inode lock.
 * Returns NULL on bad arguments or allocation failure.
 */
static fd_t *
fd_allocate(inode_t *inode, uint64_t pid)
{
    fd_t *fd;

    if (inode == NULL) {
        gf_msg_callingfn("fd", GF_LOG_ERROR, EINVAL, LG_MSG_INVALID_ARG,
                         "invalid argument");
        return NULL;
    }

    fd = mem_get0(inode->table->fd_mem_pool);
    if (fd == NULL) {
        return NULL;
    }

    /* One context slot per xlator in the graph, plus one spare. */
    fd->xl_count = inode->table->xl->graph->xl_count + 1;

    fd->_ctx = GF_CALLOC(1, (sizeof(struct _fd_ctx) * fd->xl_count),
                         gf_common_mt_fd_ctx);
    if (fd->_ctx == NULL) {
        goto failed;
    }

    fd->lk_ctx = fd_lk_ctx_create();
    if (fd->lk_ctx != NULL) {
        /* We need to take a reference from the inode, but we cannot do it
         * here because this function can be called with the inode lock taken
         * and inode_ref() takes the inode's table lock. This is the reverse
         * of the logical lock acquisition order and can cause a deadlock. So
         * we simply assign the inode here and we delegate the inode reference
         * responsibility to the caller (when this function succeeds and the
         * inode lock is released). This is safe because the caller must hold
         * a reference of the inode to use it, so it's guaranteed that the
         * number of references won't reach 0 before the caller finishes.
         *
         * TODO: minimize use of locks in favor of atomic operations to avoid
         *       these dependencies. */
        fd->inode = inode;
        fd->pid = pid;
        INIT_LIST_HEAD(&fd->inode_list);
        LOCK_INIT(&fd->lock);
        GF_ATOMIC_INIT(fd->refcount, 1);
        return fd;
    }

    /* lk_ctx creation failed: unwind the context allocation too. */
    GF_FREE(fd->_ctx);

failed:
    mem_put(fd);

    return NULL;
}
645

646
fd_t *
647
fd_create_uint64(inode_t *inode, uint64_t pid)
648
{
649
    fd_t *fd;
650

651
    fd = fd_allocate(inode, pid);
652
    if (fd != NULL) {
653
        /* fd_allocate() doesn't get a reference from the inode. We need to
654
         * take it here in case of success. */
655
        inode_ref(inode);
656
    }
657

658
    return fd;
659
}
660

661
fd_t *
662
fd_create(inode_t *inode, pid_t pid)
663
{
664
    return fd_create_uint64(inode, (uint64_t)pid);
665
}
666

667
static fd_t *
668
__fd_lookup(inode_t *inode, uint64_t pid)
669
{
670
    fd_t *iter_fd = NULL;
671
    fd_t *fd = NULL;
672

673
    if (list_empty(&inode->fd_list))
674
        return NULL;
675

676
    list_for_each_entry(iter_fd, &inode->fd_list, inode_list)
677
    {
678
        if (iter_fd->anonymous)
679
            /* If someone was interested in getting an
680
               anonymous fd (or was OK getting an anonymous fd),
681
               they can as well call fd_anonymous() directly */
682
            continue;
683

684
        if (!pid || iter_fd->pid == pid) {
685
            fd = __fd_ref(iter_fd);
686
            break;
687
        }
688
    }
689

690
    return fd;
691
}
692

693
fd_t *
694
fd_lookup(inode_t *inode, pid_t pid)
695
{
696
    fd_t *fd = NULL;
697

698
    if (!inode) {
699
        gf_msg_callingfn("fd", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
700
                         "!inode");
701
        return NULL;
702
    }
703

704
    LOCK(&inode->lock);
705
    {
706
        fd = __fd_lookup(inode, (uint64_t)pid);
707
    }
708
    UNLOCK(&inode->lock);
709

710
    return fd;
711
}
712

713
fd_t *
714
fd_lookup_uint64(inode_t *inode, uint64_t pid)
715
{
716
    fd_t *fd = NULL;
717

718
    if (!inode) {
719
        gf_msg_callingfn("fd", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
720
                         "!inode");
721
        return NULL;
722
    }
723

724
    LOCK(&inode->lock);
725
    {
726
        fd = __fd_lookup(inode, pid);
727
    }
728
    UNLOCK(&inode->lock);
729

730
    return fd;
731
}
732

733
static fd_t *
734
__fd_lookup_anonymous(inode_t *inode, int32_t flags)
735
{
736
    fd_t *iter_fd = NULL;
737
    fd_t *fd = NULL;
738

739
    if (list_empty(&inode->fd_list))
740
        return NULL;
741

742
    list_for_each_entry(iter_fd, &inode->fd_list, inode_list)
743
    {
744
        if ((iter_fd->anonymous) && (flags == iter_fd->flags)) {
745
            fd = __fd_ref(iter_fd);
746
            break;
747
        }
748
    }
749

750
    return fd;
751
}
752

753
fd_t *
754
fd_anonymous_with_flags(inode_t *inode, int32_t flags)
755
{
756
    fd_t *fd = NULL;
757
    bool ref = false;
758

759
    LOCK(&inode->lock);
760

761
    fd = __fd_lookup_anonymous(inode, flags);
762

763
    /* if (fd); then we already have increased the refcount in
764
       __fd_lookup_anonymous(), so no need of one more fd_ref().
765
       if (!fd); then both create and bind won't bump up the ref
766
       count, so we have to call fd_ref() after bind. */
767
    if (fd == NULL) {
768
        fd = fd_allocate(inode, 0);
769
        if (fd != NULL) {
770
            fd->anonymous = _gf_true;
771
            fd->flags = GF_ANON_FD_FLAGS | (flags & O_DIRECT);
772

773
            __fd_bind(fd);
774

775
            ref = true;
776
        }
777
    }
778

779
    UNLOCK(&inode->lock);
780

781
    if (ref) {
782
        /* fd_allocate() doesn't get a reference from the inode. We need to
783
         * take it here in case of success. */
784
        inode_ref(inode);
785
    }
786

787
    return fd;
788
}
789

790
fd_t *
791
fd_anonymous(inode_t *inode)
792
{
793
    return fd_anonymous_with_flags(inode, 0);
794
}
795

796
fd_t *
797
fd_lookup_anonymous(inode_t *inode, int32_t flags)
798
{
799
    fd_t *fd = NULL;
800

801
    if (!inode) {
802
        gf_msg_callingfn("fd", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
803
                         "!inode");
804
        return NULL;
805
    }
806

807
    LOCK(&inode->lock);
808
    {
809
        fd = __fd_lookup_anonymous(inode, flags);
810
    }
811
    UNLOCK(&inode->lock);
812
    return fd;
813
}
814

815
gf_boolean_t
816
fd_is_anonymous(fd_t *fd)
817
{
818
    return (fd && fd->anonymous);
819
}
820

821
uint8_t
822
fd_list_empty(inode_t *inode)
823
{
824
    uint8_t empty = 0;
825

826
    LOCK(&inode->lock);
827
    {
828
        empty = list_empty(&inode->fd_list);
829
    }
830
    UNLOCK(&inode->lock);
831

832
    return empty;
833
}
834

835
/*
 * Store 'value' for 'xlator' in fd's per-xlator context array (caller
 * must hold fd->lock).  Reuses the xlator's existing slot when present,
 * otherwise the first empty one; grows the array when every slot is
 * taken.  Returns 0 on success, -1 on bad arguments or OOM.
 */
int
__fd_ctx_set(fd_t *fd, xlator_t *xlator, uint64_t value)
{
    int index = 0, new_xl_count = 0;
    int ret = 0;
    int set_idx = -1;
    void *begin = NULL;
    size_t diff = 0;
    struct _fd_ctx *tmp = NULL;

    if (!fd || !xlator)
        return -1;

    for (index = 0; index < fd->xl_count; index++) {
        if (!fd->_ctx[index].key) {
            if (set_idx == -1)
                set_idx = index;
            /* don't break, to check if key already exists
               further on */
        } else if (fd->_ctx[index].xl_key == xlator) {
            set_idx = index;
            break;
        }
    }

    if (set_idx == -1) {
        /* No slot available: extend the array by the graph's xlator
         * count so future sets are unlikely to need another realloc. */
        set_idx = fd->xl_count;

        new_xl_count = fd->xl_count + xlator->graph->xl_count;

        tmp = GF_REALLOC(fd->_ctx, (sizeof(struct _fd_ctx) * new_xl_count));
        if (tmp == NULL) {
            ret = -1;
            goto out;
        }

        fd->_ctx = tmp;

        /* Zero only the freshly added tail.  (Arithmetic on a void *
         * relies on the GCC extension treating it like char *.) */
        begin = fd->_ctx;
        begin += (fd->xl_count * sizeof(struct _fd_ctx));

        diff = (new_xl_count - fd->xl_count) * sizeof(struct _fd_ctx);

        memset(begin, 0, diff);

        fd->xl_count = new_xl_count;
    }

    fd->_ctx[set_idx].xl_key = xlator;
    fd->_ctx[set_idx].value1 = value;

out:
    return ret;
}
889

890
int
891
fd_ctx_set(fd_t *fd, xlator_t *xlator, uint64_t value)
892
{
893
    int ret = 0;
894

895
    if (!fd || !xlator) {
896
        gf_msg_callingfn("fd", GF_LOG_WARNING, EINVAL, LG_MSG_INVALID_ARG,
897
                         "%p %p", fd, xlator);
898
        return -1;
899
    }
900

901
    LOCK(&fd->lock);
902
    {
903
        ret = __fd_ctx_set(fd, xlator, value);
904
    }
905
    UNLOCK(&fd->lock);
906

907
    return ret;
908
}
909

910
uint64_t
911
__fd_ctx_get(fd_t *fd, xlator_t *xlator)
912
{
913
    int index = 0;
914

915
    if (!fd || !xlator)
916
        return 0;
917

918
    for (index = 0; index < fd->xl_count; index++) {
919
        if (fd->_ctx[index].xl_key == xlator)
920
            return fd->_ctx[index].value1;
921
    }
922

923
    return 0;
924
}
925

926
uint64_t
927
fd_ctx_get(fd_t *fd, xlator_t *xlator)
928
{
929
    uint64_t ret = 0;
930

931
    if (fd) {
932
        LOCK(&fd->lock);
933
        {
934
            ret = __fd_ctx_get(fd, xlator);
935
        }
936
        UNLOCK(&fd->lock);
937
    }
938

939
    return ret;
940
}
941

942
static uint64_t
943
__fd_ctx_del(fd_t *fd, xlator_t *xlator)
944
{
945
    int index = 0;
946
    uint64_t value = 0;
947

948
    for (index = 0; index < fd->xl_count; index++) {
949
        if (fd->_ctx[index].xl_key == xlator) {
950
            value = fd->_ctx[index].value1;
951
            fd->_ctx[index].key = 0;
952
            fd->_ctx[index].value1 = 0;
953
            return value;
954
        }
955
    }
956

957
    return 0;
958
}
959

960
uint64_t
961
fd_ctx_del(fd_t *fd, xlator_t *xlator)
962
{
963
    uint64_t ret = 0;
964

965
    if (fd && xlator) {
966
        LOCK(&fd->lock);
967
        {
968
            ret = __fd_ctx_del(fd, xlator);
969
        }
970
        UNLOCK(&fd->lock);
971
    }
972
    return ret;
973
}
974

975
/*
 * Write a statedump section for a single fd: pid, refcount, flags and
 * (when present) its backing inode.  'prefix' is currently unused here
 * but kept for signature symmetry with the other dump helpers.
 */
void
fd_dump(fd_t *fd, char *prefix)
{
    char key[GF_DUMP_MAX_BUF_LEN];

    if (!fd)
        return;

    gf_proc_dump_write("pid", "%" PRIu64, fd->pid);
    gf_proc_dump_write("refcount", "%" PRIu32, GF_ATOMIC_GET(fd->refcount));
    gf_proc_dump_write("flags", "%d", fd->flags);

    if (fd->inode) {
        /* Open a nested section for the inode's own dump output. */
        gf_proc_dump_build_key(key, "inode", NULL);
        gf_proc_dump_add_section("%s", key);
        inode_dump(fd->inode, key);
    }
}
993

994
static void
995
fdentry_dump(fdentry_t *fdentry, char *prefix)
996
{
997
    if (!fdentry)
998
        return;
999

1000
    if (GF_FDENTRY_ALLOCATED != fdentry->next_free)
1001
        return;
1002

1003
    if (fdentry->fd)
1004
        fd_dump(fdentry->fd, prefix);
1005
}
1006

1007
/*
 * Write a statedump of the whole fdtable: refcount, size, free-list head
 * and one section per allocated slot.  Uses a try-lock so a statedump
 * request cannot deadlock against a writer; on contention it reports
 * the failure instead of dumping.
 */
void
fdtable_dump(fdtable_t *fdtable, char *prefix)
{
    char key[GF_DUMP_MAX_BUF_LEN];
    int i = 0;
    int ret = -1;

    if (!fdtable)
        return;

    /* Non-blocking: statedumps must never stall the fd path. */
    ret = pthread_rwlock_tryrdlock(&fdtable->lock);
    if (ret)
        goto out;

    gf_proc_dump_build_key(key, prefix, "refcount");
    gf_proc_dump_write(key, "%d", fdtable->refcount);
    gf_proc_dump_build_key(key, prefix, "maxfds");
    gf_proc_dump_write(key, "%d", fdtable->max_fds);
    gf_proc_dump_build_key(key, prefix, "first_free");
    gf_proc_dump_write(key, "%d", fdtable->first_free);

    /* One section per in-use slot. */
    for (i = 0; i < fdtable->max_fds; i++) {
        if (GF_FDENTRY_ALLOCATED == fdtable->fdentries[i].next_free) {
            gf_proc_dump_build_key(key, prefix, "fdentry[%d]", i);
            gf_proc_dump_add_section("%s", key);
            fdentry_dump(&fdtable->fdentries[i], key);
        }
    }

    pthread_rwlock_unlock(&fdtable->lock);

out:
    if (ret != 0)
        gf_proc_dump_write("Unable to dump the fdtable",
                           "(Lock acquistion failed) %p", fdtable);
    return;
}
1044

1045
/*
 * Dump the per-xlator context of 'fd' via each xlator's dumpops->fdctx
 * hook.  The context array is copied under fd->lock and the (possibly
 * slow) xlator callbacks are invoked on the copy outside the lock.
 */
void
fd_ctx_dump(fd_t *fd, char *prefix)
{
    struct _fd_ctx *fd_ctx = NULL;
    xlator_t *xl = NULL;
    int i = 0;

    if ((fd == NULL) || (fd->_ctx == NULL)) {
        goto out;
    }

    LOCK(&fd->lock);
    {
        if (fd->_ctx != NULL) {
            /* Snapshot the array so callbacks run without the lock. */
            fd_ctx = GF_CALLOC(fd->xl_count, sizeof(*fd_ctx),
                               gf_common_mt_fd_ctx);
            if (fd_ctx == NULL) {
                goto unlock;
            }

            for (i = 0; i < fd->xl_count; i++) {
                fd_ctx[i] = fd->_ctx[i];
            }
        }
    }
unlock:
    UNLOCK(&fd->lock);

    if (fd_ctx == NULL) {
        goto out;
    }

    /* Let every xlator that registered a fdctx dumpop report its slot. */
    for (i = 0; i < fd->xl_count; i++) {
        if (fd_ctx[i].xl_key) {
            xl = (xlator_t *)(long)fd_ctx[i].xl_key;
            if (xl->dumpops && xl->dumpops->fdctx)
                xl->dumpops->fdctx(xl, fd);
        }
    }

out:
    GF_FREE(fd_ctx);

    return;
}
1090

1091
/*
 * Serialize one allocated fdtable slot into 'dict' under '<prefix>.pid',
 * '<prefix>.refcount' and '<prefix>.flags', incrementing '*openfds' for
 * each live fd recorded.  Silently stops at the first dict_set failure.
 */
void
fdentry_dump_to_dict(fdentry_t *fdentry, char *prefix, dict_t *dict,
                     int *openfds)
{
    char key[GF_DUMP_MAX_BUF_LEN] = {
        0,
    };
    int ret = -1;

    if (!fdentry)
        return;
    if (!dict)
        return;

    /* Skip free slots. */
    if (GF_FDENTRY_ALLOCATED != fdentry->next_free)
        return;

    if (fdentry->fd) {
        snprintf(key, sizeof(key), "%s.pid", prefix);
        ret = dict_set_uint64(dict, key, fdentry->fd->pid);
        if (ret)
            return;

        snprintf(key, sizeof(key), "%s.refcount", prefix);
        ret = dict_set_int32(dict, key, GF_ATOMIC_GET(fdentry->fd->refcount));
        if (ret)
            return;

        snprintf(key, sizeof(key), "%s.flags", prefix);
        ret = dict_set_int32(dict, key, fdentry->fd->flags);
        if (ret)
            return;

        (*openfds)++;
    }
    return;
}
1128

1129
/*
 * Serialize the whole fdtable into 'dict' under '<prefix>.fdtable.*':
 * refcount, maxfds, free-list head, one entry per allocated slot and
 * finally the open-fd count.  Uses a try-lock so a statedump request
 * never blocks the fd path; bails out silently on contention.
 */
void
fdtable_dump_to_dict(fdtable_t *fdtable, char *prefix, dict_t *dict)
{
    char key[GF_DUMP_MAX_BUF_LEN] = {
        0,
    };
    int i = 0;
    int openfds = 0;
    int ret = -1;

    if (!fdtable)
        return;
    if (!dict)
        return;

    /* Non-blocking: give up rather than stall against a writer. */
    ret = pthread_rwlock_tryrdlock(&fdtable->lock);
    if (ret)
        return;

    snprintf(key, sizeof(key), "%s.fdtable.refcount", prefix);
    ret = dict_set_int32(dict, key, fdtable->refcount);
    if (ret)
        goto out;

    snprintf(key, sizeof(key), "%s.fdtable.maxfds", prefix);
    ret = dict_set_uint32(dict, key, fdtable->max_fds);
    if (ret)
        goto out;

    snprintf(key, sizeof(key), "%s.fdtable.firstfree", prefix);
    ret = dict_set_int32(dict, key, fdtable->first_free);
    if (ret)
        goto out;

    /* One sub-key per in-use slot; openfds counts the live fds seen. */
    for (i = 0; i < fdtable->max_fds; i++) {
        if (GF_FDENTRY_ALLOCATED == fdtable->fdentries[i].next_free) {
            snprintf(key, sizeof(key), "%s.fdtable.fdentry%d", prefix, i);
            fdentry_dump_to_dict(&fdtable->fdentries[i], key, dict, &openfds);
        }
    }

    snprintf(key, sizeof(key), "%s.fdtable.openfds", prefix);
    ret = dict_set_int32(dict, key, openfds);
    if (ret)
        goto out;

out:
    pthread_rwlock_unlock(&fdtable->lock);
    return;
}
1179

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.