/*
  Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
  This file is part of GlusterFS.

  This file is licensed to you under your choice of the GNU Lesser
  General Public License, version 3 or any later version (LGPLv3 or
  later), or the GNU General Public License, version 2 (GPLv2), in all
  cases as published by the Free Software Foundation.
*/

#include "glusterfs/iobuf.h"
#include "glusterfs/statedump.h"
#include "glusterfs/libglusterfs-messages.h"

/*
  TODO: implement destroy margins and prefetching of arenas
*/

#define IOBUF_ARENA_MAX_INDEX                                                  \
    (sizeof(gf_iobuf_init_config) / (sizeof(struct iobuf_init_config)))

/* Make sure this array is sorted based on pagesize */
static const struct iobuf_init_config gf_iobuf_init_config[] = {
    /* { pagesize, num_pages }, */
    {128, 1024},     {512, 512},       {2 * 1024, 512}, {8 * 1024, 128},
    {32 * 1024, 64}, {128 * 1024, 32}, {256 * 1024, 8}, {1 * 1024 * 1024, 2},
};

static int
gf_iobuf_get_arena_index(const size_t page_size)
{
    int i;

    for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
        if (page_size <= gf_iobuf_init_config[i].pagesize)
            return i;
    }

    return -1;
}

static size_t
gf_iobuf_get_pagesize(const size_t page_size, int *index)
{
    int i;
    size_t size = 0;

    for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
        size = gf_iobuf_init_config[i].pagesize;
        if (page_size <= size) {
            if (index != NULL)
                *index = i;
            return size;
        }
    }

    return -1;
}
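
/* Worked example (illustrative only, not part of the original source): the
 * two helpers above map a requested size to the smallest configured page
 * size that can hold it. With the table as defined, a request for 5000 bytes
 * falls between 2 * 1024 and 8 * 1024, so gf_iobuf_get_arena_index(5000)
 * returns 3 and gf_iobuf_get_pagesize(5000, &index) returns 8192 with index
 * set to 3. A request larger than 1 * 1024 * 1024 matches no entry and both
 * helpers report failure (-1), which callers treat as the signal to fall
 * back to standard allocation.
 */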

static void
__iobuf_arena_init_iobufs(struct iobuf_arena *iobuf_arena)
{
    const int iobuf_cnt = iobuf_arena->page_count;
    struct iobuf *iobuf = NULL;
    int offset = 0;
    int i = 0;

    iobuf = iobuf_arena->iobufs;
    for (i = 0; i < iobuf_cnt; i++) {
        INIT_LIST_HEAD(&iobuf->list);
        LOCK_INIT(&iobuf->lock);

        iobuf->iobuf_arena = iobuf_arena;

        iobuf->ptr = iobuf_arena->mem_base + offset;

        list_add(&iobuf->list, &iobuf_arena->passive_list);
        iobuf_arena->passive_cnt++;

        offset += iobuf_arena->page_size;
        iobuf++;
    }
}

static void
__iobuf_arena_destroy_iobufs(struct iobuf_arena *iobuf_arena)
{
    int iobuf_cnt = 0;
    struct iobuf *iobuf = NULL;
    int i = 0;

    iobuf_cnt = iobuf_arena->page_count;
    iobuf = iobuf_arena->iobufs;
    for (i = 0; i < iobuf_cnt; i++) {
        GF_ASSERT(GF_ATOMIC_GET(iobuf->ref) == 0);

        LOCK_DESTROY(&iobuf->lock);
        list_del_init(&iobuf->list);
        iobuf++;
    }
}

static void
__iobuf_arena_destroy(struct iobuf_arena *iobuf_arena)
{
    munmap(iobuf_arena->mem_base, iobuf_arena->arena_size);

    __iobuf_arena_destroy_iobufs(iobuf_arena);

    GF_FREE(iobuf_arena);
}

static struct iobuf_arena *
__iobuf_arena_alloc(struct iobuf_pool *iobuf_pool, size_t page_size,
                    int32_t num_iobufs)
{
    struct iobuf_arena *iobuf_arena = NULL;
    size_t rounded_size = 0;
    int index = 0; /* unused */

    GF_VALIDATE_OR_GOTO("iobuf", iobuf_pool, out);

    iobuf_arena = GF_CALLOC(
        1, sizeof(struct iobuf_arena) + sizeof(struct iobuf) * num_iobufs,
        gf_common_mt_iobuf_arena);
    if (!iobuf_arena)
        goto out;

    INIT_LIST_HEAD(&iobuf_arena->list);
    INIT_LIST_HEAD(&iobuf_arena->passive_list);
    INIT_LIST_HEAD(&iobuf_arena->active_list);
    iobuf_arena->iobuf_pool = iobuf_pool;

    rounded_size = gf_iobuf_get_pagesize(page_size, &index);

    iobuf_arena->page_size = rounded_size;
    iobuf_arena->page_count = num_iobufs;

    iobuf_arena->arena_size = rounded_size * num_iobufs;

    iobuf_arena->mem_base = mmap(NULL, iobuf_arena->arena_size,
                                 PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (iobuf_arena->mem_base == MAP_FAILED) {
        gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_MAPPING_FAILED, NULL);
        GF_FREE(iobuf_arena);
        goto out;
    }

    __iobuf_arena_init_iobufs(iobuf_arena);

    iobuf_pool->arena_cnt++;

    return iobuf_arena;

out:
    return NULL;
}

static struct iobuf_arena *
__iobuf_arena_unprune(struct iobuf_pool *iobuf_pool, const int index)
{
    struct iobuf_arena *tmp = NULL;

    list_for_each_entry(tmp, &iobuf_pool->purge[index], list)
    {
        list_del_init(&tmp->list);
        return tmp;
    }

    return NULL;
}

static struct iobuf_arena *
__iobuf_pool_add_arena(struct iobuf_pool *iobuf_pool, const size_t page_size,
                       const int32_t num_pages, const int index)
{
    struct iobuf_arena *iobuf_arena = NULL;

    iobuf_arena = __iobuf_arena_unprune(iobuf_pool, index);

    if (!iobuf_arena) {
        iobuf_arena = __iobuf_arena_alloc(iobuf_pool, page_size, num_pages);
        if (!iobuf_arena) {
            gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_ARENA_NOT_FOUND,
                    NULL);
            return NULL;
        }
    }
    list_add(&iobuf_arena->list, &iobuf_pool->arenas[index]);

    return iobuf_arena;
}

/* This function destroys all the iobufs and the iobuf_pool */
void
iobuf_pool_destroy(struct iobuf_pool *iobuf_pool)
{
    struct iobuf_arena *iobuf_arena = NULL;
    struct iobuf_arena *tmp = NULL;
    int i = 0;

    GF_VALIDATE_OR_GOTO("iobuf", iobuf_pool, out);

    pthread_mutex_lock(&iobuf_pool->mutex);
    {
        for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
            list_for_each_entry_safe(iobuf_arena, tmp, &iobuf_pool->arenas[i],
                                     list)
            {
                list_del_init(&iobuf_arena->list);
                iobuf_pool->arena_cnt--;

                __iobuf_arena_destroy(iobuf_arena);
            }
            list_for_each_entry_safe(iobuf_arena, tmp, &iobuf_pool->purge[i],
                                     list)
            {
                list_del_init(&iobuf_arena->list);
                iobuf_pool->arena_cnt--;
                __iobuf_arena_destroy(iobuf_arena);
            }
            /* If there are no iobuf leaks, there should be no
             * arenas in the filled list. If any arenas do remain
             * in the filled list, the destroy call below will
             * assert.
             */
            list_for_each_entry_safe(iobuf_arena, tmp, &iobuf_pool->filled[i],
                                     list)
            {
                list_del_init(&iobuf_arena->list);
                iobuf_pool->arena_cnt--;
                __iobuf_arena_destroy(iobuf_arena);
            }
            /* If there are no iobuf leaks, there should be no
             * standard-allocated arenas left; iobuf_put will free
             * such arenas.
             * TODO: Free the stdalloc arenas forcefully if present?
             */
        }
    }
    pthread_mutex_unlock(&iobuf_pool->mutex);

    pthread_mutex_destroy(&iobuf_pool->mutex);

    GF_FREE(iobuf_pool);

out:
    return;
}

static void
iobuf_create_stdalloc_arena(struct iobuf_pool *iobuf_pool)
{
    struct iobuf_arena *iobuf_arena = NULL;

    /* No locking required here as it's called only once during init */
    iobuf_arena = GF_CALLOC(sizeof(*iobuf_arena), 1, gf_common_mt_iobuf_arena);
    if (!iobuf_arena)
        goto err;

    INIT_LIST_HEAD(&iobuf_arena->list);
    INIT_LIST_HEAD(&iobuf_arena->passive_list);
    INIT_LIST_HEAD(&iobuf_arena->active_list);

    iobuf_arena->iobuf_pool = iobuf_pool;

    iobuf_arena->page_size = 0x7fffffff;

    list_add_tail(&iobuf_arena->list,
                  &iobuf_pool->arenas[IOBUF_ARENA_MAX_INDEX]);

err:
    return;
}

struct iobuf_pool *
iobuf_pool_new(void)
{
    struct iobuf_pool *iobuf_pool = NULL;
    int i = 0;
    size_t page_size = 0;
    size_t arena_size = 0;
    int32_t num_pages = 0;

    iobuf_pool = GF_CALLOC(sizeof(*iobuf_pool), 1, gf_common_mt_iobuf_pool);
    if (!iobuf_pool)
        goto out;

    pthread_mutex_init(&iobuf_pool->mutex, NULL);
    for (i = 0; i <= IOBUF_ARENA_MAX_INDEX; i++) {
        INIT_LIST_HEAD(&iobuf_pool->arenas[i]);
        INIT_LIST_HEAD(&iobuf_pool->filled[i]);
        INIT_LIST_HEAD(&iobuf_pool->purge[i]);
    }

    iobuf_pool->default_page_size = 128 * GF_UNIT_KB;

    /* No locking required here
     * as no one else can use this pool yet
     */
    for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
        page_size = gf_iobuf_init_config[i].pagesize;
        num_pages = gf_iobuf_init_config[i].num_pages;

        if (__iobuf_pool_add_arena(iobuf_pool, page_size, num_pages, i) != NULL)
            arena_size += page_size * num_pages;
    }

    /* Need an arena to handle all the bigger iobuf requests */
    iobuf_create_stdalloc_arena(iobuf_pool);

    iobuf_pool->arena_size = arena_size;
out:

    return iobuf_pool;
}
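
/* Usage sketch (illustrative, not part of the original file): a process
 * typically creates one pool at startup and destroys it at teardown. All
 * buffers must have been released before iobuf_pool_destroy(), otherwise
 * the asserts in __iobuf_arena_destroy_iobufs() will trip.
 *
 *     struct iobuf_pool *pool = iobuf_pool_new();
 *     if (!pool)
 *         return -1;
 *     ... hand the pool to the rest of the process ...
 *     iobuf_pool_destroy(pool);
 */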

static void
__iobuf_arena_prune(struct iobuf_pool *iobuf_pool,
                    struct iobuf_arena *iobuf_arena, const int index)
{
    list_del(&iobuf_arena->list);

    /* Control reaches here only when the arena has no active iobufs left.
     * We destroy it only if at least one other arena remains in the
     * 'arenas' list (i.e., some free iobufs are still available), so that
     * there won't be spurious mmap/munmap cycles on buffers.
     * If the list is empty, add the arena to the purge list and return.
     */
    if (list_empty(&iobuf_pool->arenas[index])) {
        list_add_tail(&iobuf_arena->list, &iobuf_pool->purge[index]);
        goto out;
    }

    /* All cases matched, destroy */
    iobuf_pool->arena_cnt--;

    __iobuf_arena_destroy(iobuf_arena);

out:
    return;
}

/* Always called under the iobuf_pool mutex lock */
static struct iobuf_arena *
__iobuf_select_arena(struct iobuf_pool *iobuf_pool, const size_t page_size,
                     const int index)
{
    struct iobuf_arena *iobuf_arena = NULL;
    struct iobuf_arena *trav = NULL;

    /* look for unused iobuf from the head-most arena */
    list_for_each_entry(trav, &iobuf_pool->arenas[index], list)
    {
        if (trav->passive_cnt) {
            iobuf_arena = trav;
            break;
        }
    }

    if (!iobuf_arena) {
        /* all arenas were full, find the right count to add */
        iobuf_arena = __iobuf_pool_add_arena(
            iobuf_pool, page_size, gf_iobuf_init_config[index].num_pages,
            index);
    }

    return iobuf_arena;
}

/* Always called under the iobuf_pool mutex lock */
static struct iobuf *
__iobuf_get(struct iobuf_pool *iobuf_pool, const size_t page_size,
            const int index)
{
    struct iobuf *iobuf = NULL;
    struct iobuf_arena *iobuf_arena = NULL;

    /* most eligible arena for picking an iobuf */
    iobuf_arena = __iobuf_select_arena(iobuf_pool, page_size, index);
    if (!iobuf_arena)
        return NULL;

    iobuf = list_first_entry(&iobuf_arena->passive_list, struct iobuf, list);

    list_del(&iobuf->list);
    iobuf_arena->passive_cnt--;

    list_add(&iobuf->list, &iobuf_arena->active_list);
    iobuf_arena->active_cnt++;

    /* no resetting required for this element */
    iobuf_arena->alloc_cnt++;

    if (iobuf_arena->max_active < iobuf_arena->active_cnt)
        iobuf_arena->max_active = iobuf_arena->active_cnt;

    if (iobuf_arena->passive_cnt == 0) {
        list_del(&iobuf_arena->list);
        list_add(&iobuf_arena->list, &iobuf_pool->filled[index]);
    }

    iobuf->page_size = page_size;
    return iobuf;
}

static void
__iobuf_free(struct iobuf *iobuf)
{
    LOCK_DESTROY(&iobuf->lock);
    GF_FREE(iobuf);
}

static struct iobuf *
iobuf_get_from_stdalloc(struct iobuf_pool *iobuf_pool, const size_t page_size)
{
    struct iobuf *iobuf = NULL;
    struct iobuf_arena *iobuf_arena = NULL;
    struct iobuf_arena *trav = NULL;

    /* The first arena in the 'MAX-INDEX' will always be used for misc */
    list_for_each_entry(trav, &iobuf_pool->arenas[IOBUF_ARENA_MAX_INDEX], list)
    {
        iobuf_arena = trav;
        break;
    }

    iobuf = GF_MALLOC(sizeof(*iobuf) + ((page_size + GF_IOBUF_ALIGN_SIZE) - 1),
                      gf_common_mt_iobuf);
    if (caa_unlikely(!iobuf))
        return NULL;

    INIT_LIST_HEAD(&iobuf->list);
    iobuf->iobuf_arena = iobuf_arena;
    LOCK_INIT(&iobuf->lock);
    /* Hold a ref because you are allocating and using it */
    GF_ATOMIC_INIT(iobuf->ref, 1);

    iobuf->ptr = GF_ALIGN_BUF(iobuf->allocated_buffer, GF_IOBUF_ALIGN_SIZE);
    iobuf->page_size = page_size;

    return iobuf;
}

struct iobuf *
iobuf_get_from_small(const size_t page_size)
{
    struct iobuf *iobuf = NULL;

    iobuf = GF_MALLOC(sizeof(*iobuf) + page_size, gf_common_mt_iobuf);
    if (caa_unlikely(!iobuf))
        return NULL;

    INIT_LIST_HEAD(&iobuf->list);
    iobuf->iobuf_arena = NULL;
    LOCK_INIT(&iobuf->lock);
    /* Hold a ref because you are allocating and using it */
    GF_ATOMIC_INIT(iobuf->ref, 1);
    iobuf->ptr = iobuf->allocated_buffer;
    iobuf->page_size = page_size;
    return iobuf;
}

struct iobuf *
iobuf_get2(struct iobuf_pool *iobuf_pool, size_t page_size)
{
    struct iobuf *iobuf = NULL;
    size_t rounded_size = 0;
    int index = 0;

    if (page_size == 0) {
        page_size = iobuf_pool->default_page_size;
    }

    /* During smallfile testing we observed that performance improves
       significantly when standard allocation is used for page sizes less
       than or equal to 128KB; the supporting data is available at
       https://github.com/gluster/glusterfs/issues/2771
    */
    if (page_size <= USE_IOBUF_POOL_IF_SIZE_GREATER_THAN) {
        iobuf = iobuf_get_from_small(page_size);
        if (!iobuf)
            gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_IOBUF_NOT_FOUND,
                    NULL);
        return iobuf;
    }

    rounded_size = gf_iobuf_get_pagesize(page_size, &index);
    if (rounded_size == -1) {
        /* make sure to provide the requested buffer with standard
           memory allocations */
        iobuf = iobuf_get_from_stdalloc(iobuf_pool, page_size);

        gf_msg_debug("iobuf", 0,
                     "request for iobuf of size %zu "
                     "is serviced using standard calloc() (%p) as it "
                     "exceeds the maximum available buffer size",
                     page_size, iobuf);

        iobuf_pool->request_misses++;
        return iobuf;
    } else if (index == -1) {
        gf_smsg("iobuf", GF_LOG_ERROR, 0, LG_MSG_PAGE_SIZE_EXCEEDED,
                "page_size=%zu", page_size, NULL);
        return NULL;
    }

    pthread_mutex_lock(&iobuf_pool->mutex);
    {
        iobuf = __iobuf_get(iobuf_pool, rounded_size, index);
        if (!iobuf) {
            pthread_mutex_unlock(&iobuf_pool->mutex);
            gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_IOBUF_NOT_FOUND,
                    NULL);
            goto post_unlock;
        }
        iobuf_ref(iobuf);
    }
    pthread_mutex_unlock(&iobuf_pool->mutex);
post_unlock:
    return iobuf;
}
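
/* Usage sketch (illustrative, not part of the original file; pool, data and
 * data_len are placeholders): iobuf_get2() returns a buffer of at least the
 * requested size. Callers write through iobuf_ptr() and must balance the
 * reference they receive with iobuf_unref().
 *
 *     struct iobuf *iob = iobuf_get2(pool, 64 * 1024);
 *     if (!iob)
 *         return -ENOMEM;
 *     memcpy(iobuf_ptr(iob), data, data_len);   // data_len <= iobuf_pagesize(iob)
 *     ... use the buffer ...
 *     iobuf_unref(iob);
 */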

struct iobuf *
iobuf_get_page_aligned(struct iobuf_pool *iobuf_pool, size_t page_size,
                       size_t align_size)
{
    size_t req_size = 0;
    struct iobuf *iobuf = NULL;

    req_size = page_size;

    if (req_size == 0) {
        req_size = iobuf_pool->default_page_size;
    }

    req_size = req_size + align_size;
    iobuf = iobuf_get2(iobuf_pool, req_size);
    if (!iobuf)
        return NULL;
    iobuf->ptr = GF_ALIGN_BUF(iobuf->ptr, align_size);

    return iobuf;
}

struct iobuf *
iobuf_get(struct iobuf_pool *iobuf_pool)
{
    struct iobuf *iobuf = NULL;
    size_t page_size = 0;

    GF_VALIDATE_OR_GOTO("iobuf", iobuf_pool, out);

    page_size = iobuf_pool->default_page_size;
    iobuf = iobuf_get2(iobuf_pool, page_size);

out:
    return iobuf;
}

/* Return the iobuf to its arena's passive list; called with the
 * iobuf_pool mutex held (see iobuf_put below) */
static void
__iobuf_put(struct iobuf *iobuf, struct iobuf_arena *iobuf_arena)
{
    struct iobuf_pool *iobuf_pool = NULL;
    int index = 0;

    index = gf_iobuf_get_arena_index(iobuf_arena->page_size);
    if (index == -1) {
        gf_msg_debug("iobuf", 0,
                     "freeing the iobuf (%p) "
                     "allocated with standard calloc()",
                     iobuf);

        /* free up properly without bothering about lists and all */
        __iobuf_free(iobuf);
        return;
    }

    iobuf_pool = iobuf_arena->iobuf_pool;

    if (iobuf_arena->passive_cnt == 0) {
        list_del(&iobuf_arena->list);
        list_add_tail(&iobuf_arena->list, &iobuf_pool->arenas[index]);
    }

    list_del_init(&iobuf->list);
    iobuf_arena->active_cnt--;

    list_add(&iobuf->list, &iobuf_arena->passive_list);
    iobuf_arena->passive_cnt++;

    if (iobuf_arena->active_cnt == 0) {
        __iobuf_arena_prune(iobuf_pool, iobuf_arena, index);
    }
}

static void
iobuf_put(struct iobuf *iobuf)
{
    struct iobuf_arena *iobuf_arena = NULL;
    struct iobuf_pool *iobuf_pool = NULL;

    GF_ASSERT(iobuf);

    iobuf_arena = iobuf->iobuf_arena;
    if (!iobuf_arena) {
        __iobuf_free(iobuf);
        return;
    }

    iobuf_pool = iobuf_arena->iobuf_pool;
    if (!iobuf_pool) {
        gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_POOL_NOT_FOUND, "iobuf",
                NULL);
        return;
    }

    pthread_mutex_lock(&iobuf_pool->mutex);
    {
        __iobuf_put(iobuf, iobuf_arena);
    }
    pthread_mutex_unlock(&iobuf_pool->mutex);
}

void
iobuf_unref(struct iobuf *iobuf)
{
    int ref = 0;

    GF_VALIDATE_OR_GOTO("iobuf", iobuf, out);

    ref = GF_ATOMIC_DEC(iobuf->ref);

    if (!ref)
        iobuf_put(iobuf);

out:
    return;
}

struct iobuf *
iobuf_ref(struct iobuf *iobuf)
{
    GF_VALIDATE_OR_GOTO("iobuf", iobuf, out);
    GF_ATOMIC_INC(iobuf->ref);

out:
    return iobuf;
}

struct iobref *
iobref_new(void)
{
    struct iobref *iobref = NULL;

    iobref = GF_MALLOC(sizeof(*iobref), gf_common_mt_iobref);
    if (!iobref)
        return NULL;

    iobref->iobrefs = GF_CALLOC(sizeof(*iobref->iobrefs), 16,
                                gf_common_mt_iobrefs);
    if (!iobref->iobrefs) {
        GF_FREE(iobref);
        return NULL;
    }

    iobref->allocated = 16;
    iobref->used = 0;

    LOCK_INIT(&iobref->lock);

    GF_ATOMIC_INIT(iobref->ref, 1);
    return iobref;
}

struct iobref *
iobref_ref(struct iobref *iobref)
{
    GF_VALIDATE_OR_GOTO("iobuf", iobref, out);
    GF_ATOMIC_INC(iobref->ref);

out:
    return iobref;
}

static void
iobref_destroy(struct iobref *iobref)
{
    int i = 0;
    struct iobuf *iobuf = NULL;

    GF_ASSERT(iobref);

    for (i = 0; i < iobref->used; i++) {
        iobuf = iobref->iobrefs[i];
        GF_ASSERT(iobuf);
        iobref->iobrefs[i] = NULL;
        iobuf_unref(iobuf);
    }

    LOCK_DESTROY(&iobref->lock);

    GF_FREE(iobref->iobrefs);
    GF_FREE(iobref);
}

void
iobref_unref(struct iobref *iobref)
{
    int ref = 0;

    GF_VALIDATE_OR_GOTO("iobuf", iobref, out);
    ref = GF_ATOMIC_DEC(iobref->ref);

    if (!ref)
        iobref_destroy(iobref);

out:
    return;
}

void
iobref_clear(struct iobref *iobref)
{
    int i = 0;

    GF_VALIDATE_OR_GOTO("iobuf", iobref, out);

    for (; i < iobref->used; i++) {
        GF_ASSERT(iobref->iobrefs[i]);
        iobuf_unref(iobref->iobrefs[i]);
    }

    iobref_unref(iobref);

out:
    return;
}

static void
__iobref_grow(struct iobref *iobref)
{
    void *newptr = NULL;
    int i = 0;

    newptr = GF_REALLOC(iobref->iobrefs,
                        iobref->allocated * 2 * (sizeof(*iobref->iobrefs)));
    if (newptr) {
        iobref->iobrefs = newptr;
        iobref->allocated *= 2;

        for (i = iobref->used; i < iobref->allocated; i++)
            iobref->iobrefs[i] = NULL;
    }
}

static int
__iobref_add(struct iobref *iobref, struct iobuf *iobuf)
{
    GF_ASSERT(iobref);
    GF_ASSERT(iobuf);

    if (iobref->used == iobref->allocated) {
        __iobref_grow(iobref);

        if (iobref->used == iobref->allocated)
            return -ENOMEM;
    }

    iobref->iobrefs[iobref->used++] = iobuf_ref(iobuf);
    return 0;
}

int
iobref_add(struct iobref *iobref, struct iobuf *iobuf)
{
    int ret = -EINVAL;

    GF_VALIDATE_OR_GOTO("iobuf", iobref, out);
    GF_VALIDATE_OR_GOTO("iobuf", iobuf, out);

    LOCK(&iobref->lock);
    {
        ret = __iobref_add(iobref, iobuf);
    }
    UNLOCK(&iobref->lock);

out:
    return ret;
}
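
/* Usage sketch (illustrative, not part of the original file; iob is a
 * placeholder): an iobref keeps a set of iobufs alive for as long as a
 * request needs them. iobref_add() takes its own reference on the iobuf,
 * so the caller may drop theirs right away.
 *
 *     struct iobref *iobref = iobref_new();
 *     if (!iobref)
 *         return -ENOMEM;
 *     if (iobref_add(iobref, iob) == 0)
 *         iobuf_unref(iob);          // iobref now holds its own reference
 *     ... pass iobref along with the request ...
 *     iobref_unref(iobref);          // releases every iobuf it still holds
 */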

int
iobref_merge(struct iobref *to, struct iobref *from)
{
    int i = 0;
    int ret = 0;
    struct iobuf *iobuf = NULL;

    GF_VALIDATE_OR_GOTO("iobuf", to, out);
    GF_VALIDATE_OR_GOTO("iobuf", from, out);

    LOCK(&from->lock);
    {
        for (i = 0; i < from->used; i++) {
            iobuf = from->iobrefs[i];
            GF_ASSERT(iobuf);

            ret = iobref_add(to, iobuf);
            if (ret < 0)
                break;
        }
    }
    UNLOCK(&from->lock);

out:
    return ret;
}

size_t
iobuf_size(struct iobuf *iobuf)
{
    size_t size = 0;

    GF_VALIDATE_OR_GOTO("iobuf", iobuf, out);
    size = iobuf_pagesize(iobuf);

out:
    return size;
}

size_t
iobref_size(struct iobref *iobref)
{
    size_t size = 0;
    int i = 0;

    GF_VALIDATE_OR_GOTO("iobuf", iobref, out);

    LOCK(&iobref->lock);
    {
        for (i = 0; i < iobref->used; i++) {
            GF_ASSERT(iobref->iobrefs[i]);
            size += iobuf_size(iobref->iobrefs[i]);
        }
    }
    UNLOCK(&iobref->lock);

out:
    return size;
}

void
iobuf_info_dump(struct iobuf *iobuf, const char *key_prefix)
{
    char key[GF_DUMP_MAX_BUF_LEN];
    struct iobuf my_iobuf;
    int ret = 0;

    GF_VALIDATE_OR_GOTO("iobuf", iobuf, out);

    ret = TRY_LOCK(&iobuf->lock);
    if (ret) {
        return;
    }
    memcpy(&my_iobuf, iobuf, sizeof(my_iobuf));
    UNLOCK(&iobuf->lock);

    gf_proc_dump_build_key(key, key_prefix, "ref");
    gf_proc_dump_write(key, "%" GF_PRI_ATOMIC, GF_ATOMIC_GET(my_iobuf.ref));
    gf_proc_dump_build_key(key, key_prefix, "ptr");
    gf_proc_dump_write(key, "%p", my_iobuf.ptr);

out:
    return;
}

void
iobuf_arena_info_dump(struct iobuf_arena *iobuf_arena, const char *key_prefix)
{
    char key[GF_DUMP_MAX_BUF_LEN];
    int i = 1;
    struct iobuf *trav;

    GF_VALIDATE_OR_GOTO("iobuf", iobuf_arena, out);

    gf_proc_dump_build_key(key, key_prefix, "mem_base");
    gf_proc_dump_write(key, "%p", iobuf_arena->mem_base);
    gf_proc_dump_build_key(key, key_prefix, "active_cnt");
    gf_proc_dump_write(key, "%d", iobuf_arena->active_cnt);
    gf_proc_dump_build_key(key, key_prefix, "passive_cnt");
    gf_proc_dump_write(key, "%d", iobuf_arena->passive_cnt);
    gf_proc_dump_build_key(key, key_prefix, "alloc_cnt");
    gf_proc_dump_write(key, "%" PRIu64, iobuf_arena->alloc_cnt);
    gf_proc_dump_build_key(key, key_prefix, "max_active");
    gf_proc_dump_write(key, "%d", iobuf_arena->max_active);
    gf_proc_dump_build_key(key, key_prefix, "page_size");
    gf_proc_dump_write(key, "%" GF_PRI_SIZET, iobuf_arena->page_size);
    list_for_each_entry(trav, &iobuf_arena->active_list, list)
    {
        gf_proc_dump_build_key(key, key_prefix, "active_iobuf.%d", i++);
        gf_proc_dump_add_section("%s", key);
        iobuf_info_dump(trav, key);
    }

out:
    return;
}

void
iobuf_stats_dump(struct iobuf_pool *iobuf_pool)
{
    char msg[1024];
    struct iobuf_arena *trav = NULL;
    int i = 1;
    int j = 0;
    int ret = -1;

    GF_VALIDATE_OR_GOTO("iobuf", iobuf_pool, out);

    ret = pthread_mutex_trylock(&iobuf_pool->mutex);

    if (ret) {
        return;
    }
    gf_proc_dump_add_section("iobuf.global");
    gf_proc_dump_write("iobuf_pool", "%p", iobuf_pool);
    gf_proc_dump_write("iobuf_pool.default_page_size", "%" GF_PRI_SIZET,
                       iobuf_pool->default_page_size);
    gf_proc_dump_write("iobuf_pool.arena_size", "%" GF_PRI_SIZET,
                       iobuf_pool->arena_size);
    gf_proc_dump_write("iobuf_pool.arena_cnt", "%d", iobuf_pool->arena_cnt);
    gf_proc_dump_write("iobuf_pool.request_misses", "%" PRId64,
                       iobuf_pool->request_misses);

    for (j = 0; j < IOBUF_ARENA_MAX_INDEX; j++) {
        list_for_each_entry(trav, &iobuf_pool->arenas[j], list)
        {
            snprintf(msg, sizeof(msg), "arena.%d", i);
            gf_proc_dump_add_section("%s", msg);
            iobuf_arena_info_dump(trav, msg);
            i++;
        }
        list_for_each_entry(trav, &iobuf_pool->purge[j], list)
        {
            snprintf(msg, sizeof(msg), "purge.%d", i);
            gf_proc_dump_add_section("%s", msg);
            iobuf_arena_info_dump(trav, msg);
            i++;
        }
        list_for_each_entry(trav, &iobuf_pool->filled[j], list)
        {
            snprintf(msg, sizeof(msg), "filled.%d", i);
            gf_proc_dump_add_section("%s", msg);
            iobuf_arena_info_dump(trav, msg);
            i++;
        }
    }

    pthread_mutex_unlock(&iobuf_pool->mutex);

out:
    return;
}

void
iobuf_to_iovec(struct iobuf *iob, struct iovec *iov)
{
    GF_VALIDATE_OR_GOTO("iobuf", iob, out);
    GF_VALIDATE_OR_GOTO("iobuf", iov, out);

    iov->iov_base = iobuf_ptr(iob);
    iov->iov_len = iobuf_pagesize(iob);

out:
    return;
}

int
iobuf_copy(struct iobuf_pool *iobuf_pool, const struct iovec *iovec_src,
           int iovcnt, struct iobref **iobref, struct iobuf **iobuf,
           struct iovec *iov_dst)
{
    size_t size = -1;
    int ret = 0;

    size = iov_length(iovec_src, iovcnt);

    *iobuf = iobuf_get2(iobuf_pool, size);
    if (!(*iobuf)) {
        ret = -1;
        errno = ENOMEM;
        goto out;
    }

    *iobref = iobref_new();
    if (!(*iobref)) {
        iobuf_unref(*iobuf);
        errno = ENOMEM;
        ret = -1;
        goto out;
    }

    ret = iobref_add(*iobref, *iobuf);
    if (ret) {
        iobuf_unref(*iobuf);
        iobref_unref(*iobref);
        errno = ENOMEM;
        ret = -1;
        goto out;
    }

    iov_unload(iobuf_ptr(*iobuf), iovec_src, iovcnt);

    iov_dst->iov_base = iobuf_ptr(*iobuf);
    iov_dst->iov_len = size;

out:
    return ret;
}
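
/* Usage sketch (illustrative, not part of the original file; pool, src_vec
 * and src_count are placeholders): iobuf_copy() flattens a source iovec
 * array into one freshly allocated iobuf and hands back the iobuf, an iobref
 * that already holds it, and a single destination iovec. On success the
 * caller owns one reference on each and releases both when done.
 *
 *     struct iobref *iobref = NULL;
 *     struct iobuf *iobuf = NULL;
 *     struct iovec dst = {0};
 *     if (iobuf_copy(pool, src_vec, src_count, &iobref, &iobuf, &dst) == 0) {
 *         ... use dst.iov_base / dst.iov_len ...
 *         iobuf_unref(iobuf);
 *         iobref_unref(iobref);
 *     }
 */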
