/* glusterfs: iobuf (I/O buffer) pool implementation */
/*
  Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
  This file is part of GlusterFS.

  This file is licensed to you under your choice of the GNU Lesser
  General Public License, version 3 or any later version (LGPLv3 or
  later), or the GNU General Public License, version 2 (GPLv2), in all
  cases as published by the Free Software Foundation.
*/
10
#include "glusterfs/iobuf.h"
#include "glusterfs/statedump.h"
#include "glusterfs/libglusterfs-messages.h"
/*
  TODO: implement destroy margins and prefetching of arenas
*/
/* Number of entries in gf_iobuf_init_config, i.e. the number of fixed
 * page-size arena classes managed by the pool. */
#define IOBUF_ARENA_MAX_INDEX \
    (sizeof(gf_iobuf_init_config) / (sizeof(struct iobuf_init_config)))

/* Make sure this array is sorted based on pagesize */
static const struct iobuf_init_config gf_iobuf_init_config[] = {
    /* { pagesize, num_pages }, */
    {128, 1024},     {512, 512},       {2 * 1024, 512}, {8 * 1024, 128},
    {32 * 1024, 64}, {128 * 1024, 32}, {256 * 1024, 8}, {1 * 1024 * 1024, 2},
};
29static int30gf_iobuf_get_arena_index(const size_t page_size)31{
32int i;33
34for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {35if (page_size <= gf_iobuf_init_config[i].pagesize)36return i;37}38
39return -1;40}
41
42static size_t43gf_iobuf_get_pagesize(const size_t page_size, int *index)44{
45int i;46size_t size = 0;47
48for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {49size = gf_iobuf_init_config[i].pagesize;50if (page_size <= size) {51if (index != NULL)52*index = i;53return size;54}55}56
57return -1;58}
59
60static void61__iobuf_arena_init_iobufs(struct iobuf_arena *iobuf_arena)62{
63const int iobuf_cnt = iobuf_arena->page_count;64struct iobuf *iobuf = NULL;65int offset = 0;66int i = 0;67
68iobuf = iobuf_arena->iobufs;69for (i = 0; i < iobuf_cnt; i++) {70INIT_LIST_HEAD(&iobuf->list);71LOCK_INIT(&iobuf->lock);72
73iobuf->iobuf_arena = iobuf_arena;74
75iobuf->ptr = iobuf_arena->mem_base + offset;76
77list_add(&iobuf->list, &iobuf_arena->passive_list);78iobuf_arena->passive_cnt++;79
80offset += iobuf_arena->page_size;81iobuf++;82}83}
84
85static void86__iobuf_arena_destroy_iobufs(struct iobuf_arena *iobuf_arena)87{
88int iobuf_cnt = 0;89struct iobuf *iobuf = NULL;90int i = 0;91
92iobuf_cnt = iobuf_arena->page_count;93iobuf = iobuf_arena->iobufs;94for (i = 0; i < iobuf_cnt; i++) {95GF_ASSERT(GF_ATOMIC_GET(iobuf->ref) == 0);96
97LOCK_DESTROY(&iobuf->lock);98list_del_init(&iobuf->list);99iobuf++;100}101}
102
/* Release everything owned by an arena: the mmap'ed data region and the
 * per-iobuf bookkeeping, then the arena struct itself (the iobuf headers
 * sit in the same GF_CALLOC allocation as the arena, so freeing the
 * arena frees them too).  Caller must have already unlinked the arena
 * from any pool list. */
static void
__iobuf_arena_destroy(struct iobuf_arena *iobuf_arena)
{
    munmap(iobuf_arena->mem_base, iobuf_arena->arena_size);

    __iobuf_arena_destroy_iobufs(iobuf_arena);

    GF_FREE(iobuf_arena);
}
112
113static struct iobuf_arena *114__iobuf_arena_alloc(struct iobuf_pool *iobuf_pool, size_t page_size,115int32_t num_iobufs)116{
117struct iobuf_arena *iobuf_arena = NULL;118size_t rounded_size = 0;119int index = 0; /* unused */120
121GF_VALIDATE_OR_GOTO("iobuf", iobuf_pool, out);122
123iobuf_arena = GF_CALLOC(1241, sizeof(struct iobuf_arena) + sizeof(struct iobuf) * num_iobufs,125gf_common_mt_iobuf_arena);126if (!iobuf_arena)127goto out;128
129INIT_LIST_HEAD(&iobuf_arena->list);130INIT_LIST_HEAD(&iobuf_arena->passive_list);131INIT_LIST_HEAD(&iobuf_arena->active_list);132iobuf_arena->iobuf_pool = iobuf_pool;133
134rounded_size = gf_iobuf_get_pagesize(page_size, &index);135
136iobuf_arena->page_size = rounded_size;137iobuf_arena->page_count = num_iobufs;138
139iobuf_arena->arena_size = rounded_size * num_iobufs;140
141iobuf_arena->mem_base = mmap(NULL, iobuf_arena->arena_size,142PROT_READ | PROT_WRITE,143MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);144if (iobuf_arena->mem_base == MAP_FAILED) {145gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_MAPPING_FAILED, NULL);146GF_FREE(iobuf_arena);147goto out;148}149
150__iobuf_arena_init_iobufs(iobuf_arena);151
152iobuf_pool->arena_cnt++;153
154return iobuf_arena;155
156out:157return NULL;158}
159
160static struct iobuf_arena *161__iobuf_arena_unprune(struct iobuf_pool *iobuf_pool, const int index)162{
163struct iobuf_arena *tmp = NULL;164
165list_for_each_entry(tmp, &iobuf_pool->purge[index], list)166{167list_del_init(&tmp->list);168return tmp;169}170
171return NULL;172}
173
174static struct iobuf_arena *175__iobuf_pool_add_arena(struct iobuf_pool *iobuf_pool, const size_t page_size,176const int32_t num_pages, const int index)177{
178struct iobuf_arena *iobuf_arena = NULL;179
180iobuf_arena = __iobuf_arena_unprune(iobuf_pool, index);181
182if (!iobuf_arena) {183iobuf_arena = __iobuf_arena_alloc(iobuf_pool, page_size, num_pages);184if (!iobuf_arena) {185gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_ARENA_NOT_FOUND,186NULL);187return NULL;188}189}190list_add(&iobuf_arena->list, &iobuf_pool->arenas[index]);191
192return iobuf_arena;193}
194
/* This function destroys all the iobufs and the iobuf_pool */
void
iobuf_pool_destroy(struct iobuf_pool *iobuf_pool)
{
    struct iobuf_arena *iobuf_arena = NULL;
    struct iobuf_arena *tmp = NULL;
    int i = 0;

    GF_VALIDATE_OR_GOTO("iobuf", iobuf_pool, out);

    pthread_mutex_lock(&iobuf_pool->mutex);
    {
        for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {
            /* arenas that still have free iobufs */
            list_for_each_entry_safe(iobuf_arena, tmp, &iobuf_pool->arenas[i],
                                     list)
            {
                list_del_init(&iobuf_arena->list);
                iobuf_pool->arena_cnt--;

                __iobuf_arena_destroy(iobuf_arena);
            }
            /* arenas already pruned and parked for reuse */
            list_for_each_entry_safe(iobuf_arena, tmp, &iobuf_pool->purge[i],
                                     list)
            {
                list_del_init(&iobuf_arena->list);
                iobuf_pool->arena_cnt--;
                __iobuf_arena_destroy(iobuf_arena);
            }
            /* If there are no iobuf leaks, there should be no
             * arenas in the filled list. If at all there are any
             * arenas in the filled list, the below function will
             * assert.
             */
            list_for_each_entry_safe(iobuf_arena, tmp, &iobuf_pool->filled[i],
                                     list)
            {
                list_del_init(&iobuf_arena->list);
                iobuf_pool->arena_cnt--;
                __iobuf_arena_destroy(iobuf_arena);
            }
            /* If there are no iobuf leaks, there should be
             * no standard allocated arenas, iobuf_put will free
             * such arenas.
             * TODO: Free the stdalloc arenas forcefully if present?
             */
        }
    }
    pthread_mutex_unlock(&iobuf_pool->mutex);

    pthread_mutex_destroy(&iobuf_pool->mutex);

    GF_FREE(iobuf_pool);

out:
    return;
}
251
252static void253iobuf_create_stdalloc_arena(struct iobuf_pool *iobuf_pool)254{
255struct iobuf_arena *iobuf_arena = NULL;256
257/* No locking required here as its called only once during init */258iobuf_arena = GF_CALLOC(sizeof(*iobuf_arena), 1, gf_common_mt_iobuf_arena);259if (!iobuf_arena)260goto err;261
262INIT_LIST_HEAD(&iobuf_arena->list);263INIT_LIST_HEAD(&iobuf_arena->passive_list);264INIT_LIST_HEAD(&iobuf_arena->active_list);265
266iobuf_arena->iobuf_pool = iobuf_pool;267
268iobuf_arena->page_size = 0x7fffffff;269
270list_add_tail(&iobuf_arena->list,271&iobuf_pool->arenas[IOBUF_ARENA_MAX_INDEX]);272
273err:274return;275}
276
277struct iobuf_pool *278iobuf_pool_new(void)279{
280struct iobuf_pool *iobuf_pool = NULL;281int i = 0;282size_t page_size = 0;283size_t arena_size = 0;284int32_t num_pages = 0;285
286iobuf_pool = GF_CALLOC(sizeof(*iobuf_pool), 1, gf_common_mt_iobuf_pool);287if (!iobuf_pool)288goto out;289
290pthread_mutex_init(&iobuf_pool->mutex, NULL);291for (i = 0; i <= IOBUF_ARENA_MAX_INDEX; i++) {292INIT_LIST_HEAD(&iobuf_pool->arenas[i]);293INIT_LIST_HEAD(&iobuf_pool->filled[i]);294INIT_LIST_HEAD(&iobuf_pool->purge[i]);295}296
297iobuf_pool->default_page_size = 128 * GF_UNIT_KB;298
299/* No locking required here300* as no one else can use this pool yet
301*/
302for (i = 0; i < IOBUF_ARENA_MAX_INDEX; i++) {303page_size = gf_iobuf_init_config[i].pagesize;304num_pages = gf_iobuf_init_config[i].num_pages;305
306if (__iobuf_pool_add_arena(iobuf_pool, page_size, num_pages, i) != NULL)307arena_size += page_size * num_pages;308}309
310/* Need an arena to handle all the bigger iobuf requests */311iobuf_create_stdalloc_arena(iobuf_pool);312
313iobuf_pool->arena_size = arena_size;314out:315
316return iobuf_pool;317}
318
/* Called when an arena has no active iobufs left: either park it on the
 * purge list (when no other arena of this class remains selectable, to
 * avoid mmap/munmap churn) or destroy it.  Caller holds the pool mutex. */
static void
__iobuf_arena_prune(struct iobuf_pool *iobuf_pool,
                    struct iobuf_arena *iobuf_arena, const int index)
{
    list_del(&iobuf_arena->list);

    /* code flow comes here only if the arena is in purge list and we can
     * free the arena only if we have at least one arena in 'arenas' list
     * (ie, at least few iobufs free in arena), that way, there won't
     * be spurious mmap/unmap of buffers.
     * If the list empty, add to the purge list and return.
     */
    if (list_empty(&iobuf_pool->arenas[index])) {
        list_add_tail(&iobuf_arena->list, &iobuf_pool->purge[index]);
        goto out;
    }

    /* All cases matched, destroy */
    iobuf_pool->arena_cnt--;

    __iobuf_arena_destroy(iobuf_arena);

out:
    return;
}
344
345/* Always called under the iobuf_pool mutex lock */
346static struct iobuf_arena *347__iobuf_select_arena(struct iobuf_pool *iobuf_pool, const size_t page_size,348const int index)349{
350struct iobuf_arena *iobuf_arena = NULL;351struct iobuf_arena *trav = NULL;352
353/* look for unused iobuf from the head-most arena */354list_for_each_entry(trav, &iobuf_pool->arenas[index], list)355{356if (trav->passive_cnt) {357iobuf_arena = trav;358break;359}360}361
362if (!iobuf_arena) {363/* all arenas were full, find the right count to add */364iobuf_arena = __iobuf_pool_add_arena(365iobuf_pool, page_size, gf_iobuf_init_config[index].num_pages,366index);367}368
369return iobuf_arena;370}
371
/* Always called under the iobuf_pool mutex lock */
/* Grab a free iobuf of the given class: move it from the owning arena's
 * passive list to its active list, update counters, and move the arena
 * to the 'filled' list once it has no free iobufs left.  Returns NULL
 * when no arena could be found or created. */
static struct iobuf *
__iobuf_get(struct iobuf_pool *iobuf_pool, const size_t page_size,
            const int index)
{
    struct iobuf *iobuf = NULL;
    struct iobuf_arena *iobuf_arena = NULL;

    /* most eligible arena for picking an iobuf */
    iobuf_arena = __iobuf_select_arena(iobuf_pool, page_size, index);
    if (!iobuf_arena)
        return NULL;

    iobuf = list_first_entry(&iobuf_arena->passive_list, struct iobuf, list);

    list_del(&iobuf->list);
    iobuf_arena->passive_cnt--;

    list_add(&iobuf->list, &iobuf_arena->active_list);
    iobuf_arena->active_cnt++;

    /* no resetting required for this element */
    iobuf_arena->alloc_cnt++;

    if (iobuf_arena->max_active < iobuf_arena->active_cnt)
        iobuf_arena->max_active = iobuf_arena->active_cnt;

    /* arena fully in use: park it on 'filled' so selection skips it */
    if (iobuf_arena->passive_cnt == 0) {
        list_del(&iobuf_arena->list);
        list_add(&iobuf_arena->list, &iobuf_pool->filled[index]);
    }

    iobuf->page_size = page_size;
    return iobuf;
}
407
/* Free a standalone (heap-allocated) iobuf that does not belong to a
 * pooled arena's mmap'ed region. */
static void
__iobuf_free(struct iobuf *iobuf)
{
    LOCK_DESTROY(&iobuf->lock);
    GF_FREE(iobuf);
}
414
415static struct iobuf *416iobuf_get_from_stdalloc(struct iobuf_pool *iobuf_pool, const size_t page_size)417{
418struct iobuf *iobuf = NULL;419struct iobuf_arena *iobuf_arena = NULL;420struct iobuf_arena *trav = NULL;421
422/* The first arena in the 'MAX-INDEX' will always be used for misc */423list_for_each_entry(trav, &iobuf_pool->arenas[IOBUF_ARENA_MAX_INDEX], list)424{425iobuf_arena = trav;426break;427}428
429iobuf = GF_MALLOC(sizeof(*iobuf) + ((page_size + GF_IOBUF_ALIGN_SIZE) - 1),430gf_common_mt_iobuf);431if (caa_unlikely(!iobuf))432return NULL;433
434INIT_LIST_HEAD(&iobuf->list);435iobuf->iobuf_arena = iobuf_arena;436LOCK_INIT(&iobuf->lock);437/* Hold a ref because you are allocating and using it */438GF_ATOMIC_INIT(iobuf->ref, 1);439
440iobuf->ptr = GF_ALIGN_BUF(iobuf->allocated_buffer, GF_IOBUF_ALIGN_SIZE);441iobuf->page_size = page_size;442
443return iobuf;444}
445
/* Service a small request with a plain heap allocation instead of the
 * pool (see iobuf_get2 for the rationale).  iobuf_arena is left NULL so
 * iobuf_put frees it directly.  The iobuf already carries one ref. */
struct iobuf *
iobuf_get_from_small(const size_t page_size)
{
    struct iobuf *iobuf = NULL;

    iobuf = GF_MALLOC(sizeof(*iobuf) + page_size, gf_common_mt_iobuf);
    if (caa_unlikely(!iobuf))
        return NULL;

    INIT_LIST_HEAD(&iobuf->list);
    iobuf->iobuf_arena = NULL;
    LOCK_INIT(&iobuf->lock);
    /* Hold a ref because you are allocating and using it */
    GF_ATOMIC_INIT(iobuf->ref, 1);
    iobuf->ptr = iobuf->allocated_buffer;
    iobuf->page_size = page_size;
    return iobuf;
}
464
/* Get a referenced iobuf of at least page_size bytes (0 means the
 * pool's default page size).  Small requests go straight to the heap,
 * pooled sizes come from the matching arena class, and requests larger
 * than every class fall back to stdalloc.  Returns NULL on failure. */
struct iobuf *
iobuf_get2(struct iobuf_pool *iobuf_pool, size_t page_size)
{
    struct iobuf *iobuf = NULL;
    size_t rounded_size = 0;
    int index = 0;

    if (page_size == 0) {
        page_size = iobuf_pool->default_page_size;
    }

    /* During smallfile testing we have observed the performance
       is improved significantly while use standard allocation if
       page size is less than equal to 128KB, the data is available
       on the link https://github.com/gluster/glusterfs/issues/2771
    */
    if (page_size <= USE_IOBUF_POOL_IF_SIZE_GREATER_THAN) {
        iobuf = iobuf_get_from_small(page_size);
        if (!iobuf)
            gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_IOBUF_NOT_FOUND,
                    NULL);
        return iobuf;
    }

    /* (size_t)-1 means page_size exceeds every configured class */
    rounded_size = gf_iobuf_get_pagesize(page_size, &index);
    if (rounded_size == -1) {
        /* make sure to provide the requested buffer with standard
           memory allocations */
        iobuf = iobuf_get_from_stdalloc(iobuf_pool, page_size);

        gf_msg_debug("iobuf", 0,
                     "request for iobuf of size %zu "
                     "is serviced using standard calloc() (%p) as it "
                     "exceeds the maximum available buffer size",
                     page_size, iobuf);

        iobuf_pool->request_misses++;
        return iobuf;
    } else if (index == -1) {
        gf_smsg("iobuf", GF_LOG_ERROR, 0, LG_MSG_PAGE_SIZE_EXCEEDED,
                "page_size=%zu", page_size, NULL);
        return NULL;
    }

    pthread_mutex_lock(&iobuf_pool->mutex);
    {
        iobuf = __iobuf_get(iobuf_pool, rounded_size, index);
        if (!iobuf) {
            pthread_mutex_unlock(&iobuf_pool->mutex);
            gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_IOBUF_NOT_FOUND,
                    NULL);
            goto post_unlock;
        }
        /* ref taken under the lock so the iobuf cannot be recycled */
        iobuf_ref(iobuf);
    }
    pthread_mutex_unlock(&iobuf_pool->mutex);
post_unlock:
    return iobuf;
}
524
525struct iobuf *526iobuf_get_page_aligned(struct iobuf_pool *iobuf_pool, size_t page_size,527size_t align_size)528{
529size_t req_size = 0;530struct iobuf *iobuf = NULL;531
532req_size = page_size;533
534if (req_size == 0) {535req_size = iobuf_pool->default_page_size;536}537
538req_size = req_size + align_size;539iobuf = iobuf_get2(iobuf_pool, req_size);540if (!iobuf)541return NULL;542iobuf->ptr = GF_ALIGN_BUF(iobuf->ptr, align_size);543
544return iobuf;545}
546
547struct iobuf *548iobuf_get(struct iobuf_pool *iobuf_pool)549{
550struct iobuf *iobuf = NULL;551size_t page_size = 0;552
553GF_VALIDATE_OR_GOTO("iobuf", iobuf_pool, out);554
555page_size = iobuf_pool->default_page_size;556iobuf = iobuf_get2(iobuf_pool, page_size);557
558out:559return iobuf;560}
561
/* Return an iobuf to its arena: move it from the active list back to
 * the passive list, pull the arena off the 'filled' list if this was
 * its first free iobuf, and prune the arena once it is entirely idle.
 * An iobuf belonging to the stdalloc pseudo-arena (whose page_size maps
 * to no class index) is simply freed.  Caller holds the pool mutex. */
static void
__iobuf_put(struct iobuf *iobuf, struct iobuf_arena *iobuf_arena)
{
    struct iobuf_pool *iobuf_pool = NULL;
    int index = 0;

    index = gf_iobuf_get_arena_index(iobuf_arena->page_size);
    if (index == -1) {
        gf_msg_debug("iobuf", 0,
                     "freeing the iobuf (%p) "
                     "allocated with standard calloc()",
                     iobuf);

        /* free up properly without bothering about lists and all */
        __iobuf_free(iobuf);
        return;
    }

    iobuf_pool = iobuf_arena->iobuf_pool;

    /* arena was on the 'filled' list; make it selectable again */
    if (iobuf_arena->passive_cnt == 0) {
        list_del(&iobuf_arena->list);
        list_add_tail(&iobuf_arena->list, &iobuf_pool->arenas[index]);
    }

    list_del_init(&iobuf->list);
    iobuf_arena->active_cnt--;

    list_add(&iobuf->list, &iobuf_arena->passive_list);
    iobuf_arena->passive_cnt++;

    /* no users left: the arena may be destroyed or parked for reuse */
    if (iobuf_arena->active_cnt == 0) {
        __iobuf_arena_prune(iobuf_pool, iobuf_arena, index);
    }
}
597
598static void599iobuf_put(struct iobuf *iobuf)600{
601struct iobuf_arena *iobuf_arena = NULL;602struct iobuf_pool *iobuf_pool = NULL;603
604GF_ASSERT(iobuf);605
606iobuf_arena = iobuf->iobuf_arena;607if (!iobuf_arena) {608__iobuf_free(iobuf);609return;610}611
612iobuf_pool = iobuf_arena->iobuf_pool;613if (!iobuf_pool) {614gf_smsg(THIS->name, GF_LOG_WARNING, 0, LG_MSG_POOL_NOT_FOUND, "iobuf",615NULL);616return;617}618
619pthread_mutex_lock(&iobuf_pool->mutex);620{621__iobuf_put(iobuf, iobuf_arena);622}623pthread_mutex_unlock(&iobuf_pool->mutex);624}
625
626void
627iobuf_unref(struct iobuf *iobuf)628{
629int ref = 0;630
631GF_VALIDATE_OR_GOTO("iobuf", iobuf, out);632
633ref = GF_ATOMIC_DEC(iobuf->ref);634
635if (!ref)636iobuf_put(iobuf);637
638out:639return;640}
641
/* Take one reference on iobuf and return it (NULL-safe). */
struct iobuf *
iobuf_ref(struct iobuf *iobuf)
{
    GF_VALIDATE_OR_GOTO("iobuf", iobuf, out);
    GF_ATOMIC_INC(iobuf->ref);

out:
    return iobuf;
}
651
652struct iobref *653iobref_new(void)654{
655struct iobref *iobref = NULL;656
657iobref = GF_MALLOC(sizeof(*iobref), gf_common_mt_iobref);658if (!iobref)659return NULL;660
661iobref->iobrefs = GF_CALLOC(sizeof(*iobref->iobrefs), 16,662gf_common_mt_iobrefs);663if (!iobref->iobrefs) {664GF_FREE(iobref);665return NULL;666}667
668iobref->allocated = 16;669iobref->used = 0;670
671LOCK_INIT(&iobref->lock);672
673GF_ATOMIC_INIT(iobref->ref, 1);674return iobref;675}
676
/* Take one reference on iobref and return it (NULL-safe). */
struct iobref *
iobref_ref(struct iobref *iobref)
{
    GF_VALIDATE_OR_GOTO("iobuf", iobref, out);
    GF_ATOMIC_INC(iobref->ref);

out:
    return iobref;
}
686
687static void688iobref_destroy(struct iobref *iobref)689{
690int i = 0;691struct iobuf *iobuf = NULL;692
693GF_ASSERT(iobref);694
695for (i = 0; i < iobref->used; i++) {696iobuf = iobref->iobrefs[i];697GF_ASSERT(iobuf);698iobref->iobrefs[i] = NULL;699iobuf_unref(iobuf);700}701
702LOCK_DESTROY(&iobref->lock);703
704GF_FREE(iobref->iobrefs);705GF_FREE(iobref);706}
707
708void
709iobref_unref(struct iobref *iobref)710{
711int ref = 0;712
713GF_VALIDATE_OR_GOTO("iobuf", iobref, out);714ref = GF_ATOMIC_DEC(iobref->ref);715
716if (!ref)717iobref_destroy(iobref);718
719out:720return;721}
722
723void
724iobref_clear(struct iobref *iobref)725{
726int i = 0;727
728GF_VALIDATE_OR_GOTO("iobuf", iobref, out);729
730for (; i < iobref->used; i++) {731GF_ASSERT(iobref->iobrefs[i]);732iobuf_unref(iobref->iobrefs[i]);733}734
735iobref_unref(iobref);736
737out:738return;739}
740
741static void742__iobref_grow(struct iobref *iobref)743{
744void *newptr = NULL;745int i = 0;746
747newptr = GF_REALLOC(iobref->iobrefs,748iobref->allocated * 2 * (sizeof(*iobref->iobrefs)));749if (newptr) {750iobref->iobrefs = newptr;751iobref->allocated *= 2;752
753for (i = iobref->used; i < iobref->allocated; i++)754iobref->iobrefs[i] = NULL;755}756}
757
758static int759__iobref_add(struct iobref *iobref, struct iobuf *iobuf)760{
761GF_ASSERT(iobref);762GF_ASSERT(iobuf);763
764if (iobref->used == iobref->allocated) {765__iobref_grow(iobref);766
767if (iobref->used == iobref->allocated)768return -ENOMEM;769}770
771iobref->iobrefs[iobref->used++] = iobuf_ref(iobuf);772return 0;773}
774
775int
776iobref_add(struct iobref *iobref, struct iobuf *iobuf)777{
778int ret = -EINVAL;779
780GF_VALIDATE_OR_GOTO("iobuf", iobref, out);781GF_VALIDATE_OR_GOTO("iobuf", iobuf, out);782
783LOCK(&iobref->lock);784{785ret = __iobref_add(iobref, iobuf);786}787UNLOCK(&iobref->lock);788
789out:790return ret;791}
792
/* Add every iobuf of 'from' to 'to' (each gains a reference); stops at
 * the first failure and returns its error code, otherwise 0.
 * NOTE(review): iobref_add acquires to->lock while from->lock is held,
 * so calling this with to == from would self-deadlock on a
 * non-recursive lock — presumably callers never do that; confirm. */
int
iobref_merge(struct iobref *to, struct iobref *from)
{
    int i = 0;
    int ret = 0;
    struct iobuf *iobuf = NULL;

    GF_VALIDATE_OR_GOTO("iobuf", to, out);
    GF_VALIDATE_OR_GOTO("iobuf", from, out);

    LOCK(&from->lock);
    {
        for (i = 0; i < from->used; i++) {
            iobuf = from->iobrefs[i];
            GF_ASSERT(iobuf);

            ret = iobref_add(to, iobuf);
            if (ret < 0)
                break;
        }
    }
    UNLOCK(&from->lock);

out:
    return ret;
}
819
/* Usable size of iobuf in bytes (its page size); 0 when iobuf is NULL. */
size_t
iobuf_size(struct iobuf *iobuf)
{
    size_t size = 0;

    GF_VALIDATE_OR_GOTO("iobuf", iobuf, out);
    size = iobuf_pagesize(iobuf);

out:
    return size;
}
831
832size_t
833iobref_size(struct iobref *iobref)834{
835size_t size = 0;836int i = 0;837
838GF_VALIDATE_OR_GOTO("iobuf", iobref, out);839
840LOCK(&iobref->lock);841{842for (i = 0; i < iobref->used; i++) {843GF_ASSERT(iobref->iobrefs[i]);844size += iobuf_size(iobref->iobrefs[i]);845}846}847UNLOCK(&iobref->lock);848
849out:850return size;851}
852
/* Statedump helper: dump one iobuf's refcount and data pointer under
 * key_prefix.  Snapshots the iobuf under TRY_LOCK and returns silently
 * if the lock is contended, so dumping never blocks I/O paths. */
void
iobuf_info_dump(struct iobuf *iobuf, const char *key_prefix)
{
    char key[GF_DUMP_MAX_BUF_LEN];
    struct iobuf my_iobuf;
    int ret = 0;

    GF_VALIDATE_OR_GOTO("iobuf", iobuf, out);

    ret = TRY_LOCK(&iobuf->lock);
    if (ret) {
        return;
    }
    /* copy out, then release the lock before formatting */
    memcpy(&my_iobuf, iobuf, sizeof(my_iobuf));
    UNLOCK(&iobuf->lock);

    gf_proc_dump_build_key(key, key_prefix, "ref");
    gf_proc_dump_write(key, "%" GF_PRI_ATOMIC, GF_ATOMIC_GET(my_iobuf.ref));
    gf_proc_dump_build_key(key, key_prefix, "ptr");
    gf_proc_dump_write(key, "%p", my_iobuf.ptr);

out:
    return;
}
877
/* Statedump helper: dump an arena's counters and every iobuf on its
 * active list.  Reached from iobuf_stats_dump with the pool mutex held
 * (via trylock there). */
void
iobuf_arena_info_dump(struct iobuf_arena *iobuf_arena, const char *key_prefix)
{
    char key[GF_DUMP_MAX_BUF_LEN];
    int i = 1;
    struct iobuf *trav;

    GF_VALIDATE_OR_GOTO("iobuf", iobuf_arena, out);

    gf_proc_dump_build_key(key, key_prefix, "mem_base");
    gf_proc_dump_write(key, "%p", iobuf_arena->mem_base);
    gf_proc_dump_build_key(key, key_prefix, "active_cnt");
    gf_proc_dump_write(key, "%d", iobuf_arena->active_cnt);
    gf_proc_dump_build_key(key, key_prefix, "passive_cnt");
    gf_proc_dump_write(key, "%d", iobuf_arena->passive_cnt);
    gf_proc_dump_build_key(key, key_prefix, "alloc_cnt");
    gf_proc_dump_write(key, "%" PRIu64, iobuf_arena->alloc_cnt);
    gf_proc_dump_build_key(key, key_prefix, "max_active");
    gf_proc_dump_write(key, "%d", iobuf_arena->max_active);
    gf_proc_dump_build_key(key, key_prefix, "page_size");
    gf_proc_dump_write(key, "%" GF_PRI_SIZET, iobuf_arena->page_size);
    list_for_each_entry(trav, &iobuf_arena->active_list, list)
    {
        gf_proc_dump_build_key(key, key_prefix, "active_iobuf.%d", i++);
        gf_proc_dump_add_section("%s", key);
        iobuf_info_dump(trav, key);
    }

out:
    return;
}
909
/* Statedump entry point: dump pool-wide statistics and every arena on
 * the arenas/purge/filled lists.  Uses trylock and returns silently
 * when the pool mutex is contended, so it never blocks I/O. */
void
iobuf_stats_dump(struct iobuf_pool *iobuf_pool)
{
    char msg[1024];
    struct iobuf_arena *trav = NULL;
    int i = 1;
    int j = 0;
    int ret = -1;

    GF_VALIDATE_OR_GOTO("iobuf", iobuf_pool, out);

    ret = pthread_mutex_trylock(&iobuf_pool->mutex);

    if (ret) {
        return;
    }
    gf_proc_dump_add_section("iobuf.global");
    gf_proc_dump_write("iobuf_pool", "%p", iobuf_pool);
    gf_proc_dump_write("iobuf_pool.default_page_size", "%" GF_PRI_SIZET,
                       iobuf_pool->default_page_size);
    gf_proc_dump_write("iobuf_pool.arena_size", "%" GF_PRI_SIZET,
                       iobuf_pool->arena_size);
    gf_proc_dump_write("iobuf_pool.arena_cnt", "%d", iobuf_pool->arena_cnt);
    gf_proc_dump_write("iobuf_pool.request_misses", "%" PRId64,
                       iobuf_pool->request_misses);

    /* 'i' numbers sections continuously across all lists and classes */
    for (j = 0; j < IOBUF_ARENA_MAX_INDEX; j++) {
        list_for_each_entry(trav, &iobuf_pool->arenas[j], list)
        {
            snprintf(msg, sizeof(msg), "arena.%d", i);
            gf_proc_dump_add_section("%s", msg);
            iobuf_arena_info_dump(trav, msg);
            i++;
        }
        list_for_each_entry(trav, &iobuf_pool->purge[j], list)
        {
            snprintf(msg, sizeof(msg), "purge.%d", i);
            gf_proc_dump_add_section("%s", msg);
            iobuf_arena_info_dump(trav, msg);
            i++;
        }
        list_for_each_entry(trav, &iobuf_pool->filled[j], list)
        {
            snprintf(msg, sizeof(msg), "filled.%d", i);
            gf_proc_dump_add_section("%s", msg);
            iobuf_arena_info_dump(trav, msg);
            i++;
        }
    }

    pthread_mutex_unlock(&iobuf_pool->mutex);

out:
    return;
}
965
966void
967iobuf_to_iovec(struct iobuf *iob, struct iovec *iov)968{
969GF_VALIDATE_OR_GOTO("iobuf", iob, out);970GF_VALIDATE_OR_GOTO("iobuf", iov, out);971
972iov->iov_base = iobuf_ptr(iob);973iov->iov_len = iobuf_pagesize(iob);974
975out:976return;977}
978
/* Flatten iovec_src (iovcnt entries) into one freshly-allocated iobuf.
 * On success *iobuf holds the data (caller owns one ref on it), *iobref
 * owns its own reference to the iobuf, and iov_dst describes the flat
 * buffer.  Returns 0 on success; on allocation failure returns -1 with
 * errno set to ENOMEM, and the output pointers must not be used. */
int
iobuf_copy(struct iobuf_pool *iobuf_pool, const struct iovec *iovec_src,
           int iovcnt, struct iobref **iobref, struct iobuf **iobuf,
           struct iovec *iov_dst)
{
    size_t size = -1;
    int ret = 0;

    size = iov_length(iovec_src, iovcnt);

    *iobuf = iobuf_get2(iobuf_pool, size);
    if (!(*iobuf)) {
        ret = -1;
        errno = ENOMEM;
        goto out;
    }

    *iobref = iobref_new();
    if (!(*iobref)) {
        /* drop the ref taken by iobuf_get2 */
        iobuf_unref(*iobuf);
        errno = ENOMEM;
        ret = -1;
        goto out;
    }

    ret = iobref_add(*iobref, *iobuf);
    if (ret) {
        iobuf_unref(*iobuf);
        iobref_unref(*iobref);
        errno = ENOMEM;
        ret = -1;
        goto out;
    }

    iov_unload(iobuf_ptr(*iobuf), iovec_src, iovcnt);

    iov_dst->iov_base = iobuf_ptr(*iobuf);
    iov_dst->iov_len = size;

out:
    return ret;
}
1021