glusterfs
3668 lines · 82.1 KB
1/*
2Copyright (c) 2008-2013 Red Hat, Inc. <http://www.redhat.com>
3This file is part of GlusterFS.
4
5This file is licensed to you under your choice of the GNU Lesser
6General Public License, version 3 or any later version (LGPLv3 or
7later), or the GNU General Public License, version 2 (GPLv2), in all
8cases as published by the Free Software Foundation.
9*/
10
11#include "glusterfs/syncop.h"12#include "glusterfs/libglusterfs-messages.h"13
14#ifdef HAVE_ASAN_API15#include <sanitizer/common_interface_defs.h>16#endif17
18#ifdef HAVE_TSAN_API19#include <sanitizer/tsan_interface.h>20#endif21
22#ifdef HAVE_VALGRIND_API23#include <valgrind/valgrind.h>24#endif25
26int
27syncopctx_setfsuid(void *uid)28{
29struct syncopctx *opctx = NULL;30int ret = 0;31
32/* In args check */33if (!uid) {34ret = -1;35errno = EINVAL;36goto out;37}38
39opctx = syncopctx_getctx();40
41opctx->uid = *(uid_t *)uid;42opctx->valid |= SYNCOPCTX_UID;43
44out:45return ret;46}
47
48int
49syncopctx_setfsgid(void *gid)50{
51struct syncopctx *opctx = NULL;52int ret = 0;53
54/* In args check */55if (!gid) {56ret = -1;57errno = EINVAL;58goto out;59}60
61opctx = syncopctx_getctx();62
63opctx->gid = *(gid_t *)gid;64opctx->valid |= SYNCOPCTX_GID;65
66out:67return ret;68}
69
70int
71syncopctx_setfsgroups(int count, const void *groups)72{
73struct syncopctx *opctx = NULL;74gid_t *tmpgroups = NULL;75int ret = 0;76
77/* In args check */78if (count != 0 && !groups) {79ret = -1;80errno = EINVAL;81goto out;82}83
84opctx = syncopctx_getctx();85
86/* resize internal groups as required */87if (count && opctx->grpsize < count) {88if (opctx->groups) {89/* Group list will be updated later, so no need to keep current90* data and waste time copying it. It's better to free the current
91* allocation and then allocate a fresh new memory block. */
92GF_FREE(opctx->groups);93opctx->groups = NULL;94opctx->grpsize = 0;95}96tmpgroups = GF_MALLOC(count * sizeof(gid_t), gf_common_mt_syncopctx);97if (tmpgroups == NULL) {98ret = -1;99goto out;100}101
102opctx->groups = tmpgroups;103opctx->grpsize = count;104}105
106/* copy out the groups passed */107if (count)108memcpy(opctx->groups, groups, (sizeof(gid_t) * count));109
110/* set/reset the ngrps, this is where reset of groups is handled */111opctx->ngrps = count;112
113if ((opctx->valid & SYNCOPCTX_GROUPS) == 0) {114/* This is the first time we are storing groups into the TLS structure115* so we mark the current thread so that it will be properly cleaned
116* up when the thread terminates. */
117gf_thread_needs_cleanup();118}119opctx->valid |= SYNCOPCTX_GROUPS;120
121out:122return ret;123}
124
125int
126syncopctx_setfspid(void *pid)127{
128struct syncopctx *opctx = NULL;129int ret = 0;130
131/* In args check */132if (!pid) {133ret = -1;134errno = EINVAL;135goto out;136}137
138opctx = syncopctx_getctx();139
140opctx->pid = *(pid_t *)pid;141opctx->valid |= SYNCOPCTX_PID;142
143out:144return ret;145}
146
147int
148syncopctx_setfslkowner(gf_lkowner_t *lk_owner)149{
150struct syncopctx *opctx = NULL;151int ret = 0;152
153/* In args check */154if (!lk_owner) {155ret = -1;156errno = EINVAL;157goto out;158}159
160opctx = syncopctx_getctx();161
162lk_owner_copy(&opctx->lk_owner, lk_owner);163opctx->valid |= SYNCOPCTX_LKOWNER;164
165out:166return ret;167}
168
169void *170syncenv_processor(void *thdata);171
/* Queue a task onto its environment's run queue and, when the number of
 * runnable tasks exceeds the available non-idle processor threads, spawn
 * additional processors up to env->procmax.
 *
 * Caller must hold env->mutex. DONE/ZOMBIE tasks are refused. */
static void
__run(struct synctask *task)
{
    struct syncenv *env = NULL;
    int32_t total, ret, i;

    env = task->env;

    list_del_init(&task->all_tasks);
    switch (task->state) {
        case SYNCTASK_INIT:
        case SYNCTASK_SUSPEND:
            break;
        case SYNCTASK_RUN:
            gf_msg_debug(task->xl->name, 0,
                         "re-running already running"
                         " task");
            /* the task is re-queued below, so undo its current count */
            env->runcount--;
            break;
        case SYNCTASK_WAIT:
            break;
        case SYNCTASK_DONE:
            gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_COMPLETED_TASK,
                   "running completed task");
            return;
        case SYNCTASK_ZOMBIE:
            gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_WAKE_UP_ZOMBIE,
                   "attempted to wake up "
                   "zombie!!");
            return;
    }

    list_add_tail(&task->all_tasks, &env->runq);
    task->state = SYNCTASK_RUN;

    env->runcount++;

    /* threads needed to serve the runnable tasks: current threads plus
     * runnable tasks, minus the ones already sitting idle */
    total = env->procs + env->runcount - env->procs_idle;
    if (total > env->procmax) {
        total = env->procmax;
    }
    if (total > env->procs) {
        /* claim free proc slots and start processor threads until the
         * target count is reached or thread creation fails */
        for (i = 0; i < env->procmax; i++) {
            if (env->proc[i].env == NULL) {
                env->proc[i].env = env;
                ret = gf_thread_create(&env->proc[i].processor, NULL,
                                       syncenv_processor, &env->proc[i],
                                       "sproc%d", i);
                if ((ret < 0) || (++env->procs >= total)) {
                    break;
                }
            }
        }
    }
}
227
/* Park a task on its environment's wait queue.
 *
 * Caller must hold env->mutex. DONE/ZOMBIE tasks are refused. */
static void
__wait(struct synctask *task)
{
    struct syncenv *env = NULL;

    env = task->env;

    list_del_init(&task->all_tasks);
    switch (task->state) {
        case SYNCTASK_INIT:
        case SYNCTASK_SUSPEND:
            break;
        case SYNCTASK_RUN:
            /* leaving the run queue, so drop it from the runnable count */
            env->runcount--;
            break;
        case SYNCTASK_WAIT:
            gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_REWAITING_TASK,
                   "re-waiting already waiting "
                   "task");
            break;
        case SYNCTASK_DONE:
            gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_COMPLETED_TASK,
                   "running completed task");
            return;
        case SYNCTASK_ZOMBIE:
            gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_SLEEP_ZOMBIE,
                   "attempted to sleep a zombie!!");
            return;
    }

    list_add_tail(&task->all_tasks, &env->waitq);
    task->state = SYNCTASK_WAIT;
}
261
/* Suspend the current task's fiber and switch back to the scheduler
 * context of its processor thread. When 'delta' is non-NULL the
 * scheduler arms a timer (in synctask_switchto) to wake the task after
 * that interval. Execution resumes here after the task is re-scheduled. */
void
synctask_yield(struct synctask *task, struct timespec *delta)
{
    xlator_t *oldTHIS = THIS;

#if defined(__NetBSD__) && defined(_UC_TLSBASE)
    /* Preserve pthread private pointer through swapcontex() */
    task->proc->sched.uc_flags &= ~_UC_TLSBASE;
#endif

    task->delta = delta;

    /* a DONE task keeps its state so the scheduler can reap it */
    if (task->state != SYNCTASK_DONE) {
        task->state = SYNCTASK_SUSPEND;
    }

#ifdef HAVE_TSAN_API
    __tsan_switch_to_fiber(task->proc->tsan.fiber, 0);
#endif

#ifdef HAVE_ASAN_API
    /* inform ASan that we are leaving this fiber's stack for the
     * scheduler's stack */
    __sanitizer_start_switch_fiber(&task->fake_stack,
                                   task->proc->sched.uc_stack.ss_sp,
                                   task->proc->sched.uc_stack.ss_size);
#endif

    if (swapcontext(&task->ctx, &task->proc->sched) < 0) {
        gf_msg("syncop", GF_LOG_ERROR, errno, LG_MSG_SWAPCONTEXT_FAILED,
               "swapcontext failed");
    }

#ifdef HAVE_ASAN_API
    __sanitizer_finish_switch_fiber(task->proc->fake_stack, NULL, NULL);
#endif

    /* restore the xlator context that was active before the switch */
    THIS = oldTHIS;
}
299
/* Sleep for 'secs' seconds. Inside a synctask this yields the fiber
 * with a wakeup timer instead of blocking the processor thread; from a
 * plain thread it falls back to sleep(). */
void
synctask_sleep(int32_t secs)
{
    struct timespec delta;
    struct synctask *task = synctask_get();

    if (task == NULL) {
        /* not running inside a synctask: ordinary blocking sleep */
        sleep(secs);
        return;
    }

    delta.tv_sec = secs;
    delta.tv_nsec = 0;

    synctask_yield(task, &delta);
}
317
/* Sleep for 'usecs' microseconds. Inside a synctask this yields the
 * fiber with a wakeup timer instead of blocking the processor thread;
 * from a plain thread it falls back to usleep(). */
void
synctask_usleep(int32_t usecs)
{
    struct timespec delta;
    struct synctask *task = synctask_get();

    if (task == NULL) {
        /* not running inside a synctask: ordinary blocking sleep */
        usleep(usecs);
        return;
    }

    delta.tv_sec = usecs / 1000000;
    delta.tv_nsec = (usecs % 1000000) * 1000;

    synctask_yield(task, &delta);
}
335
/* Mark a task woken and, if it had actually gone to sleep, move it back
 * to the run queue. Caller must hold task->env->mutex. */
static void
__synctask_wake(struct synctask *task)
{
    task->woken = 1;

    if (task->slept)
        __run(task);

    /* wake processor threads blocked in syncenv_task() */
    pthread_cond_broadcast(&task->env->cond);
}
346
/* Wake a task. If a wakeup timer is armed it must be cancelled first;
 * when cancellation fails the timer callback is already running and
 * will perform the wakeup itself, so we must not wake here. */
void
synctask_wake(struct synctask *task)
{
    struct syncenv *env = NULL;

    env = task->env;

    pthread_mutex_lock(&env->mutex);
    {
        if (task->timer != NULL) {
            if (gf_timer_call_cancel(task->xl->ctx, task->timer) != 0) {
                /* timer fired concurrently; it owns the wakeup */
                goto unlock;
            }

            task->timer = NULL;
            task->synccond = NULL;
        }

        __synctask_wake(task);
    }
unlock:
    pthread_mutex_unlock(&env->mutex);
}
370
/* Entry point of every synctask fiber (installed via makecontext): run
 * the task function, deliver the completion callback if any, then mark
 * the task DONE and yield back to the scheduler for good. */
void
synctask_wrap(void)
{
    struct synctask *task = NULL;

    /* Do not trust the pointer received. It may be
       wrong and can lead to crashes. */

    task = synctask_get();

#ifdef HAVE_ASAN_API
    /* complete the fiber switch started in synctask_switchto() */
    __sanitizer_finish_switch_fiber(task->fake_stack, NULL, NULL);
#endif

    task->ret = task->syncfn(task->opaque);
    if (task->synccbk)
        task->synccbk(task->ret, task->frame, task->opaque);

    task->state = SYNCTASK_DONE;

    /* never returns: the scheduler reaps the DONE task */
    synctask_yield(task, NULL);
}
393
/* Free a synctask and every resource it owns. */
static void
synctask_destroy(struct synctask *task)
{
    /* the op frame belongs to us only when it was created internally */
    if (task->opframe && (task->opframe != task->frame))
        STACK_DESTROY(task->opframe->root);

    /* joinable tasks (no completion callback) carry a private
     * mutex/cond pair used by synctask_join() */
    if (task->synccbk == NULL) {
        pthread_mutex_destroy(&task->mutex);
        pthread_cond_destroy(&task->cond);
    }

#ifdef HAVE_TSAN_API
    __tsan_destroy_fiber(task->tsan.fiber);
#endif

#ifdef HAVE_VALGRIND_API
    VALGRIND_STACK_DEREGISTER(task->stackid);
#endif

    GF_FREE(task);
}
415
/* Called by the scheduler once a task's fiber has finished. Fire-and-
 * forget tasks (those with a callback) are destroyed immediately; a
 * joinable task becomes a zombie and signals synctask_join(). */
void
synctask_done(struct synctask *task)
{
    if (task->synccbk) {
        synctask_destroy(task);
        return;
    }

    pthread_mutex_lock(&task->mutex);
    {
        task->state = SYNCTASK_ZOMBIE;
        task->done = 1;
        pthread_cond_broadcast(&task->cond);
    }
    pthread_mutex_unlock(&task->mutex);
}
432
433int
434synctask_setid(struct synctask *task, uid_t uid, gid_t gid)435{
436if (!task)437return -1;438
439if (uid != -1)440task->uid = uid;441
442if (gid != -1)443task->gid = gid;444
445return 0;446}
447
448static struct synctask *449synctask_create(struct syncenv *env, size_t stacksize, synctask_fn_t fn,450synctask_cbk_t cbk, call_frame_t *frame, void *opaque)451{
452struct synctask *newtask = NULL;453xlator_t *this = THIS;454int destroymode = 0;455
456VALIDATE_OR_GOTO(env, out);457VALIDATE_OR_GOTO(fn, out);458
459/* Check if the syncenv is in destroymode i.e. destroy is SET.460* If YES, then don't allow any new synctasks on it. Return NULL.
461*/
462pthread_mutex_lock(&env->mutex);463{464destroymode = env->destroy;465}466pthread_mutex_unlock(&env->mutex);467
468/* syncenv is in DESTROY mode, return from here */469if (destroymode)470return NULL;471
472if (stacksize <= 0) {473newtask = GF_MALLOC(sizeof(struct synctask) + env->stacksize,474gf_common_mt_synctask);475if (caa_unlikely(!newtask))476return NULL;477
478memset(newtask, 0, sizeof(struct synctask));479newtask->ctx.uc_stack.ss_size = env->stacksize;480} else {481newtask = GF_MALLOC(sizeof(struct synctask) + stacksize,482gf_common_mt_synctask);483if (caa_unlikely(!newtask))484return NULL;485
486memset(newtask, 0, sizeof(struct synctask));487newtask->ctx.uc_stack.ss_size = stacksize;488}489
490INIT_LIST_HEAD(&newtask->all_tasks);491newtask->env = env;492newtask->xl = this;493newtask->frame = frame;494if (!frame) {495newtask->opframe = create_frame(this, this->ctx->pool);496if (!newtask->opframe)497goto err;498set_lk_owner_from_ptr(&newtask->opframe->root->lk_owner,499newtask->opframe->root);500} else {501newtask->opframe = frame;502}503
504newtask->synccbk = cbk;505newtask->syncfn = fn;506newtask->delta = NULL;507newtask->opaque = opaque;508newtask->timer = NULL;509newtask->synccond = NULL;510newtask->state = SYNCTASK_INIT;511newtask->woken = 0;512newtask->slept = 1;513newtask->ret = 0;514
515/* default to the uid/gid of the passed frame */516newtask->uid = newtask->opframe->root->uid;517newtask->gid = newtask->opframe->root->gid;518
519#ifdef HAVE_TSAN_API520newtask->tsan.fiber = __tsan_create_fiber(0);521snprintf(newtask->tsan.name, TSAN_THREAD_NAMELEN, "<synctask of %s>",522this->name);523__tsan_set_fiber_name(newtask->tsan.fiber, newtask->tsan.name);524#endif525
526#ifdef HAVE_ASAN_API527newtask->fake_stack = NULL;528#endif529
530#ifdef HAVE_VALGRIND_API531newtask->stackid = VALGRIND_STACK_REGISTER(532newtask->ctx.uc_stack.ss_sp,533newtask->ctx.uc_stack.ss_sp + newtask->ctx.uc_stack.ss_size);534#endif535
536if (getcontext(&newtask->ctx) < 0) {537gf_msg("syncop", GF_LOG_ERROR, errno, LG_MSG_GETCONTEXT_FAILED,538"getcontext failed");539goto err;540}541newtask->ctx.uc_stack.ss_sp = newtask->stack;542makecontext(&newtask->ctx, (void (*)(void))synctask_wrap, 0);543
544newtask->proc = NULL;545
546if (!cbk) {547pthread_mutex_init(&newtask->mutex, NULL);548pthread_cond_init(&newtask->cond, NULL);549}550
551INIT_LIST_HEAD(&newtask->waitq);552newtask->done = 0;553
554synctask_wake(newtask);555
556return newtask;557err:558if (newtask) {559if (newtask->opframe && (newtask->opframe != newtask->frame))560STACK_DESTROY(newtask->opframe->root);561GF_FREE(newtask);562}563out:564return NULL;565}
566
567int
568synctask_join(struct synctask *task)569{
570int ret = 0;571
572pthread_mutex_lock(&task->mutex);573{574while (!task->done)575pthread_cond_wait(&task->cond, &task->mutex);576}577pthread_mutex_unlock(&task->mutex);578
579ret = task->ret;580
581synctask_destroy(task);582
583return ret;584}
585
586int
587synctask_new1(struct syncenv *env, size_t stacksize, synctask_fn_t fn,588synctask_cbk_t cbk, call_frame_t *frame, void *opaque)589{
590struct synctask *newtask = NULL;591int ret = 0;592
593newtask = synctask_create(env, stacksize, fn, cbk, frame, opaque);594if (!newtask)595return -1;596
597if (!cbk)598ret = synctask_join(newtask);599
600return ret;601}
602
/* Convenience wrapper around synctask_new1() using the environment's
 * default stack size. */
int
synctask_new(struct syncenv *env, synctask_fn_t fn, synctask_cbk_t cbk,
             call_frame_t *frame, void *opaque)
{
    return synctask_new1(env, 0, fn, cbk, frame, opaque);
}
609
/* Fetch the next runnable task for a processor thread, blocking with an
 * idle timeout while the run queue is empty. Returns NULL when the
 * thread should exit: either the env can scale down after an idle
 * timeout, or the env is in destroy mode with no remaining work. */
struct synctask *
syncenv_task(struct syncproc *proc)
{
    struct syncenv *env = NULL;
    struct synctask *task = NULL;
    struct timespec sleep_till = {
        0,
    };
    int ret = 0;

    env = proc->env;

    pthread_mutex_lock(&env->mutex);
    {
        while (list_empty(&env->runq)) {
            /* If either of the conditions are met then exit
             * the current thread:
             * 1. syncenv has to scale down(procs > procmin)
             * 2. syncenv is in destroy mode and no tasks in
             *    either waitq or runq.
             *
             * At any point in time, a task can be either in runq,
             * or in executing state or in the waitq. Once the
             * destroy mode is set, no new synctask creates will
             * be allowed, but whatever in waitq or runq should be
             * allowed to finish before exiting any of the syncenv
             * processor threads.
             */
            if (((ret == ETIMEDOUT) && (env->procs > env->procmin)) ||
                (env->destroy && list_empty(&env->waitq))) {
                task = NULL;
                env->procs--;
                /* clear the proc slot so __run() can reuse it */
                memset(proc, 0, sizeof(*proc));
                /* wake syncenv_destroy(), which waits for procs == 0 */
                pthread_cond_broadcast(&env->cond);
                goto unlock;
            }

            env->procs_idle++;

            sleep_till.tv_sec = gf_time() + SYNCPROC_IDLE_TIME;
            ret = pthread_cond_timedwait(&env->cond, &env->mutex, &sleep_till);

            env->procs_idle--;
        }

        task = list_entry(env->runq.next, struct synctask, all_tasks);

        list_del_init(&task->all_tasks);
        env->runcount--;

        task->woken = 0;
        task->slept = 0;

        task->proc = proc;
    }
unlock:
    pthread_mutex_unlock(&env->mutex);

    return task;
}
670
/* Timer callback fired when a sleeping/waiting task's timeout expires.
 * If the task was waiting on a synccond, detach it from that queue and
 * report -ETIMEDOUT through task->ret, then wake the task. */
static void
synctask_timer(void *data)
{
    struct synctask *task = data;
    struct synccond *cond;

    cond = task->synccond;
    if (cond != NULL) {
        pthread_mutex_lock(&cond->pmutex);

        list_del_init(&task->waitq);
        task->synccond = NULL;

        pthread_mutex_unlock(&cond->pmutex);

        task->ret = -ETIMEDOUT;
    }

    pthread_mutex_lock(&task->env->mutex);

    /* drop the reference to the (already fired) timer before waking */
    gf_timer_call_cancel(task->xl->ctx, task->timer);
    task->timer = NULL;

    __synctask_wake(task);

    pthread_mutex_unlock(&task->env->mutex);
}
698
/* Switch a processor thread into a task's fiber. When the fiber yields
 * or finishes, dispose of the task: reap it when DONE, re-queue it when
 * a wake raced in while it ran, or park it on the wait queue (arming a
 * wakeup timer if the task yielded with a delta). */
void
synctask_switchto(struct synctask *task)
{
    struct syncenv *env = NULL;

    env = task->env;

    synctask_set(task);
    THIS = task->xl;

#if defined(__NetBSD__) && defined(_UC_TLSBASE)
    /* Preserve pthread private pointer through swapcontex() */
    task->ctx.uc_flags &= ~_UC_TLSBASE;
#endif

#ifdef HAVE_TSAN_API
    __tsan_switch_to_fiber(task->tsan.fiber, 0);
#endif

#ifdef HAVE_ASAN_API
    /* inform ASan we are leaving the scheduler stack for the fiber's */
    __sanitizer_start_switch_fiber(&task->proc->fake_stack,
                                   task->ctx.uc_stack.ss_sp,
                                   task->ctx.uc_stack.ss_size);
#endif

    if (swapcontext(&task->proc->sched, &task->ctx) < 0) {
        gf_msg("syncop", GF_LOG_ERROR, errno, LG_MSG_SWAPCONTEXT_FAILED,
               "swapcontext failed");
    }

#ifdef HAVE_ASAN_API
    __sanitizer_finish_switch_fiber(task->fake_stack, NULL, NULL);
#endif

    if (task->state == SYNCTASK_DONE) {
        synctask_done(task);
        return;
    }

    pthread_mutex_lock(&env->mutex);
    {
        if (task->woken) {
            /* a wake arrived while the task was running */
            __run(task);
        } else {
            task->slept = 1;
            __wait(task);

            if (task->delta != NULL) {
                task->timer = gf_timer_call_after(task->xl->ctx, *task->delta,
                                                  synctask_timer, task);
            }
        }

        task->delta = NULL;
    }
    pthread_mutex_unlock(&env->mutex);
}
756
#ifdef HAVE_VALGRIND_API

/* Register the calling thread's own stack with Valgrind so that
 * swapcontext() between the scheduler and synctask fibers does not
 * confuse memcheck's stack tracking. Returns the Valgrind stack id. */
static unsigned
__valgrind_register_current_stack(void)
{
    pthread_attr_t self_attr;
    size_t stack_size = 0;
    void *stack_base = NULL;
    int rc;

    rc = pthread_getattr_np(pthread_self(), &self_attr);
    GF_ASSERT(rc == 0);

    rc = pthread_attr_getstack(&self_attr, &stack_base, &stack_size);
    GF_ASSERT(rc == 0);

    return VALGRIND_STACK_REGISTER(stack_base, stack_base + stack_size);
}

#endif /* HAVE_VALGRIND_API */
/* Main loop of a syncenv processor thread: repeatedly fetch a runnable
 * task and switch into its fiber until syncenv_task() returns NULL
 * (idle scale-down or env destruction). */
void *
syncenv_processor(void *thdata)
{
    struct syncproc *proc = NULL;
    struct synctask *task = NULL;

    proc = thdata;

#ifdef HAVE_TSAN_API
    /* register this thread's scheduler context as a TSan fiber */
    proc->tsan.fiber = __tsan_create_fiber(0);
    snprintf(proc->tsan.name, TSAN_THREAD_NAMELEN, "<sched of syncenv@%p>",
             proc);
    __tsan_set_fiber_name(proc->tsan.fiber, proc->tsan.name);
#endif

#ifdef HAVE_VALGRIND_API
    proc->stackid = __valgrind_register_current_stack();
#endif

    while ((task = syncenv_task(proc)) != NULL) {
        synctask_switchto(task);
    }

#ifdef HAVE_TSAN_API
    __tsan_destroy_fiber(proc->tsan.fiber);
#endif

#ifdef HAVE_VALGRIND_API
    VALGRIND_STACK_DEREGISTER(proc->stackid);
#endif

    return NULL;
}
811
/* The syncenv threads are cleaned up in this routine.
 */
void
syncenv_destroy(struct syncenv *env)
{
    if (env == NULL)
        return;

    /* SET the 'destroy' in syncenv structure to prohibit any
     * further synctask(s) on this syncenv which is in destroy mode.
     *
     * If syncenv threads are in pthread cond wait with no tasks in
     * their run or wait queue, then the threads are woken up by
     * broadcasting the cond variable and if destroy field is set,
     * the infinite loop in syncenv_processor is broken and the
     * threads return.
     *
     * If syncenv threads have tasks in runq or waitq, the tasks are
     * completed and only then the thread returns.
     */
    pthread_mutex_lock(&env->mutex);
    {
        env->destroy = 1;
        /* This broadcast will wake threads in pthread_cond_wait
         * in syncenv_task
         */
        pthread_cond_broadcast(&env->cond);

        /* when the syncenv_task() thread is exiting, it broadcasts to
         * wake the below wait.
         */
        while (env->procs != 0) {
            pthread_cond_wait(&env->cond, &env->mutex);
        }
    }
    pthread_mutex_unlock(&env->mutex);

    /* all processor threads are gone; safe to tear down and free */
    pthread_mutex_destroy(&env->mutex);
    pthread_cond_destroy(&env->cond);

    GF_FREE(env);

    return;
}
856
857struct syncenv *858syncenv_new(size_t stacksize, int procmin, int procmax)859{
860struct syncenv *newenv = NULL;861int ret = 0;862int i = 0;863
864if (!procmin || procmin < 0)865procmin = SYNCENV_PROC_MIN;866if (!procmax || procmax > SYNCENV_PROC_MAX)867procmax = SYNCENV_PROC_MAX;868
869if (procmin > procmax)870return NULL;871
872newenv = GF_CALLOC(1, sizeof(*newenv), gf_common_mt_syncenv);873
874if (!newenv)875return NULL;876
877pthread_mutex_init(&newenv->mutex, NULL);878pthread_cond_init(&newenv->cond, NULL);879
880INIT_LIST_HEAD(&newenv->runq);881INIT_LIST_HEAD(&newenv->waitq);882
883newenv->stacksize = SYNCENV_DEFAULT_STACKSIZE;884if (stacksize)885newenv->stacksize = stacksize;886newenv->procmin = procmin;887newenv->procmax = procmax;888newenv->procs_idle = 0;889
890for (i = 0; i < newenv->procmin; i++) {891newenv->proc[i].env = newenv;892ret = gf_thread_create(&newenv->proc[i].processor, NULL,893syncenv_processor, &newenv->proc[i], "sproc%d",894i);895if (ret)896break;897newenv->procs++;898}899
900if (ret != 0) {901syncenv_destroy(newenv);902newenv = NULL;903}904
905return newenv;906}
907
908int
909synclock_init(synclock_t *lock, lock_attr_t attr)910{
911if (!lock)912return -1;913
914pthread_cond_init(&lock->cond, 0);915lock->type = LOCK_NULL;916lock->owner = NULL;917lock->owner_tid = 0;918lock->lock = 0;919lock->attr = attr;920INIT_LIST_HEAD(&lock->waitq);921
922return pthread_mutex_init(&lock->guard, 0);923}
924
925int
926synclock_destroy(synclock_t *lock)927{
928if (!lock)929return -1;930
931pthread_cond_destroy(&lock->cond);932return pthread_mutex_destroy(&lock->guard);933}
934
/* Acquire the synclock (caller holds lock->guard). Recursive locks by
 * the same owner just bump the count. Otherwise, synctask callers queue
 * on lock->waitq and yield their fiber; plain threads block on the cond
 * var. On acquisition the owner (task pointer or thread id) is
 * recorded. Returns 0 on success, -1 if lock is NULL. */
static int
__synclock_lock(struct synclock *lock)
{
    struct synctask *task = NULL;

    if (!lock)
        return -1;

    task = synctask_get();

    if (lock->lock && (lock->attr == SYNC_LOCK_RECURSIVE)) {
        /*Recursive lock (if same owner requested for lock again then
         *increment lock count and return success).
         *Note:same number of unlocks required.
         */
        switch (lock->type) {
            case LOCK_TASK:
                if (task == lock->owner) {
                    lock->lock++;
                    gf_msg_trace("", 0,
                                 "Recursive lock called by"
                                 " sync task.owner= %p,lock=%d",
                                 lock->owner, lock->lock);
                    return 0;
                }
                break;
            case LOCK_THREAD:
                if (pthread_equal(pthread_self(), lock->owner_tid)) {
                    lock->lock++;
                    gf_msg_trace("", 0,
                                 "Recursive lock called by"
                                 " thread ,owner=%u lock=%d",
                                 (unsigned int)lock->owner_tid, lock->lock);
                    return 0;
                }
                break;
            default:
                gf_msg("", GF_LOG_CRITICAL, 0, LG_MSG_UNKNOWN_LOCK_TYPE,
                       "unknown lock type");
                break;
        }
    }

    while (lock->lock) {
        if (task) {
            /* called within a synctask */
            task->woken = 0;
            list_add_tail(&task->waitq, &lock->waitq);
            pthread_mutex_unlock(&lock->guard);
            synctask_yield(task, NULL);
            /* task is removed from waitq in unlock,
             * under lock->guard.*/
            pthread_mutex_lock(&lock->guard);
        } else {
            /* called by a non-synctask */
            pthread_cond_wait(&lock->cond, &lock->guard);
        }
    }

    if (task) {
        lock->type = LOCK_TASK;
        lock->owner = task; /* for synctask*/

    } else {
        lock->type = LOCK_THREAD;
        lock->owner_tid = pthread_self(); /* for non-synctask */
    }
    lock->lock = 1;

    return 0;
}
1006
1007int
1008synclock_lock(synclock_t *lock)1009{
1010int ret = 0;1011
1012pthread_mutex_lock(&lock->guard);1013{1014ret = __synclock_lock(lock);1015}1016pthread_mutex_unlock(&lock->guard);1017
1018return ret;1019}
1020
1021int
1022synclock_trylock(synclock_t *lock)1023{
1024int ret = 0;1025
1026errno = 0;1027
1028pthread_mutex_lock(&lock->guard);1029{1030if (lock->lock) {1031errno = EBUSY;1032ret = -1;1033goto unlock;1034}1035
1036ret = __synclock_lock(lock);1037}1038unlock:1039pthread_mutex_unlock(&lock->guard);1040
1041return ret;1042}
1043
/* Release the synclock (caller holds lock->guard). Ownership is
 * verified against the caller (task pointer or thread id); recursive
 * holds just decrement the count. When the last hold is released, one
 * thread waiter is signalled and one synctask waiter is woken. Returns
 * 0 on success, -1 on misuse. */
static int
__synclock_unlock(synclock_t *lock)
{
    struct synctask *task = NULL;
    struct synctask *curr = NULL;

    if (!lock)
        return -1;

    if (lock->lock == 0) {
        gf_msg("", GF_LOG_CRITICAL, 0, LG_MSG_UNLOCK_BEFORE_LOCK,
               "Unlock called before lock ");
        return -1;
    }
    curr = synctask_get();
    /*unlock should be called by lock owner
     *i.e this will not allow the lock in nonsync task and unlock
     * in sync task and vice-versa
     */
    switch (lock->type) {
        case LOCK_TASK:
            if (curr == lock->owner) {
                lock->lock--;
                gf_msg_trace("", 0,
                             "Unlock success %p, remaining"
                             " locks=%d",
                             lock->owner, lock->lock);
            } else {
                gf_msg("", GF_LOG_WARNING, 0, LG_MSG_LOCK_OWNER_ERROR,
                       "Unlock called by %p, but lock held by %p", curr,
                       lock->owner);
            }

            break;
        case LOCK_THREAD:
            if (pthread_equal(pthread_self(), lock->owner_tid)) {
                lock->lock--;
                gf_msg_trace("", 0,
                             "Unlock success %u, remaining "
                             "locks=%d",
                             (unsigned int)lock->owner_tid, lock->lock);
            } else {
                gf_msg("", GF_LOG_WARNING, 0, LG_MSG_LOCK_OWNER_ERROR,
                       "Unlock called by %u, but lock held by %u",
                       (unsigned int)pthread_self(),
                       (unsigned int)lock->owner_tid);
            }

            break;
        default:
            break;
    }

    /* still held recursively: nothing more to do */
    if (lock->lock > 0) {
        return 0;
    }
    lock->type = LOCK_NULL;
    lock->owner = NULL;
    lock->owner_tid = 0;
    lock->lock = 0;
    /* There could be both synctasks and non synctasks
       waiting (or none, or either). As a mid-approach
       between maintaining too many waiting counters
       at one extreme and a thundering herd on unlock
       at the other, call a cond_signal (which wakes
       one waiter) and first synctask waiter. So at
       most we have two threads waking up to grab the
       just released lock.
    */
    pthread_cond_signal(&lock->cond);
    if (!list_empty(&lock->waitq)) {
        task = list_entry(lock->waitq.next, struct synctask, waitq);
        list_del_init(&task->waitq);
        synctask_wake(task);
    }

    return 0;
}
1122
1123int
1124synclock_unlock(synclock_t *lock)1125{
1126int ret = 0;1127
1128pthread_mutex_lock(&lock->guard);1129{1130ret = __synclock_unlock(lock);1131}1132pthread_mutex_unlock(&lock->guard);1133
1134return ret;1135}
1136
1137/* Condition variables */
1138
1139int32_t
1140synccond_init(synccond_t *cond)1141{
1142int32_t ret;1143
1144INIT_LIST_HEAD(&cond->waitq);1145
1146ret = pthread_mutex_init(&cond->pmutex, NULL);1147if (ret != 0) {1148return -ret;1149}1150
1151ret = pthread_cond_init(&cond->pcond, NULL);1152if (ret != 0) {1153pthread_mutex_destroy(&cond->pmutex);1154}1155
1156return -ret;1157}
1158
/* Destroy the pthread primitives backing a synccond. Tasks still queued
 * on cond->waitq are the caller's responsibility. */
void
synccond_destroy(synccond_t *cond)
{
    pthread_cond_destroy(&cond->pcond);
    pthread_mutex_destroy(&cond->pmutex);
}
1165
/* Wait on 'cond', releasing 'lock' while waiting and re-acquiring it
 * before returning. 'delta' is an optional relative timeout. Plain
 * threads wait on the embedded pthread cond var; synctasks queue on
 * cond->waitq and yield their fiber (synctask_timer delivers
 * -ETIMEDOUT via task->ret). Returns 0, a negated error code, or
 * -ETIMEDOUT. */
int
synccond_timedwait(synccond_t *cond, synclock_t *lock, struct timespec *delta)
{
    struct timespec now;
    struct synctask *task = NULL;
    int ret;

    task = synctask_get();

    if (task == NULL) {
        if (delta != NULL) {
            /* pthread_cond_timedwait() wants an absolute deadline */
            timespec_now_realtime(&now);
            timespec_adjust_delta(&now, *delta);
        }

        pthread_mutex_lock(&cond->pmutex);

        if (delta == NULL) {
            ret = -pthread_cond_wait(&cond->pcond, &cond->pmutex);
        } else {
            ret = -pthread_cond_timedwait(&cond->pcond, &cond->pmutex, &now);
        }
    } else {
        pthread_mutex_lock(&cond->pmutex);

        list_add_tail(&task->waitq, &cond->waitq);
        task->synccond = cond;

        ret = synclock_unlock(lock);
        if (ret == 0) {
            pthread_mutex_unlock(&cond->pmutex);

            synctask_yield(task, delta);

            ret = synclock_lock(lock);
            if (ret == 0) {
                /* task->ret carries 0 or -ETIMEDOUT from the wakeup */
                ret = task->ret;
            }
            task->ret = 0;

            return ret;
        }

        /* failed to release the lock: undo the enqueue */
        list_del_init(&task->waitq);
    }

    pthread_mutex_unlock(&cond->pmutex);

    return ret;
}
1216
/* Wait on 'cond' with no timeout; see synccond_timedwait(). */
int
synccond_wait(synccond_t *cond, synclock_t *lock)
{
    return synccond_timedwait(cond, lock, NULL);
}
1222
/* Wake one waiter of the condition. A queued synctask is preferred and
 * is woken only after dropping pmutex (synctask_wake takes env->mutex;
 * waking under pmutex would risk lock-order issues). Otherwise a plain
 * thread blocked in pthread_cond_wait is signalled. */
void
synccond_signal(synccond_t *cond)
{
    struct synctask *task;

    pthread_mutex_lock(&cond->pmutex);

    if (!list_empty(&cond->waitq)) {
        task = list_first_entry(&cond->waitq, struct synctask, waitq);
        list_del_init(&task->waitq);

        pthread_mutex_unlock(&cond->pmutex);

        synctask_wake(task);
    } else {
        pthread_cond_signal(&cond->pcond);

        pthread_mutex_unlock(&cond->pmutex);
    }
}
1243
1244void
1245synccond_broadcast(synccond_t *cond)1246{
1247struct list_head list;1248struct synctask *task;1249
1250INIT_LIST_HEAD(&list);1251
1252pthread_mutex_lock(&cond->pmutex);1253
1254list_splice_init(&cond->waitq, &list);1255pthread_cond_broadcast(&cond->pcond);1256
1257pthread_mutex_unlock(&cond->pmutex);1258
1259while (!list_empty(&list)) {1260task = list_first_entry(&list, struct synctask, waitq);1261list_del_init(&task->waitq);1262
1263synctask_wake(task);1264}1265}
1266
1267/* Barriers */
1268
1269int
1270syncbarrier_init(struct syncbarrier *barrier)1271{
1272int ret = 0;1273if (!barrier) {1274errno = EINVAL;1275return -1;1276}1277
1278ret = pthread_cond_init(&barrier->cond, 0);1279if (ret) {1280errno = ret;1281return -1;1282}1283barrier->count = 0;1284barrier->waitfor = 0;1285INIT_LIST_HEAD(&barrier->waitq);1286
1287ret = pthread_mutex_init(&barrier->guard, 0);1288if (ret) {1289(void)pthread_cond_destroy(&barrier->cond);1290errno = ret;1291return -1;1292}1293barrier->initialized = _gf_true;1294return 0;1295}
1296
1297int
1298syncbarrier_destroy(struct syncbarrier *barrier)1299{
1300int ret = 0;1301int ret1 = 0;1302if (!barrier) {1303errno = EINVAL;1304return -1;1305}1306
1307if (barrier->initialized) {1308ret = pthread_cond_destroy(&barrier->cond);1309ret1 = pthread_mutex_destroy(&barrier->guard);1310barrier->initialized = _gf_false;1311}1312if (ret || ret1) {1313errno = ret ? ret : ret1;1314return -1;1315}1316return 0;1317}
1318
/* Wait (caller holds barrier->guard) until 'waitfor' wakes have been
 * posted. Synctask callers queue on barrier->waitq and yield; plain
 * threads block on the cond var. The count is reset on exit so the
 * barrier can be reused. Returns 0, or -1 with errno=EINVAL. */
static int
__syncbarrier_wait(struct syncbarrier *barrier, int waitfor)
{
    struct synctask *task = NULL;

    if (!barrier) {
        errno = EINVAL;
        return -1;
    }

    task = synctask_get();

    while (barrier->count < waitfor) {
        if (task) {
            /* called within a synctask */
            list_add_tail(&task->waitq, &barrier->waitq);
            pthread_mutex_unlock(&barrier->guard);
            synctask_yield(task, NULL);
            pthread_mutex_lock(&barrier->guard);
        } else {
            /* called by a non-synctask */
            pthread_cond_wait(&barrier->cond, &barrier->guard);
        }
    }

    /* consume the posted wakes for the next use of the barrier */
    barrier->count = 0;

    return 0;
}
1348
1349int
1350syncbarrier_wait(struct syncbarrier *barrier, int waitfor)1351{
1352int ret = 0;1353
1354pthread_mutex_lock(&barrier->guard);1355{1356ret = __syncbarrier_wait(barrier, waitfor);1357}1358pthread_mutex_unlock(&barrier->guard);1359
1360return ret;1361}
1362
/* Post one wake to the barrier (caller holds barrier->guard). Waiters
 * are released only once barrier->waitfor wakes have accumulated (or
 * immediately when waitfor is 0): at most one plain thread is
 * signalled and one queued synctask woken per release. Returns 0, or
 * -1 with errno=EINVAL. */
static int
__syncbarrier_wake(struct syncbarrier *barrier)
{
    struct synctask *task = NULL;

    if (!barrier) {
        errno = EINVAL;
        return -1;
    }

    barrier->count++;
    /* not enough wakes accumulated yet */
    if (barrier->waitfor && (barrier->count < barrier->waitfor))
        return 0;

    pthread_cond_signal(&barrier->cond);
    if (!list_empty(&barrier->waitq)) {
        task = list_entry(barrier->waitq.next, struct synctask, waitq);
        list_del_init(&task->waitq);
        synctask_wake(task);
    }
    barrier->waitfor = 0;

    return 0;
}
1387
1388int
1389syncbarrier_wake(struct syncbarrier *barrier)1390{
1391int ret = 0;1392
1393pthread_mutex_lock(&barrier->guard);1394{1395ret = __syncbarrier_wake(barrier);1396}1397pthread_mutex_unlock(&barrier->guard);1398
1399return ret;1400}
1401
1402/* FOPS */
1403
1404int
1405syncop_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,1406int op_errno, inode_t *inode, struct iatt *iatt,1407dict_t *xdata, struct iatt *parent)1408{
1409struct syncargs *args = NULL;1410
1411args = cookie;1412
1413args->op_ret = op_ret;1414args->op_errno = op_errno;1415if (xdata)1416args->xdata = dict_ref(xdata);1417
1418if (op_ret == 0) {1419args->iatt1 = *iatt;1420args->iatt2 = *parent;1421}1422
1423__wake(args);1424
1425return 0;1426}
1427
1428int
1429syncop_lookup(xlator_t *subvol, loc_t *loc, struct iatt *iatt,1430struct iatt *parent, dict_t *xdata_in, dict_t **xdata_out)1431{
1432struct syncargs args = {14330,1434};1435
1436SYNCOP(subvol, (&args), syncop_lookup_cbk, subvol->fops->lookup, loc,1437xdata_in);1438
1439if (iatt)1440*iatt = args.iatt1;1441if (parent)1442*parent = args.iatt2;1443if (xdata_out)1444*xdata_out = args.xdata;1445else if (args.xdata)1446dict_unref(args.xdata);1447
1448if (args.op_ret < 0)1449return -args.op_errno;1450return args.op_ret;1451}
1452
/* Completion callback for syncop_readdirp(): deep-copy the returned
 * entries into the syncargs cookie (the originals belong to the frame)
 * and wake the waiting caller. On copy failure the result is turned
 * into -1/ENOMEM and partial copies are freed. */
int32_t
syncop_readdirp_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                    int32_t op_ret, int32_t op_errno, gf_dirent_t *entries,
                    dict_t *xdata)
{
    struct syncargs *args = NULL;
    gf_dirent_t *entry = NULL;
    gf_dirent_t *tmp = NULL;

    int count = 0;

    args = cookie;

    INIT_LIST_HEAD(&args->entries.list);

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    if (op_ret >= 0) {
        list_for_each_entry(entry, &entries->list, list)
        {
            tmp = entry_copy(entry);
            if (!tmp) {
                args->op_ret = -1;
                args->op_errno = ENOMEM;
                gf_dirent_free(&(args->entries));
                break;
            }
            gf_msg_trace(this->name, 0,
                         "adding entry=%s, "
                         "count=%d",
                         tmp->d_name, count);
            list_add_tail(&tmp->list, &(args->entries.list));
            count++;
        }
    }

    __wake(args);

    return 0;
}
1496
1497int
1498syncop_readdirp(xlator_t *subvol, fd_t *fd, size_t size, off_t off,1499gf_dirent_t *entries, dict_t *xdata_in, dict_t **xdata_out)1500{
1501struct syncargs args = {15020,1503};1504
1505SYNCOP(subvol, (&args), syncop_readdirp_cbk, subvol->fops->readdirp, fd,1506size, off, xdata_in);1507
1508if (entries)1509list_splice_init(&args.entries.list, &entries->list);1510else1511gf_dirent_free(&args.entries);1512
1513if (xdata_out)1514*xdata_out = args.xdata;1515else if (args.xdata)1516dict_unref(args.xdata);1517
1518if (args.op_ret < 0)1519return -args.op_errno;1520return args.op_ret;1521}
1522
/* Completion callback for syncop_readdir(): deep-copies the returned
 * directory entries into the waiting task's syncargs ('cookie') and wakes
 * the task blocked in SYNCOP(). 'entries' belongs to the unwinding frame,
 * hence the per-entry copy. Always returns 0. */
int32_t
syncop_readdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                   int32_t op_ret, int32_t op_errno, gf_dirent_t *entries,
                   dict_t *xdata)
{
    struct syncargs *args = NULL;
    gf_dirent_t *entry = NULL;
    gf_dirent_t *tmp = NULL;

    int count = 0;

    args = cookie;

    INIT_LIST_HEAD(&args->entries.list);

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (op_ret >= 0) {
        list_for_each_entry(entry, &entries->list, list)
        {
            tmp = entry_copy(entry);
            if (!tmp) {
                /* Copy failed: report ENOMEM and free what was copied. */
                args->op_ret = -1;
                args->op_errno = ENOMEM;
                gf_dirent_free(&(args->entries));
                break;
            }
            gf_msg_trace(this->name, 0,
                         "adding "
                         "entry=%s, count=%d",
                         tmp->d_name, count);
            list_add_tail(&tmp->list, &(args->entries.list));
            count++;
        }
    }

    __wake(args);

    return 0;
}
1566
1567int
1568syncop_readdir(xlator_t *subvol, fd_t *fd, size_t size, off_t off,1569gf_dirent_t *entries, dict_t *xdata_in, dict_t **xdata_out)1570{
1571struct syncargs args = {15720,1573};1574
1575INIT_LIST_HEAD(&args.entries.list);1576
1577SYNCOP(subvol, (&args), syncop_readdir_cbk, subvol->fops->readdir, fd, size,1578off, xdata_in);1579
1580if (entries)1581list_splice_init(&args.entries.list, &entries->list);1582else1583gf_dirent_free(&args.entries);1584
1585if (xdata_out)1586*xdata_out = args.xdata;1587else if (args.xdata)1588dict_unref(args.xdata);1589
1590if (args.op_ret < 0)1591return -args.op_errno;1592return args.op_ret;1593}
1594
/* Completion callback for syncop_opendir(): stashes the result codes (and
 * reply xdata, if any) into the waiting task's syncargs ('cookie') and wakes
 * the task blocked in SYNCOP(). Always returns 0. */
int32_t
syncop_opendir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                   int32_t op_ret, int32_t op_errno, fd_t *fd, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}
1612
1613int
1614syncop_opendir(xlator_t *subvol, loc_t *loc, fd_t *fd, dict_t *xdata_in,1615dict_t **xdata_out)1616{
1617struct syncargs args = {16180,1619};1620
1621SYNCOP(subvol, (&args), syncop_opendir_cbk, subvol->fops->opendir, loc, fd,1622xdata_in);1623
1624if (xdata_out)1625*xdata_out = args.xdata;1626else if (args.xdata)1627dict_unref(args.xdata);1628
1629if (args.op_ret < 0)1630return -args.op_errno;1631return args.op_ret;1632}
1633
/* Completion callback for syncop_fsyncdir(): stashes the result codes (and
 * reply xdata, if any) into the waiting task's syncargs ('cookie') and wakes
 * the task blocked in SYNCOP(). Always returns 0. */
int
syncop_fsyncdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                    int op_ret, int op_errno, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}
1651
1652int
1653syncop_fsyncdir(xlator_t *subvol, fd_t *fd, int datasync, dict_t *xdata_in,1654dict_t **xdata_out)1655{
1656struct syncargs args = {16570,1658};1659
1660SYNCOP(subvol, (&args), syncop_fsyncdir_cbk, subvol->fops->fsyncdir, fd,1661datasync, xdata_in);1662
1663if (xdata_out)1664*xdata_out = args.xdata;1665else if (args.xdata)1666dict_unref(args.xdata);1667
1668if (args.op_ret < 0)1669return -args.op_errno;1670return args.op_ret;1671}
1672
/* Completion callback for syncop_removexattr(): stashes the result codes
 * (and reply xdata, if any) into the waiting task's syncargs ('cookie') and
 * wakes the task blocked in SYNCOP(). Always returns 0. */
int
syncop_removexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                       int op_ret, int op_errno, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}
1690
1691int
1692syncop_removexattr(xlator_t *subvol, loc_t *loc, const char *name,1693dict_t *xdata_in, dict_t **xdata_out)1694{
1695struct syncargs args = {16960,1697};1698
1699SYNCOP(subvol, (&args), syncop_removexattr_cbk, subvol->fops->removexattr,1700loc, name, xdata_in);1701
1702if (xdata_out)1703*xdata_out = args.xdata;1704else if (args.xdata)1705dict_unref(args.xdata);1706
1707if (args.op_ret < 0)1708return -args.op_errno;1709return args.op_ret;1710}
1711
/* Completion callback for syncop_fremovexattr(): stashes the result codes
 * (and reply xdata, if any) into the waiting task's syncargs ('cookie') and
 * wakes the task blocked in SYNCOP(). Always returns 0. */
int
syncop_fremovexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                        int op_ret, int op_errno, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}
1729
1730int
1731syncop_fremovexattr(xlator_t *subvol, fd_t *fd, const char *name,1732dict_t *xdata_in, dict_t **xdata_out)1733{
1734struct syncargs args = {17350,1736};1737
1738SYNCOP(subvol, (&args), syncop_fremovexattr_cbk, subvol->fops->fremovexattr,1739fd, name, xdata_in);1740
1741if (xdata_out)1742*xdata_out = args.xdata;1743else if (args.xdata)1744dict_unref(args.xdata);1745
1746if (args.op_ret < 0)1747return -args.op_errno;1748return args.op_ret;1749}
1750
/* Completion callback for syncop_setxattr(): stashes the result codes (and
 * reply xdata, if any) into the waiting task's syncargs ('cookie') and wakes
 * the task blocked in SYNCOP(). Always returns 0. */
int
syncop_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                    int op_ret, int op_errno, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}
1768
1769int
1770syncop_setxattr(xlator_t *subvol, loc_t *loc, dict_t *dict, int32_t flags,1771dict_t *xdata_in, dict_t **xdata_out)1772{
1773struct syncargs args = {17740,1775};1776
1777SYNCOP(subvol, (&args), syncop_setxattr_cbk, subvol->fops->setxattr, loc,1778dict, flags, xdata_in);1779
1780if (xdata_out)1781*xdata_out = args.xdata;1782else if (args.xdata)1783dict_unref(args.xdata);1784
1785if (args.op_ret < 0)1786return -args.op_errno;1787return args.op_ret;1788}
1789
/* Completion callback for syncop_fsetxattr(): stashes the result codes (and
 * reply xdata, if any) into the waiting task's syncargs ('cookie') and wakes
 * the task blocked in SYNCOP(). Always returns 0. */
int
syncop_fsetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                     int op_ret, int op_errno, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}
1807
1808int
1809syncop_fsetxattr(xlator_t *subvol, fd_t *fd, dict_t *dict, int32_t flags,1810dict_t *xdata_in, dict_t **xdata_out)1811{
1812struct syncargs args = {18130,1814};1815
1816SYNCOP(subvol, (&args), syncop_fsetxattr_cbk, subvol->fops->fsetxattr, fd,1817dict, flags, xdata_in);1818
1819if (xdata_out)1820*xdata_out = args.xdata;1821else if (args.xdata)1822dict_unref(args.xdata);1823
1824if (args.op_ret < 0)1825return -args.op_errno;1826return args.op_ret;1827}
1828
/* Shared completion callback for syncop_getxattr()/syncop_fgetxattr()/
 * syncop_listxattr(): stashes the result codes, a reference to the returned
 * xattr dict (on success) and the reply xdata into the waiting task's
 * syncargs ('cookie'), then wakes the task blocked in SYNCOP().
 * Always returns 0. */
int
syncop_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                    int op_ret, int op_errno, dict_t *dict, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (op_ret >= 0)
        /* Reference the xattr dict only on success; ownership passes to
         * the wrapper, which hands it to the caller or unrefs it. */
        args->xattr = dict_ref(dict);

    __wake(args);

    return 0;
}
1849
1850int
1851syncop_listxattr(xlator_t *subvol, loc_t *loc, dict_t **dict, dict_t *xdata_in,1852dict_t **xdata_out)1853{
1854struct syncargs args = {18550,1856};1857
1858SYNCOP(subvol, (&args), syncop_getxattr_cbk, subvol->fops->getxattr, loc,1859NULL, xdata_in);1860
1861if (dict)1862*dict = args.xattr;1863else if (args.xattr)1864dict_unref(args.xattr);1865
1866if (xdata_out)1867*xdata_out = args.xdata;1868else if (args.xdata)1869dict_unref(args.xdata);1870
1871if (args.op_ret < 0)1872return -args.op_errno;1873return args.op_ret;1874}
1875
1876int
1877syncop_getxattr(xlator_t *subvol, loc_t *loc, dict_t **dict, const char *key,1878dict_t *xdata_in, dict_t **xdata_out)1879{
1880struct syncargs args = {18810,1882};1883
1884SYNCOP(subvol, (&args), syncop_getxattr_cbk, subvol->fops->getxattr, loc,1885key, xdata_in);1886
1887if (dict)1888*dict = args.xattr;1889else if (args.xattr)1890dict_unref(args.xattr);1891
1892if (xdata_out)1893*xdata_out = args.xdata;1894else if (args.xdata)1895dict_unref(args.xdata);1896
1897if (args.op_ret < 0)1898return -args.op_errno;1899return args.op_ret;1900}
1901
1902int
1903syncop_fgetxattr(xlator_t *subvol, fd_t *fd, dict_t **dict, const char *key,1904dict_t *xdata_in, dict_t **xdata_out)1905{
1906struct syncargs args = {19070,1908};1909
1910SYNCOP(subvol, (&args), syncop_getxattr_cbk, subvol->fops->fgetxattr, fd,1911key, xdata_in);1912
1913if (dict)1914*dict = args.xattr;1915else if (args.xattr)1916dict_unref(args.xattr);1917
1918if (xdata_out)1919*xdata_out = args.xdata;1920else if (args.xdata)1921dict_unref(args.xdata);1922
1923if (args.op_ret < 0)1924return -args.op_errno;1925return args.op_ret;1926}
1927
/* Completion callback for syncop_statfs(): stashes the result codes, the
 * statvfs buffer (on success) and the reply xdata into the waiting task's
 * syncargs ('cookie'), then wakes the task blocked in SYNCOP().
 * Always returns 0. */
int
syncop_statfs_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                  int32_t op_ret, int32_t op_errno, struct statvfs *buf,
                  dict_t *xdata)

{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (op_ret == 0) {
        /* Copy by value: 'buf' is owned by the unwinding frame. */
        args->statvfs_buf = *buf;
    }

    __wake(args);

    return 0;
}
1951
1952int
1953syncop_statfs(xlator_t *subvol, loc_t *loc, struct statvfs *buf,1954dict_t *xdata_in, dict_t **xdata_out)1955
1956{
1957struct syncargs args = {19580,1959};1960
1961SYNCOP(subvol, (&args), syncop_statfs_cbk, subvol->fops->statfs, loc,1962xdata_in);1963
1964if (buf)1965*buf = args.statvfs_buf;1966if (xdata_out)1967*xdata_out = args.xdata;1968else if (args.xdata)1969dict_unref(args.xdata);1970
1971if (args.op_ret < 0)1972return -args.op_errno;1973return args.op_ret;1974}
1975
/* Shared completion callback for syncop_setattr()/syncop_fsetattr():
 * stashes the result codes, the pre/post-op iatts (on success) and the
 * reply xdata into the waiting task's syncargs ('cookie'), then wakes the
 * task blocked in SYNCOP(). Always returns 0. */
int
syncop_setattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                   int op_ret, int op_errno, struct iatt *preop,
                   struct iatt *postop, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (op_ret == 0) {
        /* Copy by value: the iatts are owned by the unwinding frame. */
        args->iatt1 = *preop;
        args->iatt2 = *postop;
    }

    __wake(args);

    return 0;
}
1999
2000int
2001syncop_setattr(xlator_t *subvol, loc_t *loc, struct iatt *iatt, int valid,2002struct iatt *preop, struct iatt *postop, dict_t *xdata_in,2003dict_t **xdata_out)2004{
2005struct syncargs args = {20060,2007};2008
2009SYNCOP(subvol, (&args), syncop_setattr_cbk, subvol->fops->setattr, loc,2010iatt, valid, xdata_in);2011
2012if (preop)2013*preop = args.iatt1;2014if (postop)2015*postop = args.iatt2;2016
2017if (xdata_out)2018*xdata_out = args.xdata;2019else if (args.xdata)2020dict_unref(args.xdata);2021
2022if (args.op_ret < 0)2023return -args.op_errno;2024return args.op_ret;2025}
2026
2027int
2028syncop_fsetattr(xlator_t *subvol, fd_t *fd, struct iatt *iatt, int valid,2029struct iatt *preop, struct iatt *postop, dict_t *xdata_in,2030dict_t **xdata_out)2031{
2032struct syncargs args = {20330,2034};2035
2036SYNCOP(subvol, (&args), syncop_setattr_cbk, subvol->fops->fsetattr, fd,2037iatt, valid, xdata_in);2038
2039if (preop)2040*preop = args.iatt1;2041if (postop)2042*postop = args.iatt2;2043
2044if (xdata_out)2045*xdata_out = args.xdata;2046else if (args.xdata)2047dict_unref(args.xdata);2048
2049if (args.op_ret < 0)2050return -args.op_errno;2051return args.op_ret;2052}
2053
/* Completion callback for syncop_open(): stashes the result codes (and
 * reply xdata, if any) into the waiting task's syncargs ('cookie') and wakes
 * the task blocked in SYNCOP(). Always returns 0. */
int32_t
syncop_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                int32_t op_ret, int32_t op_errno, fd_t *fd, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}
2071
2072int
2073syncop_open(xlator_t *subvol, loc_t *loc, int32_t flags, fd_t *fd,2074dict_t *xdata_in, dict_t **xdata_out)2075{
2076struct syncargs args = {20770,2078};2079
2080SYNCOP(subvol, (&args), syncop_open_cbk, subvol->fops->open, loc, flags, fd,2081xdata_in);2082
2083if (xdata_out)2084*xdata_out = args.xdata;2085else if (args.xdata)2086dict_unref(args.xdata);2087
2088if (args.op_ret < 0)2089return -args.op_errno;2090return args.op_ret;2091}
2092
/* Completion callback for syncop_readv(): stashes the result codes, a dup
 * of the iovec array, a reference to the iobref and the stat buffer into
 * the waiting task's syncargs ('cookie'), then wakes the task blocked in
 * SYNCOP(). Always returns 0. */
int32_t
syncop_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                 int32_t op_ret, int32_t op_errno, struct iovec *vector,
                 int32_t count, struct iatt *stbuf, struct iobref *iobref,
                 dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    /* NOTE(review): readv does not use the dirent list; this init looks
     * unnecessary here but is harmless — confirm before removing. */
    INIT_LIST_HEAD(&args->entries.list);

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (args->op_ret >= 0) {
        if (iobref)
            args->iobref = iobref_ref(iobref);
        /* Duplicate the iovec array: the original belongs to the
         * unwinding frame. The wrapper/caller frees the dup. */
        args->vector = iov_dup(vector, count);
        args->count = count;
        args->iatt1 = *stbuf;
    }

    __wake(args);

    return 0;
}
2122
/* Synchronously read up to 'size' bytes from 'fd' at offset 'off'.
 * On success ownership of the duplicated iovec array passes to '*vector'
 * (freed here when the caller does not want it) and the iobref reference
 * passes to '*iobref' (unref'd here otherwise). '*count' and '*iatt'
 * receive the iovec count and post-op stat when requested.
 * Returns op_ret (bytes read) on success, -op_errno on failure. */
int
syncop_readv(xlator_t *subvol, fd_t *fd, size_t size, off_t off, uint32_t flags,
             struct iovec **vector, int *count, struct iobref **iobref,
             struct iatt *iatt, dict_t *xdata_in, dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_readv_cbk, subvol->fops->readv, fd, size,
           off, flags, xdata_in);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (iatt)
        *iatt = args.iatt1;

    /* On failure the callback set neither vector nor iobref; skip the
     * ownership transfer entirely. */
    if (args.op_ret < 0)
        goto out;

    if (vector)
        *vector = args.vector;
    else
        GF_FREE(args.vector);

    if (count)
        *count = args.count;

    /* Do we need a 'ref' here? */
    if (iobref)
        *iobref = args.iobref;
    else if (args.iobref)
        iobref_unref(args.iobref);

out:
    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
2165
/* Shared completion callback for syncop_writev()/syncop_write() (and the
 * truncate paths that reuse it): stashes the result codes, pre/post iatts
 * (on success) and the reply xdata into the waiting task's syncargs
 * ('cookie'), then wakes the task blocked in SYNCOP(). Always returns 0. */
int
syncop_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
                  int op_errno, struct iatt *prebuf, struct iatt *postbuf,
                  dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (op_ret >= 0) {
        /* Copy by value: the iatts are owned by the unwinding frame. */
        args->iatt1 = *prebuf;
        args->iatt2 = *postbuf;
    }

    __wake(args);

    return 0;
}
2189
2190int
2191syncop_writev(xlator_t *subvol, fd_t *fd, const struct iovec *vector,2192int32_t count, off_t offset, struct iobref *iobref,2193uint32_t flags, struct iatt *preiatt, struct iatt *postiatt,2194dict_t *xdata_in, dict_t **xdata_out)2195{
2196struct syncargs args = {21970,2198};2199
2200SYNCOP(subvol, (&args), syncop_writev_cbk, subvol->fops->writev, fd,2201(struct iovec *)vector, count, offset, flags, iobref, xdata_in);2202
2203if (preiatt)2204*preiatt = args.iatt1;2205if (postiatt)2206*postiatt = args.iatt2;2207
2208if (xdata_out)2209*xdata_out = args.xdata;2210else if (args.xdata)2211dict_unref(args.xdata);2212
2213if (args.op_ret < 0)2214return -args.op_errno;2215return args.op_ret;2216}
2217
2218int
2219syncop_write(xlator_t *subvol, fd_t *fd, const char *buf, int size,2220off_t offset, struct iobref *iobref, uint32_t flags,2221dict_t *xdata_in, dict_t **xdata_out)2222{
2223struct syncargs args = {22240,2225};2226struct iovec vec = {22270,2228};2229
2230vec.iov_len = size;2231vec.iov_base = (void *)buf;2232
2233SYNCOP(subvol, (&args), syncop_writev_cbk, subvol->fops->writev, fd, &vec,22341, offset, flags, iobref, xdata_in);2235
2236if (xdata_out)2237*xdata_out = args.xdata;2238else if (args.xdata)2239dict_unref(args.xdata);2240
2241if (args.op_ret < 0)2242return -args.op_errno;2243return args.op_ret;2244}
2245
2246int
2247syncop_close(fd_t *fd)2248{
2249if (fd)2250fd_unref(fd);2251return 0;2252}
2253
/* Completion callback for syncop_create(): stashes the result codes, the
 * new file's iatt (when provided) and the reply xdata into the waiting
 * task's syncargs ('cookie'), then wakes the task blocked in SYNCOP().
 * Always returns 0. */
int32_t
syncop_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                  int32_t op_ret, int32_t op_errno, fd_t *fd, inode_t *inode,
                  struct iatt *buf, struct iatt *preparent,
                  struct iatt *postparent, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (buf)
        /* Copy by value: 'buf' is owned by the unwinding frame. */
        args->iatt1 = *buf;

    __wake(args);

    return 0;
}
2276
2277int
2278syncop_create(xlator_t *subvol, loc_t *loc, int32_t flags, mode_t mode,2279fd_t *fd, struct iatt *iatt, dict_t *xdata_in, dict_t **xdata_out)2280{
2281struct syncargs args = {22820,2283};2284
2285SYNCOP(subvol, (&args), syncop_create_cbk, subvol->fops->create, loc, flags,2286mode, 0, fd, xdata_in);2287
2288if (iatt)2289*iatt = args.iatt1;2290
2291if (xdata_out)2292*xdata_out = args.xdata;2293else if (args.xdata)2294dict_unref(args.xdata);2295
2296if (args.op_ret < 0)2297return -args.op_errno;2298return args.op_ret;2299}
2300
/* Completion callback for syncop_put(): stashes the result codes, the new
 * file's iatt (when provided) and the reply xdata into the waiting task's
 * syncargs ('cookie'), then wakes the task blocked in SYNCOP().
 * Always returns 0. */
int32_t
syncop_put_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
               int32_t op_ret, int32_t op_errno, inode_t *inode,
               struct iatt *buf, struct iatt *preparent,
               struct iatt *postparent, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (buf)
        /* Copy by value: 'buf' is owned by the unwinding frame. */
        args->iatt1 = *buf;

    __wake(args);

    return 0;
}
2323
2324int
2325syncop_put(xlator_t *subvol, loc_t *loc, mode_t mode, mode_t umask,2326uint32_t flags, struct iovec *vector, int32_t count, off_t offset,2327struct iobref *iobref, dict_t *xattr, struct iatt *iatt,2328dict_t *xdata_in, dict_t **xdata_out)2329{
2330struct syncargs args = {23310,2332};2333
2334SYNCOP(subvol, (&args), syncop_put_cbk, subvol->fops->put, loc, mode, umask,2335flags, (struct iovec *)vector, count, offset, iobref, xattr,2336xdata_in);2337
2338if (iatt)2339*iatt = args.iatt1;2340
2341if (xdata_out)2342*xdata_out = args.xdata;2343else if (args.xdata)2344dict_unref(args.xdata);2345
2346if (args.op_ret < 0)2347return -args.op_errno;2348return args.op_ret;2349}
2350
/* Completion callback for syncop_unlink(): stashes the result codes (and
 * reply xdata, if any) into the waiting task's syncargs ('cookie') and wakes
 * the task blocked in SYNCOP(). The parent iatts are not propagated.
 * Always returns 0. */
int
syncop_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
                  int op_errno, struct iatt *preparent, struct iatt *postparent,
                  dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}
2369
2370int
2371syncop_unlink(xlator_t *subvol, loc_t *loc, dict_t *xdata_in,2372dict_t **xdata_out)2373{
2374struct syncargs args = {23750,2376};2377
2378SYNCOP(subvol, (&args), syncop_unlink_cbk, subvol->fops->unlink, loc, 0,2379xdata_in);2380
2381if (xdata_out)2382*xdata_out = args.xdata;2383else if (args.xdata)2384dict_unref(args.xdata);2385
2386if (args.op_ret < 0)2387return -args.op_errno;2388return args.op_ret;2389}
2390
/* Completion callback for syncop_rmdir(): stashes the result codes (and
 * reply xdata, if any) into the waiting task's syncargs ('cookie') and wakes
 * the task blocked in SYNCOP(). The parent iatts are not propagated.
 * Always returns 0. */
int
syncop_rmdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
                 int op_errno, struct iatt *preparent, struct iatt *postparent,
                 dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}
2409
2410int
2411syncop_rmdir(xlator_t *subvol, loc_t *loc, int flags, dict_t *xdata_in,2412dict_t **xdata_out)2413{
2414struct syncargs args = {24150,2416};2417
2418SYNCOP(subvol, (&args), syncop_rmdir_cbk, subvol->fops->rmdir, loc, flags,2419xdata_in);2420
2421if (xdata_out)2422*xdata_out = args.xdata;2423else if (args.xdata)2424dict_unref(args.xdata);2425
2426if (args.op_ret < 0)2427return -args.op_errno;2428return args.op_ret;2429}
2430
/* Completion callback for syncop_link(): stashes the result codes, the
 * linked inode's iatt (when provided) and the reply xdata into the waiting
 * task's syncargs ('cookie'), then wakes the task blocked in SYNCOP().
 * Always returns 0. */
int
syncop_link_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                int32_t op_ret, int32_t op_errno, inode_t *inode,
                struct iatt *buf, struct iatt *preparent,
                struct iatt *postparent, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (buf)
        /* Copy by value: 'buf' is owned by the unwinding frame. */
        args->iatt1 = *buf;

    __wake(args);

    return 0;
}
2453
2454int
2455syncop_link(xlator_t *subvol, loc_t *oldloc, loc_t *newloc, struct iatt *iatt,2456dict_t *xdata_in, dict_t **xdata_out)2457{
2458struct syncargs args = {24590,2460};2461
2462SYNCOP(subvol, (&args), syncop_link_cbk, subvol->fops->link, oldloc, newloc,2463xdata_in);2464
2465if (iatt)2466*iatt = args.iatt1;2467
2468if (xdata_out)2469*xdata_out = args.xdata;2470else if (args.xdata)2471dict_unref(args.xdata);2472
2473if (args.op_ret < 0)2474return -args.op_errno;2475
2476return args.op_ret;2477}
2478
/* Completion callback for syncop_rename(): stashes the result codes (and
 * reply xdata, if any) into the waiting task's syncargs ('cookie') and wakes
 * the task blocked in SYNCOP(). The renamed inode's iatt and the four
 * parent iatts are not propagated. Always returns 0. */
int
syncop_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                  int32_t op_ret, int32_t op_errno, struct iatt *buf,
                  struct iatt *preoldparent, struct iatt *postoldparent,
                  struct iatt *prenewparent, struct iatt *postnewparent,
                  dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}
2499
2500int
2501syncop_rename(xlator_t *subvol, loc_t *oldloc, loc_t *newloc, dict_t *xdata_in,2502dict_t **xdata_out)2503{
2504struct syncargs args = {25050,2506};2507
2508SYNCOP(subvol, (&args), syncop_rename_cbk, subvol->fops->rename, oldloc,2509newloc, xdata_in);2510
2511if (xdata_out)2512*xdata_out = args.xdata;2513else if (args.xdata)2514dict_unref(args.xdata);2515
2516if (args.op_ret < 0)2517return -args.op_errno;2518
2519return args.op_ret;2520}
2521
/* Shared completion callback for syncop_ftruncate()/syncop_truncate():
 * stashes the result codes, the pre/post iatts (on success) and the reply
 * xdata into the waiting task's syncargs ('cookie'), then wakes the task
 * blocked in SYNCOP(). Always returns 0. */
int
syncop_ftruncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                     int op_ret, int op_errno, struct iatt *prebuf,
                     struct iatt *postbuf, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (op_ret >= 0) {
        /* Copy by value: the iatts are owned by the unwinding frame. */
        args->iatt1 = *prebuf;
        args->iatt2 = *postbuf;
    }

    __wake(args);

    return 0;
}
2545
2546int
2547syncop_ftruncate(xlator_t *subvol, fd_t *fd, off_t offset, struct iatt *preiatt,2548struct iatt *postiatt, dict_t *xdata_in, dict_t **xdata_out)2549{
2550struct syncargs args = {25510,2552};2553
2554SYNCOP(subvol, (&args), syncop_ftruncate_cbk, subvol->fops->ftruncate, fd,2555offset, xdata_in);2556
2557if (preiatt)2558*preiatt = args.iatt1;2559if (postiatt)2560*postiatt = args.iatt2;2561
2562if (xdata_out)2563*xdata_out = args.xdata;2564else if (args.xdata)2565dict_unref(args.xdata);2566
2567if (args.op_ret < 0)2568return -args.op_errno;2569return args.op_ret;2570}
2571
2572int
2573syncop_truncate(xlator_t *subvol, loc_t *loc, off_t offset, dict_t *xdata_in,2574dict_t **xdata_out)2575{
2576struct syncargs args = {25770,2578};2579
2580SYNCOP(subvol, (&args), syncop_ftruncate_cbk, subvol->fops->truncate, loc,2581offset, xdata_in);2582
2583if (xdata_out)2584*xdata_out = args.xdata;2585else if (args.xdata)2586dict_unref(args.xdata);2587
2588if (args.op_ret < 0)2589return -args.op_errno;2590return args.op_ret;2591}
2592
/* Completion callback for syncop_fsync(): stashes the result codes, the
 * pre/post iatts (on success) and the reply xdata into the waiting task's
 * syncargs ('cookie'), then wakes the task blocked in SYNCOP().
 * Always returns 0. */
int32_t
syncop_fsync_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                 int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
                 struct iatt *postbuf, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (op_ret >= 0) {
        /* Copy by value: the iatts are owned by the unwinding frame. */
        args->iatt1 = *prebuf;
        args->iatt2 = *postbuf;
    }

    __wake(args);

    return 0;
}
2616
2617int
2618syncop_fsync(xlator_t *subvol, fd_t *fd, int dataonly, struct iatt *preiatt,2619struct iatt *postiatt, dict_t *xdata_in, dict_t **xdata_out)2620{
2621struct syncargs args = {26220,2623};2624
2625SYNCOP(subvol, (&args), syncop_fsync_cbk, subvol->fops->fsync, fd, dataonly,2626xdata_in);2627
2628if (preiatt)2629*preiatt = args.iatt1;2630if (postiatt)2631*postiatt = args.iatt2;2632
2633if (xdata_out)2634*xdata_out = args.xdata;2635else if (args.xdata)2636dict_unref(args.xdata);2637
2638if (args.op_ret < 0)2639return -args.op_errno;2640return args.op_ret;2641}
2642
/* Completion callback for syncop_flush(): stashes the result codes (and
 * reply xdata, if any) into the waiting task's syncargs ('cookie') and wakes
 * the task blocked in SYNCOP(). Always returns 0. */
int
syncop_flush_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                 int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}
2660
2661int
2662syncop_flush(xlator_t *subvol, fd_t *fd, dict_t *xdata_in, dict_t **xdata_out)2663{
2664struct syncargs args = {0};2665
2666SYNCOP(subvol, (&args), syncop_flush_cbk, subvol->fops->flush, fd,2667xdata_in);2668
2669if (xdata_out)2670*xdata_out = args.xdata;2671else if (args.xdata)2672dict_unref(args.xdata);2673
2674if (args.op_ret < 0)2675return -args.op_errno;2676return args.op_ret;2677}
2678
/* Shared completion callback for syncop_fstat()/syncop_stat(): stashes the
 * result codes, the stat buffer (on success) and the reply xdata into the
 * waiting task's syncargs ('cookie'), then wakes the task blocked in
 * SYNCOP(). Always returns 0. */
int32_t
syncop_fstat_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                 int32_t op_ret, int32_t op_errno, struct iatt *stbuf,
                 dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (op_ret == 0)
        /* Copy by value: 'stbuf' is owned by the unwinding frame. */
        args->iatt1 = *stbuf;

    __wake(args);

    return 0;
}
2700
2701int
2702syncop_fstat(xlator_t *subvol, fd_t *fd, struct iatt *stbuf, dict_t *xdata_in,2703dict_t **xdata_out)2704{
2705struct syncargs args = {27060,2707};2708
2709SYNCOP(subvol, (&args), syncop_fstat_cbk, subvol->fops->fstat, fd,2710xdata_in);2711
2712if (stbuf)2713*stbuf = args.iatt1;2714
2715if (xdata_out)2716*xdata_out = args.xdata;2717else if (args.xdata)2718dict_unref(args.xdata);2719
2720if (args.op_ret < 0)2721return -args.op_errno;2722return args.op_ret;2723}
2724
2725int
2726syncop_stat(xlator_t *subvol, loc_t *loc, struct iatt *stbuf, dict_t *xdata_in,2727dict_t **xdata_out)2728{
2729struct syncargs args = {27300,2731};2732
2733SYNCOP(subvol, (&args), syncop_fstat_cbk, subvol->fops->stat, loc,2734xdata_in);2735
2736if (stbuf)2737*stbuf = args.iatt1;2738
2739if (xdata_out)2740*xdata_out = args.xdata;2741else if (args.xdata)2742dict_unref(args.xdata);2743
2744if (args.op_ret < 0)2745return -args.op_errno;2746return args.op_ret;2747}
2748
/* Completion callback for syncop_symlink(): stashes the result codes, the
 * new symlink's iatt (when provided) and the reply xdata into the waiting
 * task's syncargs ('cookie'), then wakes the task blocked in SYNCOP().
 * Always returns 0. */
int32_t
syncop_symlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                   int32_t op_ret, int32_t op_errno, inode_t *inode,
                   struct iatt *buf, struct iatt *preparent,
                   struct iatt *postparent, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (buf)
        /* Copy by value: 'buf' is owned by the unwinding frame. */
        args->iatt1 = *buf;

    __wake(args);

    return 0;
}
2771
2772int
2773syncop_symlink(xlator_t *subvol, loc_t *loc, const char *newpath,2774struct iatt *iatt, dict_t *xdata_in, dict_t **xdata_out)2775{
2776struct syncargs args = {27770,2778};2779
2780SYNCOP(subvol, (&args), syncop_symlink_cbk, subvol->fops->symlink, newpath,2781loc, 0, xdata_in);2782
2783if (iatt)2784*iatt = args.iatt1;2785
2786if (xdata_out)2787*xdata_out = args.xdata;2788else if (args.xdata)2789dict_unref(args.xdata);2790
2791if (args.op_ret < 0)2792return -args.op_errno;2793return args.op_ret;2794}
2795
/* Completion callback for syncop_readlink(): stashes the result codes, a
 * heap copy of the link target (on success) and the reply xdata into the
 * waiting task's syncargs ('cookie'), then wakes the task blocked in
 * SYNCOP(). Always returns 0. */
int
syncop_readlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                    int op_ret, int op_errno, const char *path,
                    struct iatt *stbuf, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if ((op_ret != -1) && path)
        /* Duplicate: 'path' belongs to the unwinding frame. Freed by the
         * wrapper/caller. */
        args->buffer = gf_strdup(path);

    __wake(args);

    return 0;
}
2817
2818int
2819syncop_readlink(xlator_t *subvol, loc_t *loc, char **buffer, size_t size,2820dict_t *xdata_in, dict_t **xdata_out)2821{
2822struct syncargs args = {28230,2824};2825
2826SYNCOP(subvol, (&args), syncop_readlink_cbk, subvol->fops->readlink, loc,2827size, xdata_in);2828
2829if (buffer)2830*buffer = args.buffer;2831else2832GF_FREE(args.buffer);2833
2834if (xdata_out)2835*xdata_out = args.xdata;2836else if (args.xdata)2837dict_unref(args.xdata);2838
2839if (args.op_ret < 0)2840return -args.op_errno;2841return args.op_ret;2842}
2843
/* Completion callback for syncop_mknod(): stashes the result codes, the new
 * node's iatt (when provided) and the reply xdata into the waiting task's
 * syncargs ('cookie'), then wakes the task blocked in SYNCOP().
 * Always returns 0. */
int
syncop_mknod_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                 int32_t op_ret, int32_t op_errno, inode_t *inode,
                 struct iatt *buf, struct iatt *preparent,
                 struct iatt *postparent, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (buf)
        /* Copy by value: 'buf' is owned by the unwinding frame. */
        args->iatt1 = *buf;

    __wake(args);

    return 0;
}
2866
2867int
2868syncop_mknod(xlator_t *subvol, loc_t *loc, mode_t mode, dev_t rdev,2869struct iatt *iatt, dict_t *xdata_in, dict_t **xdata_out)2870{
2871struct syncargs args = {28720,2873};2874
2875SYNCOP(subvol, (&args), syncop_mknod_cbk, subvol->fops->mknod, loc, mode,2876rdev, 0, xdata_in);2877
2878if (iatt)2879*iatt = args.iatt1;2880
2881if (xdata_out)2882*xdata_out = args.xdata;2883else if (args.xdata)2884dict_unref(args.xdata);2885
2886if (args.op_ret < 0)2887return -args.op_errno;2888return args.op_ret;2889}
2890
/* Completion callback for syncop_mkdir(): stashes the result codes, the new
 * directory's iatt (when provided) and the reply xdata into the waiting
 * task's syncargs ('cookie'), then wakes the task blocked in SYNCOP().
 * Always returns 0. */
int
syncop_mkdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                 int32_t op_ret, int32_t op_errno, inode_t *inode,
                 struct iatt *buf, struct iatt *preparent,
                 struct iatt *postparent, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    if (buf)
        /* Copy by value: 'buf' is owned by the unwinding frame. */
        args->iatt1 = *buf;

    __wake(args);

    return 0;
}
2913
2914int
2915syncop_mkdir(xlator_t *subvol, loc_t *loc, mode_t mode, struct iatt *iatt,2916dict_t *xdata_in, dict_t **xdata_out)2917{
2918struct syncargs args = {29190,2920};2921
2922SYNCOP(subvol, (&args), syncop_mkdir_cbk, subvol->fops->mkdir, loc, mode, 0,2923xdata_in);2924
2925if (iatt)2926*iatt = args.iatt1;2927
2928if (xdata_out)2929*xdata_out = args.xdata;2930else if (args.xdata)2931dict_unref(args.xdata);2932
2933if (args.op_ret < 0)2934return -args.op_errno;2935return args.op_ret;2936}
2937
/* Completion callback for syncop_access(): stashes the result codes (and
 * reply xdata, if any) into the waiting task's syncargs ('cookie') and wakes
 * the task blocked in SYNCOP(). Note: for NFS-style access checks op_errno
 * carries the permitted mode on success (see the comment block below).
 * Always returns 0. */
int
syncop_access_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                  int32_t op_ret, int32_t op_errno, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        /* Take our own reference; the syncop wrapper/caller releases it. */
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}
2955
2956/* posix_acl xlator will respond in different ways for access calls from
2957fuse and access calls from nfs. For fuse, checking op_ret is sufficient
2958to check whether the access call is successful or not. But for nfs the
2959mode of the access that is permitted is put into op_errno before unwind.
2960With syncop, the caller of syncop_access will not be able to get the
mode of the access despite call being successful (since syncop_access
2962returns only the op_ret collected in args).
2963Now, if access call is failed, then args.op_ret is returned to recognise
2964the failure. But if op_ret is zero, then the mode of access which is
2965set in args.op_errno is returned. Thus the caller of syncop_access
2966has to check whether the return value is less than zero or not. If the
2967return value it got is less than zero, then access call is failed.
2968If it is not, then the access call is successful and the value the caller
2969got is the mode of the access.
2970*/
/* Synchronous access check on 'loc' for 'mask'. Deliberately returns
 * args.op_errno (the permitted access mode, per the comment block above)
 * when the call succeeds, and -op_errno when it fails — callers must test
 * for "< 0", not "!= 0". */
int
syncop_access(xlator_t *subvol, loc_t *loc, int32_t mask, dict_t *xdata_in,
              dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_access_cbk, subvol->fops->access, loc, mask,
           xdata_in);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    /* Intentional: on success op_errno holds the granted access mode. */
    return args.op_errno;
}
2991
2992int
2993syncop_fallocate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,2994int op_ret, int op_errno, struct iatt *prebuf,2995struct iatt *postbuf, dict_t *xdata)2996{
2997struct syncargs *args = NULL;2998
2999args = cookie;3000
3001args->op_ret = op_ret;3002args->op_errno = op_errno;3003if (xdata)3004args->xdata = dict_ref(xdata);3005
3006__wake(args);3007
3008return 0;3009}
3010
3011int
3012syncop_fallocate(xlator_t *subvol, fd_t *fd, int32_t keep_size, off_t offset,3013size_t len, dict_t *xdata_in, dict_t **xdata_out)3014{
3015struct syncargs args = {30160,3017};3018
3019SYNCOP(subvol, (&args), syncop_fallocate_cbk, subvol->fops->fallocate, fd,3020keep_size, offset, len, xdata_in);3021
3022if (xdata_out)3023*xdata_out = args.xdata;3024else if (args.xdata)3025dict_unref(args.xdata);3026
3027if (args.op_ret < 0)3028return -args.op_errno;3029return args.op_ret;3030}
3031
3032int
3033syncop_discard_cbk(call_frame_t *frame, void *cookie, xlator_t *this,3034int op_ret, int op_errno, struct iatt *prebuf,3035struct iatt *postbuf, dict_t *xdata)3036{
3037struct syncargs *args = NULL;3038
3039args = cookie;3040
3041args->op_ret = op_ret;3042args->op_errno = op_errno;3043if (xdata)3044args->xdata = dict_ref(xdata);3045
3046__wake(args);3047
3048return 0;3049}
3050
3051int
3052syncop_discard(xlator_t *subvol, fd_t *fd, off_t offset, size_t len,3053dict_t *xdata_in, dict_t **xdata_out)3054{
3055struct syncargs args = {30560,3057};3058
3059SYNCOP(subvol, (&args), syncop_discard_cbk, subvol->fops->discard, fd,3060offset, len, xdata_in);3061
3062if (xdata_out)3063*xdata_out = args.xdata;3064else if (args.xdata)3065dict_unref(args.xdata);3066
3067if (args.op_ret < 0)3068return -args.op_errno;3069return args.op_ret;3070}
3071
3072int
3073syncop_zerofill_cbk(call_frame_t *frame, void *cookie, xlator_t *this,3074int op_ret, int op_errno, struct iatt *prebuf,3075struct iatt *postbuf, dict_t *xdata)3076{
3077struct syncargs *args = NULL;3078
3079args = cookie;3080
3081args->op_ret = op_ret;3082args->op_errno = op_errno;3083if (xdata)3084args->xdata = dict_ref(xdata);3085
3086__wake(args);3087
3088return 0;3089}
3090
3091int
3092syncop_zerofill(xlator_t *subvol, fd_t *fd, off_t offset, off_t len,3093dict_t *xdata_in, dict_t **xdata_out)3094{
3095struct syncargs args = {30960,3097};3098
3099SYNCOP(subvol, (&args), syncop_zerofill_cbk, subvol->fops->zerofill, fd,3100offset, len, xdata_in);3101
3102if (xdata_out)3103*xdata_out = args.xdata;3104else if (args.xdata)3105dict_unref(args.xdata);3106
3107if (args.op_ret < 0)3108return -args.op_errno;3109return args.op_ret;3110}
3111
3112int
3113syncop_ipc_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,3114int op_errno, dict_t *xdata)3115{
3116struct syncargs *args = NULL;3117
3118args = cookie;3119
3120args->op_ret = op_ret;3121args->op_errno = op_errno;3122if (xdata)3123args->xdata = dict_ref(xdata);3124
3125__wake(args);3126
3127return 0;3128}
3129
3130int
3131syncop_ipc(xlator_t *subvol, int32_t op, dict_t *xdata_in, dict_t **xdata_out)3132{
3133struct syncargs args = {31340,3135};3136
3137SYNCOP(subvol, (&args), syncop_ipc_cbk, subvol->fops->ipc, op, xdata_in);3138
3139if (args.xdata) {3140if (xdata_out) {3141/*3142* We're passing this reference to the caller, along
3143* with the pointer itself. That means they're
3144* responsible for calling dict_unref at some point.
3145*/
3146*xdata_out = args.xdata;3147} else {3148dict_unref(args.xdata);3149}3150}3151
3152if (args.op_ret < 0)3153return -args.op_errno;3154return args.op_ret;3155}
3156
3157int
3158syncop_seek_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,3159int op_errno, off_t offset, dict_t *xdata)3160{
3161struct syncargs *args = NULL;3162
3163args = cookie;3164
3165args->op_ret = op_ret;3166args->op_errno = op_errno;3167args->offset = offset;3168if (xdata)3169args->xdata = dict_ref(xdata);3170
3171__wake(args);3172
3173return 0;3174}
3175
3176int
3177syncop_seek(xlator_t *subvol, fd_t *fd, off_t offset, gf_seek_what_t what,3178dict_t *xdata_in, off_t *off)3179{
3180struct syncargs args = {31810,3182};3183
3184SYNCOP(subvol, (&args), syncop_seek_cbk, subvol->fops->seek, fd, offset,3185what, xdata_in);3186
3187if (args.op_ret < 0) {3188return -args.op_errno;3189} else {3190if (off)3191*off = args.offset;3192return args.op_ret;3193}3194}
3195
3196int
3197syncop_lease_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,3198int op_errno, struct gf_lease *lease, dict_t *xdata)3199{
3200struct syncargs *args = NULL;3201
3202args = cookie;3203
3204args->op_ret = op_ret;3205args->op_errno = op_errno;3206if (xdata)3207args->xdata = dict_ref(xdata);3208if (lease)3209args->lease = *lease;3210
3211__wake(args);3212
3213return 0;3214}
3215
3216int
3217syncop_lease(xlator_t *subvol, loc_t *loc, struct gf_lease *lease,3218dict_t *xdata_in, dict_t **xdata_out)3219{
3220struct syncargs args = {32210,3222};3223
3224SYNCOP(subvol, (&args), syncop_lease_cbk, subvol->fops->lease, loc, lease,3225xdata_in);3226
3227*lease = args.lease;3228
3229if (args.xdata) {3230if (xdata_out) {3231/*3232* We're passing this reference to the caller, along
3233* with the pointer itself. That means they're
3234* responsible for calling dict_unref at some point.
3235*/
3236*xdata_out = args.xdata;3237} else {3238dict_unref(args.xdata);3239}3240}3241
3242if (args.op_ret < 0)3243return -args.op_errno;3244return args.op_ret;3245}
3246
3247int
3248syncop_lk_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,3249int op_errno, struct gf_flock *flock, dict_t *xdata)3250{
3251struct syncargs *args = NULL;3252
3253args = cookie;3254
3255args->op_ret = op_ret;3256args->op_errno = op_errno;3257if (xdata)3258args->xdata = dict_ref(xdata);3259
3260if (flock)3261gf_flock_copy(&args->flock, flock);3262__wake(args);3263
3264return 0;3265}
3266
3267int
3268syncop_lk(xlator_t *subvol, fd_t *fd, int cmd, struct gf_flock *flock,3269dict_t *xdata_in, dict_t **xdata_out)3270{
3271struct syncargs args = {32720,3273};3274
3275SYNCOP(subvol, (&args), syncop_lk_cbk, subvol->fops->lk, fd, cmd, flock,3276xdata_in);3277
3278gf_flock_copy(flock, &args.flock);3279
3280if (xdata_out)3281*xdata_out = args.xdata;3282else if (args.xdata)3283dict_unref(args.xdata);3284
3285if (args.op_ret < 0)3286return -args.op_errno;3287return args.op_ret;3288}
3289
3290int32_t
3291syncop_inodelk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,3292int32_t op_ret, int32_t op_errno, dict_t *xdata)3293{
3294struct syncargs *args = NULL;3295
3296args = cookie;3297
3298args->op_ret = op_ret;3299args->op_errno = op_errno;3300if (xdata)3301args->xdata = dict_ref(xdata);3302
3303__wake(args);3304
3305return 0;3306}
3307
3308int
3309syncop_inodelk(xlator_t *subvol, const char *volume, loc_t *loc, int32_t cmd,3310struct gf_flock *lock, dict_t *xdata_in, dict_t **xdata_out)3311{
3312struct syncargs args = {33130,3314};3315
3316SYNCOP(subvol, (&args), syncop_inodelk_cbk, subvol->fops->inodelk, volume,3317loc, cmd, lock, xdata_in);3318
3319if (xdata_out)3320*xdata_out = args.xdata;3321else if (args.xdata)3322dict_unref(args.xdata);3323
3324if (args.op_ret < 0)3325return -args.op_errno;3326
3327return args.op_ret;3328}
3329
3330int32_t
3331syncop_entrylk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,3332int32_t op_ret, int32_t op_errno, dict_t *xdata)3333{
3334struct syncargs *args = NULL;3335
3336args = cookie;3337args->op_ret = op_ret;3338args->op_errno = op_errno;3339if (xdata)3340args->xdata = dict_ref(xdata);3341
3342__wake(args);3343return 0;3344}
3345
3346int
3347syncop_entrylk(xlator_t *subvol, const char *volume, loc_t *loc,3348const char *basename, entrylk_cmd cmd, entrylk_type type,3349dict_t *xdata_in, dict_t **xdata_out)3350{
3351struct syncargs args = {33520,3353};3354
3355SYNCOP(subvol, (&args), syncop_entrylk_cbk, subvol->fops->entrylk, volume,3356loc, basename, cmd, type, xdata_in);3357
3358if (xdata_out)3359*xdata_out = args.xdata;3360else if (args.xdata)3361dict_unref(args.xdata);3362
3363if (args.op_ret < 0)3364return -args.op_errno;3365
3366return args.op_ret;3367}
3368
3369int32_t
3370syncop_xattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this,3371int32_t op_ret, int32_t op_errno, dict_t *dict,3372dict_t *xdata)3373{
3374struct syncargs *args = NULL;3375
3376args = cookie;3377
3378args->op_ret = op_ret;3379args->op_errno = op_errno;3380if (xdata)3381args->xdata = dict_ref(xdata);3382if (dict)3383args->dict_out = dict_ref(dict);3384
3385__wake(args);3386
3387return 0;3388}
3389
3390int
3391syncop_xattrop(xlator_t *subvol, loc_t *loc, gf_xattrop_flags_t flags,3392dict_t *dict, dict_t *xdata_in, dict_t **dict_out,3393dict_t **xdata_out)3394{
3395struct syncargs args = {33960,3397};3398
3399SYNCOP(subvol, (&args), syncop_xattrop_cbk, subvol->fops->xattrop, loc,3400flags, dict, xdata_in);3401
3402if (xdata_out)3403*xdata_out = args.xdata;3404else if (args.xdata)3405dict_unref(args.xdata);3406
3407if (dict_out)3408*dict_out = args.dict_out;3409else if (args.dict_out)3410dict_unref(args.dict_out);3411
3412if (args.op_ret < 0)3413return -args.op_errno;3414
3415return args.op_ret;3416}
3417
3418int
3419syncop_fxattrop(xlator_t *subvol, fd_t *fd, gf_xattrop_flags_t flags,3420dict_t *dict, dict_t *xdata_in, dict_t **dict_out,3421dict_t **xdata_out)3422{
3423struct syncargs args = {34240,3425};3426
3427SYNCOP(subvol, (&args), syncop_xattrop_cbk, subvol->fops->fxattrop, fd,3428flags, dict, xdata_in);3429
3430if (xdata_out)3431*xdata_out = args.xdata;3432else if (args.xdata)3433dict_unref(args.xdata);3434
3435if (dict_out)3436*dict_out = args.dict_out;3437else if (args.dict_out)3438dict_unref(args.dict_out);3439
3440if (args.op_ret < 0)3441return -args.op_errno;3442
3443return args.op_ret;3444}
3445
/* Completion handler for syncop_getactivelk(): deep-copies the unwound
 * @locklist into args->locklist so it survives the unwind, then wakes the
 * waiting synctask.
 *
 * NOTE(review): if an entry allocation fails midway, the locks collected
 * so far are freed and the loop stops, but op_ret is left > 0 — the
 * caller then sees a positive return with an empty (or partial) list.
 * Also, the gf_strdup() of client_uid is not checked for NULL. Both look
 * like latent issues; confirm intended semantics before changing. */
int32_t
syncop_getactivelk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                       int32_t op_ret, int32_t op_errno,
                       lock_migration_info_t *locklist, dict_t *xdata)
{
    struct syncargs *args = NULL;
    lock_migration_info_t *tmp = NULL;
    lock_migration_info_t *entry = NULL;

    args = cookie;

    INIT_LIST_HEAD(&args->locklist.list);

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    /* op_ret > 0 indicates there are locks to copy out of @locklist. */
    if (op_ret > 0) {
        list_for_each_entry(tmp, &locklist->list, list)
        {
            /* TODO: move to GF_MALLOC() */
            entry = GF_CALLOC(1, sizeof(lock_migration_info_t),
                              gf_common_mt_char);

            if (!entry) {
                gf_msg(THIS->name, GF_LOG_ERROR, 0, 0,
                       "lock mem allocation failed");
                /* Drop everything collected so far; see NOTE above about
                 * op_ret remaining positive on this path. */
                gf_free_mig_locks(&args->locklist);

                break;
            }

            INIT_LIST_HEAD(&entry->list);

            gf_flock_copy(&entry->flock, &tmp->flock);

            entry->lk_flags = tmp->lk_flags;

            entry->client_uid = gf_strdup(tmp->client_uid);

            list_add_tail(&entry->list, &args->locklist.list);
        }
    }

    __wake(args);

    return 0;
}
3495
3496int
3497syncop_getactivelk(xlator_t *subvol, loc_t *loc,3498lock_migration_info_t *locklist, dict_t *xdata_in,3499dict_t **xdata_out)3500{
3501struct syncargs args = {35020,3503};3504
3505INIT_LIST_HEAD(&args.locklist.list);3506SYNCOP(subvol, (&args), syncop_getactivelk_cbk, subvol->fops->getactivelk,3507loc, xdata_in);3508
3509if (locklist)3510list_splice_init(&args.locklist.list, &locklist->list);3511else3512gf_free_mig_locks(&args.locklist);3513
3514if (xdata_out)3515*xdata_out = args.xdata;3516else if (args.xdata)3517dict_unref(args.xdata);3518
3519if (args.op_ret < 0)3520return -args.op_errno;3521
3522return args.op_ret;3523}
3524
3525int
3526syncop_setactivelk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,3527int32_t op_ret, int32_t op_errno, dict_t *xdata)3528{
3529struct syncargs *args = NULL;3530
3531args = cookie;3532
3533args->op_ret = op_ret;3534args->op_errno = op_errno;3535
3536if (xdata)3537args->xdata = dict_ref(xdata);3538
3539__wake(args);3540
3541return 0;3542}
3543
3544int
3545syncop_setactivelk(xlator_t *subvol, loc_t *loc,3546lock_migration_info_t *locklist, dict_t *xdata_in,3547dict_t **xdata_out)3548{
3549struct syncargs args = {35500,3551};3552
3553SYNCOP(subvol, (&args), syncop_setactivelk_cbk, subvol->fops->setactivelk,3554loc, locklist, xdata_in);3555
3556if (xdata_out)3557*xdata_out = args.xdata;3558else if (args.xdata)3559dict_unref(args.xdata);3560
3561if (args.op_ret < 0)3562return -args.op_errno;3563
3564return args.op_ret;3565}
3566
3567int
3568syncop_icreate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,3569int32_t op_ret, int32_t op_errno, inode_t *inode,3570struct iatt *buf, dict_t *xdata)3571{
3572struct syncargs *args = NULL;3573
3574args = cookie;3575
3576args->op_ret = op_ret;3577args->op_errno = op_errno;3578if (xdata)3579args->xdata = dict_ref(xdata);3580
3581if (buf)3582args->iatt1 = *buf;3583
3584__wake(args);3585
3586return 0;3587}
3588
3589int
3590syncop_namelink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,3591int32_t op_ret, int32_t op_errno, struct iatt *prebuf,3592struct iatt *postbuf, dict_t *xdata)3593{
3594struct syncargs *args = NULL;3595
3596args = cookie;3597
3598args->op_ret = op_ret;3599args->op_errno = op_errno;3600
3601if (xdata)3602args->xdata = dict_ref(xdata);3603
3604__wake(args);3605
3606return 0;3607}
3608
3609int
3610syncop_copy_file_range(xlator_t *subvol, fd_t *fd_in, off64_t off_in,3611fd_t *fd_out, off64_t off_out, size_t len,3612uint32_t flags, struct iatt *stbuf,3613struct iatt *preiatt_dst, struct iatt *postiatt_dst,3614dict_t *xdata_in, dict_t **xdata_out)3615{
3616struct syncargs args = {36170,3618};3619
3620SYNCOP(subvol, (&args), syncop_copy_file_range_cbk,3621subvol->fops->copy_file_range, fd_in, off_in, fd_out, off_out, len,3622flags, xdata_in);3623
3624if (stbuf) {3625*stbuf = args.iatt1;3626}3627if (preiatt_dst) {3628*preiatt_dst = args.iatt2;3629}3630if (postiatt_dst) {3631*postiatt_dst = args.iatt3;3632}3633
3634if (xdata_out) {3635*xdata_out = args.xdata;3636} else if (args.xdata) {3637dict_unref(args.xdata);3638}3639
3640errno = args.op_errno;3641return args.op_ret;3642}
3643
3644int
3645syncop_copy_file_range_cbk(call_frame_t *frame, void *cookie, xlator_t *this,3646int op_ret, int op_errno, struct iatt *stbuf,3647struct iatt *prebuf_dst, struct iatt *postbuf_dst,3648dict_t *xdata)3649{
3650struct syncargs *args = NULL;3651
3652args = cookie;3653
3654args->op_ret = op_ret;3655args->op_errno = op_errno;3656if (xdata)3657args->xdata = dict_ref(xdata);3658
3659if (op_ret >= 0) {3660args->iatt1 = *stbuf;3661args->iatt2 = *prebuf_dst;3662args->iatt3 = *postbuf_dst;3663}3664
3665__wake(args);3666
3667return 0;3668}
3669