glusterfs

Fork
0
3668 lines · 82.1 KB
1
/*
2
  Copyright (c) 2008-2013 Red Hat, Inc. <http://www.redhat.com>
3
  This file is part of GlusterFS.
4

5
  This file is licensed to you under your choice of the GNU Lesser
6
  General Public License, version 3 or any later version (LGPLv3 or
7
  later), or the GNU General Public License, version 2 (GPLv2), in all
8
  cases as published by the Free Software Foundation.
9
*/
10

11
#include "glusterfs/syncop.h"
12
#include "glusterfs/libglusterfs-messages.h"
13

14
#ifdef HAVE_ASAN_API
15
#include <sanitizer/common_interface_defs.h>
16
#endif
17

18
#ifdef HAVE_TSAN_API
19
#include <sanitizer/tsan_interface.h>
20
#endif
21

22
#ifdef HAVE_VALGRIND_API
23
#include <valgrind/valgrind.h>
24
#endif
25

26
int
27
syncopctx_setfsuid(void *uid)
28
{
29
    struct syncopctx *opctx = NULL;
30
    int ret = 0;
31

32
    /* In args check */
33
    if (!uid) {
34
        ret = -1;
35
        errno = EINVAL;
36
        goto out;
37
    }
38

39
    opctx = syncopctx_getctx();
40

41
    opctx->uid = *(uid_t *)uid;
42
    opctx->valid |= SYNCOPCTX_UID;
43

44
out:
45
    return ret;
46
}
47

48
int
49
syncopctx_setfsgid(void *gid)
50
{
51
    struct syncopctx *opctx = NULL;
52
    int ret = 0;
53

54
    /* In args check */
55
    if (!gid) {
56
        ret = -1;
57
        errno = EINVAL;
58
        goto out;
59
    }
60

61
    opctx = syncopctx_getctx();
62

63
    opctx->gid = *(gid_t *)gid;
64
    opctx->valid |= SYNCOPCTX_GID;
65

66
out:
67
    return ret;
68
}
69

70
/* Set the supplementary group list for syncops issued from the calling
 * thread's syncop context.
 *
 * count  - number of entries in 'groups'; 0 resets the list.
 * groups - array of 'count' gid_t values (may be NULL when count == 0).
 *
 * Returns 0 on success, -1 on error (errno = EINVAL for bad arguments;
 * errno left from the allocator on allocation failure). */
int
syncopctx_setfsgroups(int count, const void *groups)
{
    struct syncopctx *opctx = NULL;
    gid_t *tmpgroups = NULL;
    int ret = 0;

    /* In args check */
    if (count != 0 && !groups) {
        ret = -1;
        errno = EINVAL;
        goto out;
    }

    opctx = syncopctx_getctx();

    /* resize internal groups as required */
    if (count && opctx->grpsize < count) {
        if (opctx->groups) {
            /* Group list will be updated later, so no need to keep current
             * data and waste time copying it. It's better to free the current
             * allocation and then allocate a fresh new memory block. */
            GF_FREE(opctx->groups);
            opctx->groups = NULL;
            opctx->grpsize = 0;
        }
        tmpgroups = GF_MALLOC(count * sizeof(gid_t), gf_common_mt_syncopctx);
        if (tmpgroups == NULL) {
            /* grpsize was already reset above, so the context stays
             * consistent even on failure. */
            ret = -1;
            goto out;
        }

        opctx->groups = tmpgroups;
        opctx->grpsize = count;
    }

    /* copy out the groups passed */
    if (count)
        memcpy(opctx->groups, groups, (sizeof(gid_t) * count));

    /* set/reset the ngrps, this is where reset of groups is handled */
    opctx->ngrps = count;

    if ((opctx->valid & SYNCOPCTX_GROUPS) == 0) {
        /* This is the first time we are storing groups into the TLS structure
         * so we mark the current thread so that it will be properly cleaned
         * up when the thread terminates. */
        gf_thread_needs_cleanup();
    }
    opctx->valid |= SYNCOPCTX_GROUPS;

out:
    return ret;
}
124

125
int
126
syncopctx_setfspid(void *pid)
127
{
128
    struct syncopctx *opctx = NULL;
129
    int ret = 0;
130

131
    /* In args check */
132
    if (!pid) {
133
        ret = -1;
134
        errno = EINVAL;
135
        goto out;
136
    }
137

138
    opctx = syncopctx_getctx();
139

140
    opctx->pid = *(pid_t *)pid;
141
    opctx->valid |= SYNCOPCTX_PID;
142

143
out:
144
    return ret;
145
}
146

147
int
148
syncopctx_setfslkowner(gf_lkowner_t *lk_owner)
149
{
150
    struct syncopctx *opctx = NULL;
151
    int ret = 0;
152

153
    /* In args check */
154
    if (!lk_owner) {
155
        ret = -1;
156
        errno = EINVAL;
157
        goto out;
158
    }
159

160
    opctx = syncopctx_getctx();
161

162
    lk_owner_copy(&opctx->lk_owner, lk_owner);
163
    opctx->valid |= SYNCOPCTX_LKOWNER;
164

165
out:
166
    return ret;
167
}
168

169
void *
170
syncenv_processor(void *thdata);
171

172
/* Queue 'task' on its syncenv's run queue and make sure enough
 * processor threads exist to service the runnable tasks. Callers in
 * this file invoke it with env->mutex held. DONE and ZOMBIE tasks are
 * refused with a warning. */
static void
__run(struct synctask *task)
{
    struct syncenv *env = NULL;
    int32_t total, ret, i;

    env = task->env;

    list_del_init(&task->all_tasks);
    switch (task->state) {
        case SYNCTASK_INIT:
        case SYNCTASK_SUSPEND:
            break;
        case SYNCTASK_RUN:
            gf_msg_debug(task->xl->name, 0,
                         "re-running already running"
                         " task");
            /* The task is re-added below, so undo its current
             * contribution to the runnable count first. */
            env->runcount--;
            break;
        case SYNCTASK_WAIT:
            break;
        case SYNCTASK_DONE:
            gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_COMPLETED_TASK,
                   "running completed task");
            return;
        case SYNCTASK_ZOMBIE:
            gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_WAKE_UP_ZOMBIE,
                   "attempted to wake up "
                   "zombie!!");
            return;
    }

    list_add_tail(&task->all_tasks, &env->runq);
    task->state = SYNCTASK_RUN;

    env->runcount++;

    /* Target thread count: existing threads plus runnable tasks minus
     * the threads already idle, capped at procmax. */
    total = env->procs + env->runcount - env->procs_idle;
    if (total > env->procmax) {
        total = env->procmax;
    }
    if (total > env->procs) {
        /* Scale up: start processors in unused proc slots until the
         * target is reached or thread creation fails. */
        for (i = 0; i < env->procmax; i++) {
            if (env->proc[i].env == NULL) {
                env->proc[i].env = env;
                ret = gf_thread_create(&env->proc[i].processor, NULL,
                                       syncenv_processor, &env->proc[i],
                                       "sproc%d", i);
                if ((ret < 0) || (++env->procs >= total)) {
                    break;
                }
            }
        }
    }
}
227

228
/* Move 'task' to its syncenv's wait queue. Callers in this file
 * invoke it with env->mutex held. DONE and ZOMBIE tasks are refused
 * with a warning. */
static void
__wait(struct synctask *task)
{
    struct syncenv *env = NULL;

    env = task->env;

    list_del_init(&task->all_tasks);
    switch (task->state) {
        case SYNCTASK_INIT:
        case SYNCTASK_SUSPEND:
            break;
        case SYNCTASK_RUN:
            /* Leaving the run queue: keep the runnable count honest. */
            env->runcount--;
            break;
        case SYNCTASK_WAIT:
            gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_REWAITING_TASK,
                   "re-waiting already waiting "
                   "task");
            break;
        case SYNCTASK_DONE:
            gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_COMPLETED_TASK,
                   "running completed task");
            return;
        case SYNCTASK_ZOMBIE:
            gf_msg(task->xl->name, GF_LOG_WARNING, 0, LG_MSG_SLEEP_ZOMBIE,
                   "attempted to sleep a zombie!!");
            return;
    }

    list_add_tail(&task->all_tasks, &env->waitq);
    task->state = SYNCTASK_WAIT;
}
261

262
/* Suspend the current task and switch back to the processor thread's
 * scheduler context. If 'delta' is non-NULL, the scheduler
 * (synctask_switchto) arms a wakeup timer with that timeout after the
 * switch. Execution resumes past swapcontext() when the task is next
 * scheduled. */
void
synctask_yield(struct synctask *task, struct timespec *delta)
{
    xlator_t *oldTHIS = THIS;

#if defined(__NetBSD__) && defined(_UC_TLSBASE)
    /* Preserve pthread private pointer through swapcontex() */
    task->proc->sched.uc_flags &= ~_UC_TLSBASE;
#endif

    task->delta = delta;

    /* A DONE task must stay DONE so the scheduler reaps it. */
    if (task->state != SYNCTASK_DONE) {
        task->state = SYNCTASK_SUSPEND;
    }

#ifdef HAVE_TSAN_API
    __tsan_switch_to_fiber(task->proc->tsan.fiber, 0);
#endif

#ifdef HAVE_ASAN_API
    /* Tell ASan we are about to jump to the scheduler's stack. */
    __sanitizer_start_switch_fiber(&task->fake_stack,
                                   task->proc->sched.uc_stack.ss_sp,
                                   task->proc->sched.uc_stack.ss_size);
#endif

    if (swapcontext(&task->ctx, &task->proc->sched) < 0) {
        gf_msg("syncop", GF_LOG_ERROR, errno, LG_MSG_SWAPCONTEXT_FAILED,
               "swapcontext failed");
    }

#ifdef HAVE_ASAN_API
    __sanitizer_finish_switch_fiber(task->proc->fake_stack, NULL, NULL);
#endif

    /* Resumed: restore the xlator that was active before the yield. */
    THIS = oldTHIS;
}
299

300
/* Sleep for 'secs' seconds. Inside a synctask the task yields with a
 * timeout so the processor thread stays free for other tasks; outside
 * a synctask this degrades to plain sleep(). */
void
synctask_sleep(int32_t secs)
{
    struct timespec delta = {
        0,
    };
    struct synctask *task = synctask_get();

    if (!task) {
        sleep(secs);
        return;
    }

    delta.tv_sec = secs;
    synctask_yield(task, &delta);
}
317

318
/* Sleep for 'usecs' microseconds. Inside a synctask the task yields
 * with an equivalent timespec timeout; outside a synctask this
 * degrades to plain usleep(). */
void
synctask_usleep(int32_t usecs)
{
    struct timespec delta = {
        0,
    };
    struct synctask *task = synctask_get();

    if (!task) {
        usleep(usecs);
        return;
    }

    delta.tv_sec = usecs / 1000000;
    delta.tv_nsec = (usecs % 1000000) * 1000;
    synctask_yield(task, &delta);
}
335

336
/* Mark 'task' as woken; if it had actually gone to sleep, requeue it
 * on the run queue. The broadcast wakes idle processor threads waiting
 * in syncenv_task(). Callers in this file hold env->mutex. */
static void
__synctask_wake(struct synctask *task)
{
    task->woken = 1;

    if (task->slept)
        __run(task);

    pthread_cond_broadcast(&task->env->cond);
}
346

347
/* Wake 'task', cancelling any pending wakeup timer first. If the
 * cancel fails the timer callback is already running and will perform
 * the wakeup itself, so this call backs off. */
void
synctask_wake(struct synctask *task)
{
    struct syncenv *env = NULL;

    env = task->env;

    pthread_mutex_lock(&env->mutex);
    {
        if (task->timer != NULL) {
            if (gf_timer_call_cancel(task->xl->ctx, task->timer) != 0) {
                /* Timer already fired: synctask_timer() owns the
                 * wakeup now. */
                goto unlock;
            }

            task->timer = NULL;
            task->synccond = NULL;
        }

        __synctask_wake(task);
    }
unlock:
    pthread_mutex_unlock(&env->mutex);
}
370

371
/* Entry point of every synctask's ucontext (installed by
 * makecontext() in synctask_create): run the task function, deliver
 * the completion callback if one was given, then yield back to the
 * scheduler in DONE state. */
void
synctask_wrap(void)
{
    struct synctask *task = NULL;

    /* Do not trust the pointer received. It may be
       wrong and can lead to crashes. */

    task = synctask_get();

#ifdef HAVE_ASAN_API
    __sanitizer_finish_switch_fiber(task->fake_stack, NULL, NULL);
#endif

    task->ret = task->syncfn(task->opaque);
    if (task->synccbk)
        task->synccbk(task->ret, task->frame, task->opaque);

    task->state = SYNCTASK_DONE;

    /* Never returns: the scheduler finalizes DONE tasks. */
    synctask_yield(task, NULL);
}
393

394
/* Free all resources owned by 'task'. The mutex/cond pair exists only
 * for synchronous tasks (created without a completion callback), so it
 * is destroyed conditionally. */
static void
synctask_destroy(struct synctask *task)
{
    /* Destroy the internally created frame only; a caller-supplied
     * frame remains the caller's to manage. */
    if (task->opframe && (task->opframe != task->frame))
        STACK_DESTROY(task->opframe->root);

    if (task->synccbk == NULL) {
        pthread_mutex_destroy(&task->mutex);
        pthread_cond_destroy(&task->cond);
    }

#ifdef HAVE_TSAN_API
    __tsan_destroy_fiber(task->tsan.fiber);
#endif

#ifdef HAVE_VALGRIND_API
    VALGRIND_STACK_DEREGISTER(task->stackid);
#endif

    GF_FREE(task);
}
415

416
/* Finalize a completed task: asynchronous tasks (with a callback) are
 * destroyed immediately; synchronous ones become zombies and signal
 * the thread blocked in synctask_join(), which destroys them. */
void
synctask_done(struct synctask *task)
{
    if (task->synccbk) {
        synctask_destroy(task);
        return;
    }

    pthread_mutex_lock(&task->mutex);
    {
        task->state = SYNCTASK_ZOMBIE;
        task->done = 1;
        pthread_cond_broadcast(&task->cond);
    }
    pthread_mutex_unlock(&task->mutex);
}
432

433
int
434
synctask_setid(struct synctask *task, uid_t uid, gid_t gid)
435
{
436
    if (!task)
437
        return -1;
438

439
    if (uid != -1)
440
        task->uid = uid;
441

442
    if (gid != -1)
443
        task->gid = gid;
444

445
    return 0;
446
}
447

448
static struct synctask *
449
synctask_create(struct syncenv *env, size_t stacksize, synctask_fn_t fn,
450
                synctask_cbk_t cbk, call_frame_t *frame, void *opaque)
451
{
452
    struct synctask *newtask = NULL;
453
    xlator_t *this = THIS;
454
    int destroymode = 0;
455

456
    VALIDATE_OR_GOTO(env, out);
457
    VALIDATE_OR_GOTO(fn, out);
458

459
    /* Check if the syncenv is in destroymode i.e. destroy is SET.
460
     * If YES, then don't allow any new synctasks on it. Return NULL.
461
     */
462
    pthread_mutex_lock(&env->mutex);
463
    {
464
        destroymode = env->destroy;
465
    }
466
    pthread_mutex_unlock(&env->mutex);
467

468
    /* syncenv is in DESTROY mode, return from here */
469
    if (destroymode)
470
        return NULL;
471

472
    if (stacksize <= 0) {
473
        newtask = GF_MALLOC(sizeof(struct synctask) + env->stacksize,
474
                            gf_common_mt_synctask);
475
        if (caa_unlikely(!newtask))
476
            return NULL;
477

478
        memset(newtask, 0, sizeof(struct synctask));
479
        newtask->ctx.uc_stack.ss_size = env->stacksize;
480
    } else {
481
        newtask = GF_MALLOC(sizeof(struct synctask) + stacksize,
482
                            gf_common_mt_synctask);
483
        if (caa_unlikely(!newtask))
484
            return NULL;
485

486
        memset(newtask, 0, sizeof(struct synctask));
487
        newtask->ctx.uc_stack.ss_size = stacksize;
488
    }
489

490
    INIT_LIST_HEAD(&newtask->all_tasks);
491
    newtask->env = env;
492
    newtask->xl = this;
493
    newtask->frame = frame;
494
    if (!frame) {
495
        newtask->opframe = create_frame(this, this->ctx->pool);
496
        if (!newtask->opframe)
497
            goto err;
498
        set_lk_owner_from_ptr(&newtask->opframe->root->lk_owner,
499
                              newtask->opframe->root);
500
    } else {
501
        newtask->opframe = frame;
502
    }
503

504
    newtask->synccbk = cbk;
505
    newtask->syncfn = fn;
506
    newtask->delta = NULL;
507
    newtask->opaque = opaque;
508
    newtask->timer = NULL;
509
    newtask->synccond = NULL;
510
    newtask->state = SYNCTASK_INIT;
511
    newtask->woken = 0;
512
    newtask->slept = 1;
513
    newtask->ret = 0;
514

515
    /* default to the uid/gid of the passed frame */
516
    newtask->uid = newtask->opframe->root->uid;
517
    newtask->gid = newtask->opframe->root->gid;
518

519
#ifdef HAVE_TSAN_API
520
    newtask->tsan.fiber = __tsan_create_fiber(0);
521
    snprintf(newtask->tsan.name, TSAN_THREAD_NAMELEN, "<synctask of %s>",
522
             this->name);
523
    __tsan_set_fiber_name(newtask->tsan.fiber, newtask->tsan.name);
524
#endif
525

526
#ifdef HAVE_ASAN_API
527
    newtask->fake_stack = NULL;
528
#endif
529

530
#ifdef HAVE_VALGRIND_API
531
    newtask->stackid = VALGRIND_STACK_REGISTER(
532
        newtask->ctx.uc_stack.ss_sp,
533
        newtask->ctx.uc_stack.ss_sp + newtask->ctx.uc_stack.ss_size);
534
#endif
535

536
    if (getcontext(&newtask->ctx) < 0) {
537
        gf_msg("syncop", GF_LOG_ERROR, errno, LG_MSG_GETCONTEXT_FAILED,
538
               "getcontext failed");
539
        goto err;
540
    }
541
    newtask->ctx.uc_stack.ss_sp = newtask->stack;
542
    makecontext(&newtask->ctx, (void (*)(void))synctask_wrap, 0);
543

544
    newtask->proc = NULL;
545

546
    if (!cbk) {
547
        pthread_mutex_init(&newtask->mutex, NULL);
548
        pthread_cond_init(&newtask->cond, NULL);
549
    }
550

551
    INIT_LIST_HEAD(&newtask->waitq);
552
    newtask->done = 0;
553

554
    synctask_wake(newtask);
555

556
    return newtask;
557
err:
558
    if (newtask) {
559
        if (newtask->opframe && (newtask->opframe != newtask->frame))
560
            STACK_DESTROY(newtask->opframe->root);
561
        GF_FREE(newtask);
562
    }
563
out:
564
    return NULL;
565
}
566

567
/* Block until 'task' finishes, destroy it and return the task
 * function's return value. Only valid for synchronous tasks (created
 * without a completion callback), which own a mutex/cond pair. */
int
synctask_join(struct synctask *task)
{
    int ret = 0;

    pthread_mutex_lock(&task->mutex);
    {
        while (!task->done)
            pthread_cond_wait(&task->cond, &task->mutex);
    }
    pthread_mutex_unlock(&task->mutex);

    /* Safe to read unlocked: the task is a zombie now and nothing
     * else touches it. */
    ret = task->ret;

    synctask_destroy(task);

    return ret;
}
585

586
int
587
synctask_new1(struct syncenv *env, size_t stacksize, synctask_fn_t fn,
588
              synctask_cbk_t cbk, call_frame_t *frame, void *opaque)
589
{
590
    struct synctask *newtask = NULL;
591
    int ret = 0;
592

593
    newtask = synctask_create(env, stacksize, fn, cbk, frame, opaque);
594
    if (!newtask)
595
        return -1;
596

597
    if (!cbk)
598
        ret = synctask_join(newtask);
599

600
    return ret;
601
}
602

603
int
604
synctask_new(struct syncenv *env, synctask_fn_t fn, synctask_cbk_t cbk,
605
             call_frame_t *frame, void *opaque)
606
{
607
    return synctask_new1(env, 0, fn, cbk, frame, opaque);
608
}
609

610
/* Fetch the next runnable task for processor 'proc', blocking until
 * one appears. Returns NULL when this thread should exit: either the
 * env is scaling down after an idle timeout (procs > procmin) or the
 * env is being destroyed and no work remains. */
struct synctask *
syncenv_task(struct syncproc *proc)
{
    struct syncenv *env = NULL;
    struct synctask *task = NULL;
    struct timespec sleep_till = {
        0,
    };
    int ret = 0;

    env = proc->env;

    pthread_mutex_lock(&env->mutex);
    {
        while (list_empty(&env->runq)) {
            /* If either of the conditions are met then exit
             * the current thread:
             * 1. syncenv has to scale down(procs > procmin)
             * 2. syncenv is in destroy mode and no tasks in
             *    either waitq or runq.
             *
             * At any point in time, a task can be either in runq,
             * or in executing state or in the waitq. Once the
             * destroy mode is set, no new synctask creates will
             * be allowed, but whatever in waitq or runq should be
             * allowed to finish before exiting any of the syncenv
             * processor threads.
             */
            if (((ret == ETIMEDOUT) && (env->procs > env->procmin)) ||
                (env->destroy && list_empty(&env->waitq))) {
                task = NULL;
                env->procs--;
                /* Free this proc slot so __run() can reuse it. */
                memset(proc, 0, sizeof(*proc));
                /* Wake syncenv_destroy() waiting for procs == 0. */
                pthread_cond_broadcast(&env->cond);
                goto unlock;
            }

            env->procs_idle++;

            sleep_till.tv_sec = gf_time() + SYNCPROC_IDLE_TIME;
            ret = pthread_cond_timedwait(&env->cond, &env->mutex, &sleep_till);

            env->procs_idle--;
        }

        task = list_entry(env->runq.next, struct synctask, all_tasks);

        list_del_init(&task->all_tasks);
        env->runcount--;

        task->woken = 0;
        task->slept = 0;

        task->proc = proc;
    }
unlock:
    pthread_mutex_unlock(&env->mutex);

    return task;
}
670

671
/* Timer callback armed by synctask_switchto() when a task yields with
 * a timeout. If the task is still parked on a synccond, remove it and
 * flag the wait as timed out, then wake the task. */
static void
synctask_timer(void *data)
{
    struct synctask *task = data;
    struct synccond *cond;

    cond = task->synccond;
    if (cond != NULL) {
        pthread_mutex_lock(&cond->pmutex);

        list_del_init(&task->waitq);
        task->synccond = NULL;

        pthread_mutex_unlock(&cond->pmutex);

        /* The wait expired before the condition was signalled. */
        task->ret = -ETIMEDOUT;
    }

    pthread_mutex_lock(&task->env->mutex);

    /* Release the (already fired) timer and clear the reference so
     * synctask_wake() does not try to cancel it again. */
    gf_timer_call_cancel(task->xl->ctx, task->timer);
    task->timer = NULL;

    __synctask_wake(task);

    pthread_mutex_unlock(&task->env->mutex);
}
698

699
/* Run 'task' on the calling processor thread: switch into its
 * ucontext, and when control returns either finalize the task (DONE)
 * or park it — back on the run queue if it was woken while running,
 * otherwise on the wait queue, optionally arming a wakeup timer. */
void
synctask_switchto(struct synctask *task)
{
    struct syncenv *env = NULL;

    env = task->env;

    synctask_set(task);
    THIS = task->xl;

#if defined(__NetBSD__) && defined(_UC_TLSBASE)
    /* Preserve pthread private pointer through swapcontex() */
    task->ctx.uc_flags &= ~_UC_TLSBASE;
#endif

#ifdef HAVE_TSAN_API
    __tsan_switch_to_fiber(task->tsan.fiber, 0);
#endif

#ifdef HAVE_ASAN_API
    /* Tell ASan we are about to jump onto the task's stack. */
    __sanitizer_start_switch_fiber(&task->proc->fake_stack,
                                   task->ctx.uc_stack.ss_sp,
                                   task->ctx.uc_stack.ss_size);
#endif

    if (swapcontext(&task->proc->sched, &task->ctx) < 0) {
        gf_msg("syncop", GF_LOG_ERROR, errno, LG_MSG_SWAPCONTEXT_FAILED,
               "swapcontext failed");
    }

#ifdef HAVE_ASAN_API
    __sanitizer_finish_switch_fiber(task->fake_stack, NULL, NULL);
#endif

    if (task->state == SYNCTASK_DONE) {
        synctask_done(task);
        return;
    }

    pthread_mutex_lock(&env->mutex);
    {
        if (task->woken) {
            /* Something woke the task while it ran; keep it runnable. */
            __run(task);
        } else {
            task->slept = 1;
            __wait(task);

            /* The task yielded with a timeout: arm its wakeup timer. */
            if (task->delta != NULL) {
                task->timer = gf_timer_call_after(task->xl->ctx, *task->delta,
                                                  synctask_timer, task);
            }
        }

        task->delta = NULL;
    }
    pthread_mutex_unlock(&env->mutex);
}
756

757
#ifdef HAVE_VALGRIND_API
758

759
static unsigned
760
__valgrind_register_current_stack(void)
761
{
762
    pthread_attr_t attr;
763
    size_t stacksize;
764
    void *stack;
765
    int ret;
766

767
    ret = pthread_getattr_np(pthread_self(), &attr);
768
    GF_ASSERT(ret == 0);
769

770
    ret = pthread_attr_getstack(&attr, &stack, &stacksize);
771
    GF_ASSERT(ret == 0);
772

773
    return VALGRIND_STACK_REGISTER(stack, stack + stacksize);
774
}
775

776
#endif /* HAVE_VALGRIND_API */
777

778
/* Main loop of a syncenv processor thread: keep fetching runnable
 * tasks and switching into them until syncenv_task() signals thread
 * exit by returning NULL (idle scale-down or env destruction). */
void *
syncenv_processor(void *thdata)
{
    struct syncproc *proc = NULL;
    struct synctask *task = NULL;

    proc = thdata;

#ifdef HAVE_TSAN_API
    /* Each processor's scheduler context is modelled as a TSan fiber. */
    proc->tsan.fiber = __tsan_create_fiber(0);
    snprintf(proc->tsan.name, TSAN_THREAD_NAMELEN, "<sched of syncenv@%p>",
             proc);
    __tsan_set_fiber_name(proc->tsan.fiber, proc->tsan.name);
#endif

#ifdef HAVE_VALGRIND_API
    proc->stackid = __valgrind_register_current_stack();
#endif

    while ((task = syncenv_task(proc)) != NULL) {
        synctask_switchto(task);
    }

#ifdef HAVE_TSAN_API
    __tsan_destroy_fiber(proc->tsan.fiber);
#endif

#ifdef HAVE_VALGRIND_API
    VALGRIND_STACK_DEREGISTER(proc->stackid);
#endif

    return NULL;
}
811

812
/* The syncenv threads are cleaned up in this routine.
 * Safe to call with NULL. Blocks until every processor thread of
 * 'env' has exited, then frees 'env'.
 */
void
syncenv_destroy(struct syncenv *env)
{
    if (env == NULL)
        return;

    /* SET the 'destroy' in syncenv structure to prohibit any
     * further synctask(s) on this syncenv which is in destroy mode.
     *
     * If syncenv threads are in pthread cond wait with no tasks in
     * their run or wait queue, then the threads are woken up by
     * broadcasting the cond variable and if destroy field is set,
     * the infinite loop in syncenv_processor is broken and the
     * threads return.
     *
     * If syncenv threads have tasks in runq or waitq, the tasks are
     * completed and only then the thread returns.
     */
    pthread_mutex_lock(&env->mutex);
    {
        env->destroy = 1;
        /* This broadcast will wake threads in pthread_cond_wait
         * in syncenv_task
         */
        pthread_cond_broadcast(&env->cond);

        /* when the syncenv_task() thread is exiting, it broadcasts to
         * wake the below wait.
         */
        while (env->procs != 0) {
            pthread_cond_wait(&env->cond, &env->mutex);
        }
    }
    pthread_mutex_unlock(&env->mutex);

    pthread_mutex_destroy(&env->mutex);
    pthread_cond_destroy(&env->cond);

    GF_FREE(env);

    return;
}
856

857
struct syncenv *
858
syncenv_new(size_t stacksize, int procmin, int procmax)
859
{
860
    struct syncenv *newenv = NULL;
861
    int ret = 0;
862
    int i = 0;
863

864
    if (!procmin || procmin < 0)
865
        procmin = SYNCENV_PROC_MIN;
866
    if (!procmax || procmax > SYNCENV_PROC_MAX)
867
        procmax = SYNCENV_PROC_MAX;
868

869
    if (procmin > procmax)
870
        return NULL;
871

872
    newenv = GF_CALLOC(1, sizeof(*newenv), gf_common_mt_syncenv);
873

874
    if (!newenv)
875
        return NULL;
876

877
    pthread_mutex_init(&newenv->mutex, NULL);
878
    pthread_cond_init(&newenv->cond, NULL);
879

880
    INIT_LIST_HEAD(&newenv->runq);
881
    INIT_LIST_HEAD(&newenv->waitq);
882

883
    newenv->stacksize = SYNCENV_DEFAULT_STACKSIZE;
884
    if (stacksize)
885
        newenv->stacksize = stacksize;
886
    newenv->procmin = procmin;
887
    newenv->procmax = procmax;
888
    newenv->procs_idle = 0;
889

890
    for (i = 0; i < newenv->procmin; i++) {
891
        newenv->proc[i].env = newenv;
892
        ret = gf_thread_create(&newenv->proc[i].processor, NULL,
893
                               syncenv_processor, &newenv->proc[i], "sproc%d",
894
                               i);
895
        if (ret)
896
            break;
897
        newenv->procs++;
898
    }
899

900
    if (ret != 0) {
901
        syncenv_destroy(newenv);
902
        newenv = NULL;
903
    }
904

905
    return newenv;
906
}
907

908
int
909
synclock_init(synclock_t *lock, lock_attr_t attr)
910
{
911
    if (!lock)
912
        return -1;
913

914
    pthread_cond_init(&lock->cond, 0);
915
    lock->type = LOCK_NULL;
916
    lock->owner = NULL;
917
    lock->owner_tid = 0;
918
    lock->lock = 0;
919
    lock->attr = attr;
920
    INIT_LIST_HEAD(&lock->waitq);
921

922
    return pthread_mutex_init(&lock->guard, 0);
923
}
924

925
int
926
synclock_destroy(synclock_t *lock)
927
{
928
    if (!lock)
929
        return -1;
930

931
    pthread_cond_destroy(&lock->cond);
932
    return pthread_mutex_destroy(&lock->guard);
933
}
934

935
/* Acquire 'lock' for the calling synctask or plain thread. Must be
 * called with lock->guard held; the guard is temporarily dropped while
 * a synctask waits (it yields instead of blocking the processor). For
 * SYNC_LOCK_RECURSIVE locks a repeated acquisition by the current
 * owner only increments the count; the same number of unlocks is
 * required. Returns 0 on success, -1 if 'lock' is NULL. */
static int
__synclock_lock(struct synclock *lock)
{
    struct synctask *task = NULL;

    if (!lock)
        return -1;

    task = synctask_get();

    if (lock->lock && (lock->attr == SYNC_LOCK_RECURSIVE)) {
        /*Recursive lock (if same owner requested for lock again then
         *increment lock count and return success).
         *Note:same number of unlocks required.
         */
        switch (lock->type) {
            case LOCK_TASK:
                if (task == lock->owner) {
                    lock->lock++;
                    gf_msg_trace("", 0,
                                 "Recursive lock called by"
                                 " sync task.owner= %p,lock=%d",
                                 lock->owner, lock->lock);
                    return 0;
                }
                break;
            case LOCK_THREAD:
                if (pthread_equal(pthread_self(), lock->owner_tid)) {
                    lock->lock++;
                    gf_msg_trace("", 0,
                                 "Recursive lock called by"
                                 " thread ,owner=%u lock=%d",
                                 (unsigned int)lock->owner_tid, lock->lock);
                    return 0;
                }
                break;
            default:
                gf_msg("", GF_LOG_CRITICAL, 0, LG_MSG_UNKNOWN_LOCK_TYPE,
                       "unknown lock type");
                break;
        }
    }

    while (lock->lock) {
        if (task) {
            /* called within a synctask */
            task->woken = 0;
            list_add_tail(&task->waitq, &lock->waitq);
            pthread_mutex_unlock(&lock->guard);
            synctask_yield(task, NULL);
            /* task is removed from waitq in unlock,
             * under lock->guard.*/
            pthread_mutex_lock(&lock->guard);
        } else {
            /* called by a non-synctask */
            pthread_cond_wait(&lock->cond, &lock->guard);
        }
    }

    /* Lock is free: record the new owner and its kind. */
    if (task) {
        lock->type = LOCK_TASK;
        lock->owner = task; /* for synctask*/

    } else {
        lock->type = LOCK_THREAD;
        lock->owner_tid = pthread_self(); /* for non-synctask */
    }
    lock->lock = 1;

    return 0;
}
1006

1007
int
1008
synclock_lock(synclock_t *lock)
1009
{
1010
    int ret = 0;
1011

1012
    pthread_mutex_lock(&lock->guard);
1013
    {
1014
        ret = __synclock_lock(lock);
1015
    }
1016
    pthread_mutex_unlock(&lock->guard);
1017

1018
    return ret;
1019
}
1020

1021
int
1022
synclock_trylock(synclock_t *lock)
1023
{
1024
    int ret = 0;
1025

1026
    errno = 0;
1027

1028
    pthread_mutex_lock(&lock->guard);
1029
    {
1030
        if (lock->lock) {
1031
            errno = EBUSY;
1032
            ret = -1;
1033
            goto unlock;
1034
        }
1035

1036
        ret = __synclock_lock(lock);
1037
    }
1038
unlock:
1039
    pthread_mutex_unlock(&lock->guard);
1040

1041
    return ret;
1042
}
1043

1044
/* Release one recursion level of 'lock'; must be called with lock->guard
 * held. Verifies that the caller matches the recorded owner (synctask
 * pointer for LOCK_TASK, thread id for LOCK_THREAD). When the count drops
 * to zero, resets ownership and wakes waiters.
 * Returns -1 on NULL lock or unlock-before-lock; otherwise 0.
 * NOTE(review): an unlock attempt by a non-owner only logs a warning and
 * still returns 0 without decrementing — callers cannot detect it. */
static int
__synclock_unlock(synclock_t *lock)
{
    struct synctask *task = NULL;
    struct synctask *curr = NULL;

    if (!lock)
        return -1;

    if (lock->lock == 0) {
        gf_msg("", GF_LOG_CRITICAL, 0, LG_MSG_UNLOCK_BEFORE_LOCK,
               "Unlock called  before lock ");
        return -1;
    }
    curr = synctask_get();
    /*unlock should be called by lock owner
     *i.e this will not allow the lock in nonsync task and unlock
     * in sync task and vice-versa
     */
    switch (lock->type) {
        case LOCK_TASK:
            /* owner identity is the synctask pointer */
            if (curr == lock->owner) {
                lock->lock--;
                gf_msg_trace("", 0,
                             "Unlock success %p, remaining"
                             " locks=%d",
                             lock->owner, lock->lock);
            } else {
                gf_msg("", GF_LOG_WARNING, 0, LG_MSG_LOCK_OWNER_ERROR,
                       "Unlock called by %p, but lock held by %p", curr,
                       lock->owner);
            }

            break;
        case LOCK_THREAD:
            /* owner identity is the pthread id */
            if (pthread_equal(pthread_self(), lock->owner_tid)) {
                lock->lock--;
                gf_msg_trace("", 0,
                             "Unlock success %u, remaining "
                             "locks=%d",
                             (unsigned int)lock->owner_tid, lock->lock);
            } else {
                gf_msg("", GF_LOG_WARNING, 0, LG_MSG_LOCK_OWNER_ERROR,
                       "Unlock called by %u, but lock held by %u",
                       (unsigned int)pthread_self(),
                       (unsigned int)lock->owner_tid);
            }

            break;
        default:
            break;
    }

    /* still held recursively: nothing more to do */
    if (lock->lock > 0) {
        return 0;
    }
    lock->type = LOCK_NULL;
    lock->owner = NULL;
    lock->owner_tid = 0;
    lock->lock = 0;
    /* There could be both synctasks and non synctasks
       waiting (or none, or either). As a mid-approach
       between maintaining too many waiting counters
       at one extreme and a thundering herd on unlock
       at the other, call a cond_signal (which wakes
       one waiter) and first synctask waiter. So at
       most we have two threads waking up to grab the
       just released lock.
    */
    pthread_cond_signal(&lock->cond);
    if (!list_empty(&lock->waitq)) {
        task = list_entry(lock->waitq.next, struct synctask, waitq);
        list_del_init(&task->waitq);
        synctask_wake(task);
    }

    return 0;
}
1122

1123
int
1124
synclock_unlock(synclock_t *lock)
1125
{
1126
    int ret = 0;
1127

1128
    pthread_mutex_lock(&lock->guard);
1129
    {
1130
        ret = __synclock_unlock(lock);
1131
    }
1132
    pthread_mutex_unlock(&lock->guard);
1133

1134
    return ret;
1135
}
1136

1137
/* Condition variables */
1138

1139
/* Initialize a synccond: an empty synctask wait queue plus a pthread
 * mutex/cond pair for non-synctask waiters.
 * Returns 0 on success, a negative pthread error code on failure. */
int32_t
synccond_init(synccond_t *cond)
{
    int32_t ret;

    INIT_LIST_HEAD(&cond->waitq);

    ret = pthread_mutex_init(&cond->pmutex, NULL);
    if (ret != 0) {
        return -ret;
    }

    ret = pthread_cond_init(&cond->pcond, NULL);
    if (ret != 0) {
        /* roll back the mutex initialized above */
        pthread_mutex_destroy(&cond->pmutex);
    }

    return -ret;
}
1158

1159
/* Destroy the pthread primitives of a synccond. The caller must ensure
 * no waiters remain (queued synctasks are not drained here). */
void
synccond_destroy(synccond_t *cond)
{
    pthread_cond_destroy(&cond->pcond);
    pthread_mutex_destroy(&cond->pmutex);
}
1165

1166
/* Wait on 'cond' for at most 'delta' (relative time; NULL = wait forever),
 * releasing 'lock' while waiting and re-acquiring it before returning.
 * Non-synctask callers block on the internal pthread condvar; synctask
 * callers enqueue themselves on cond->waitq and yield.
 * Returns 0 on wakeup, or a negative errno-style value on error/timeout. */
int
synccond_timedwait(synccond_t *cond, synclock_t *lock, struct timespec *delta)
{
    struct timespec now;
    struct synctask *task = NULL;
    int ret;

    task = synctask_get();

    if (task == NULL) {
        if (delta != NULL) {
            /* pthread_cond_timedwait() takes an absolute CLOCK_REALTIME
             * deadline, so convert the relative delta first. */
            timespec_now_realtime(&now);
            timespec_adjust_delta(&now, *delta);
        }

        pthread_mutex_lock(&cond->pmutex);

        if (delta == NULL) {
            ret = -pthread_cond_wait(&cond->pcond, &cond->pmutex);
        } else {
            ret = -pthread_cond_timedwait(&cond->pcond, &cond->pmutex, &now);
        }
    } else {
        pthread_mutex_lock(&cond->pmutex);

        /* Enqueue under pmutex BEFORE dropping 'lock', so a signal that
         * arrives in between cannot be missed. */
        list_add_tail(&task->waitq, &cond->waitq);
        task->synccond = cond;

        ret = synclock_unlock(lock);
        if (ret == 0) {
            pthread_mutex_unlock(&cond->pmutex);

            synctask_yield(task, delta);

            ret = synclock_lock(lock);
            if (ret == 0) {
                /* task->ret carries the wakeup/timeout status */
                ret = task->ret;
            }
            task->ret = 0;

            return ret;
        }

        /* Failed to release 'lock': undo the enqueue and report error. */
        list_del_init(&task->waitq);
    }

    pthread_mutex_unlock(&cond->pmutex);

    return ret;
}
1216

1217
/* Wait on 'cond' with no timeout; see synccond_timedwait(). */
int
synccond_wait(synccond_t *cond, synclock_t *lock)
{
    return synccond_timedwait(cond, lock, NULL);
}
1222

1223
/* Wake one waiter of 'cond': prefer the oldest queued synctask; if none
 * are queued, signal the pthread condvar for non-synctask waiters. The
 * synctask is woken after dropping pmutex to avoid waking into a held
 * mutex. */
void
synccond_signal(synccond_t *cond)
{
    struct synctask *task;

    pthread_mutex_lock(&cond->pmutex);

    if (!list_empty(&cond->waitq)) {
        task = list_first_entry(&cond->waitq, struct synctask, waitq);
        list_del_init(&task->waitq);

        pthread_mutex_unlock(&cond->pmutex);

        synctask_wake(task);
    } else {
        pthread_cond_signal(&cond->pcond);

        pthread_mutex_unlock(&cond->pmutex);
    }
}
1243

1244
/* Wake every waiter of 'cond': detach the entire synctask wait list
 * under pmutex, broadcast to non-synctask waiters, then wake each
 * detached synctask outside the mutex. */
void
synccond_broadcast(synccond_t *cond)
{
    struct list_head list;
    struct synctask *task;

    INIT_LIST_HEAD(&list);

    pthread_mutex_lock(&cond->pmutex);

    /* take ownership of all queued synctasks in one splice */
    list_splice_init(&cond->waitq, &list);
    pthread_cond_broadcast(&cond->pcond);

    pthread_mutex_unlock(&cond->pmutex);

    while (!list_empty(&list)) {
        task = list_first_entry(&list, struct synctask, waitq);
        list_del_init(&task->waitq);

        synctask_wake(task);
    }
}
1266

1267
/* Barriers */
1268

1269
/* Initialize a syncbarrier: zeroed counters, empty synctask wait queue,
 * and fresh pthread cond/mutex.
 * Returns 0 on success; -1 with errno set to the pthread error code. */
int
syncbarrier_init(struct syncbarrier *barrier)
{
    int ret = 0;
    if (!barrier) {
        errno = EINVAL;
        return -1;
    }

    ret = pthread_cond_init(&barrier->cond, 0);
    if (ret) {
        errno = ret;
        return -1;
    }
    barrier->count = 0;
    barrier->waitfor = 0;
    INIT_LIST_HEAD(&barrier->waitq);

    ret = pthread_mutex_init(&barrier->guard, 0);
    if (ret) {
        /* roll back the condvar initialized above */
        (void)pthread_cond_destroy(&barrier->cond);
        errno = ret;
        return -1;
    }
    barrier->initialized = _gf_true;
    return 0;
}
1296

1297
/* Destroy a syncbarrier's pthread primitives (only if it was marked
 * initialized; safe to call on a never-initialized barrier).
 * Returns 0 on success; -1 with errno set to the first pthread error. */
int
syncbarrier_destroy(struct syncbarrier *barrier)
{
    int ret = 0;
    int ret1 = 0;
    if (!barrier) {
        errno = EINVAL;
        return -1;
    }

    if (barrier->initialized) {
        /* attempt both destroys even if the first fails */
        ret = pthread_cond_destroy(&barrier->cond);
        ret1 = pthread_mutex_destroy(&barrier->guard);
        barrier->initialized = _gf_false;
    }
    if (ret || ret1) {
        errno = ret ? ret : ret1;
        return -1;
    }
    return 0;
}
1318

1319
/* Wait until barrier->count reaches 'waitfor'; must be called with
 * barrier->guard held. Synctask callers queue on barrier->waitq and
 * yield (dropping the guard across the yield); non-synctask callers
 * block on barrier->cond. Resets count to 0 before returning.
 * Returns 0 on success, -1 with errno = EINVAL on NULL barrier. */
static int
__syncbarrier_wait(struct syncbarrier *barrier, int waitfor)
{
    struct synctask *task = NULL;

    if (!barrier) {
        errno = EINVAL;
        return -1;
    }

    task = synctask_get();

    while (barrier->count < waitfor) {
        if (task) {
            /* called within a synctask */
            list_add_tail(&task->waitq, &barrier->waitq);
            pthread_mutex_unlock(&barrier->guard);
            synctask_yield(task, NULL);
            pthread_mutex_lock(&barrier->guard);
        } else {
            /* called by a non-synctask */
            pthread_cond_wait(&barrier->cond, &barrier->guard);
        }
    }

    /* consume the events so the barrier is reusable */
    barrier->count = 0;

    return 0;
}
1348

1349
int
1350
syncbarrier_wait(struct syncbarrier *barrier, int waitfor)
1351
{
1352
    int ret = 0;
1353

1354
    pthread_mutex_lock(&barrier->guard);
1355
    {
1356
        ret = __syncbarrier_wait(barrier, waitfor);
1357
    }
1358
    pthread_mutex_unlock(&barrier->guard);
1359

1360
    return ret;
1361
}
1362

1363
/* Post one wake event to the barrier; must be called with barrier->guard
 * held. If a wake target (waitfor) was set and has not been reached yet,
 * just count the event. Otherwise wake one condvar waiter plus the first
 * queued synctask, and clear the target.
 * Returns 0 on success, -1 with errno = EINVAL on NULL barrier. */
static int
__syncbarrier_wake(struct syncbarrier *barrier)
{
    struct synctask *task = NULL;

    if (!barrier) {
        errno = EINVAL;
        return -1;
    }

    barrier->count++;
    if (barrier->waitfor && (barrier->count < barrier->waitfor))
        return 0;

    /* wake at most one thread and one synctask (see comment in
     * __synclock_unlock about avoiding a thundering herd) */
    pthread_cond_signal(&barrier->cond);
    if (!list_empty(&barrier->waitq)) {
        task = list_entry(barrier->waitq.next, struct synctask, waitq);
        list_del_init(&task->waitq);
        synctask_wake(task);
    }
    barrier->waitfor = 0;

    return 0;
}
1387

1388
int
1389
syncbarrier_wake(struct syncbarrier *barrier)
1390
{
1391
    int ret = 0;
1392

1393
    pthread_mutex_lock(&barrier->guard);
1394
    {
1395
        ret = __syncbarrier_wake(barrier);
1396
    }
1397
    pthread_mutex_unlock(&barrier->guard);
1398

1399
    return ret;
1400
}
1401

1402
/* FOPS */
1403

1404
/* Callback for syncop_lookup(): copies the results into the syncargs
 * passed as cookie and wakes the waiting caller. */
int
syncop_lookup_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
                  int op_errno, inode_t *inode, struct iatt *iatt,
                  dict_t *xdata, struct iatt *parent)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    if (op_ret == 0) {
        /* iatt/parent are only valid on success */
        args->iatt1 = *iatt;
        args->iatt2 = *parent;
    }

    __wake(args);

    return 0;
}

/* Synchronous lookup on 'loc'. On success fills 'iatt' and 'parent'
 * (each may be NULL if not wanted). If 'xdata_out' is non-NULL the
 * caller takes ownership of the returned dict reference; otherwise it
 * is dropped here. Returns op_ret (>= 0) on success, -errno on failure. */
int
syncop_lookup(xlator_t *subvol, loc_t *loc, struct iatt *iatt,
              struct iatt *parent, dict_t *xdata_in, dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_lookup_cbk, subvol->fops->lookup, loc,
           xdata_in);

    if (iatt)
        *iatt = args.iatt1;
    if (parent)
        *parent = args.iatt2;
    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
1452

1453
int32_t
1454
syncop_readdirp_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
1455
                    int32_t op_ret, int32_t op_errno, gf_dirent_t *entries,
1456
                    dict_t *xdata)
1457
{
1458
    struct syncargs *args = NULL;
1459
    gf_dirent_t *entry = NULL;
1460
    gf_dirent_t *tmp = NULL;
1461

1462
    int count = 0;
1463

1464
    args = cookie;
1465

1466
    INIT_LIST_HEAD(&args->entries.list);
1467

1468
    args->op_ret = op_ret;
1469
    args->op_errno = op_errno;
1470
    if (xdata)
1471
        args->xdata = dict_ref(xdata);
1472

1473
    if (op_ret >= 0) {
1474
        list_for_each_entry(entry, &entries->list, list)
1475
        {
1476
            tmp = entry_copy(entry);
1477
            if (!tmp) {
1478
                args->op_ret = -1;
1479
                args->op_errno = ENOMEM;
1480
                gf_dirent_free(&(args->entries));
1481
                break;
1482
            }
1483
            gf_msg_trace(this->name, 0,
1484
                         "adding entry=%s, "
1485
                         "count=%d",
1486
                         tmp->d_name, count);
1487
            list_add_tail(&tmp->list, &(args->entries.list));
1488
            count++;
1489
        }
1490
    }
1491

1492
    __wake(args);
1493

1494
    return 0;
1495
}
1496

1497
int
1498
syncop_readdirp(xlator_t *subvol, fd_t *fd, size_t size, off_t off,
1499
                gf_dirent_t *entries, dict_t *xdata_in, dict_t **xdata_out)
1500
{
1501
    struct syncargs args = {
1502
        0,
1503
    };
1504

1505
    SYNCOP(subvol, (&args), syncop_readdirp_cbk, subvol->fops->readdirp, fd,
1506
           size, off, xdata_in);
1507

1508
    if (entries)
1509
        list_splice_init(&args.entries.list, &entries->list);
1510
    else
1511
        gf_dirent_free(&args.entries);
1512

1513
    if (xdata_out)
1514
        *xdata_out = args.xdata;
1515
    else if (args.xdata)
1516
        dict_unref(args.xdata);
1517

1518
    if (args.op_ret < 0)
1519
        return -args.op_errno;
1520
    return args.op_ret;
1521
}
1522

1523
/* Callback for syncop_readdir(): deep-copies each returned dirent into
 * args->entries and wakes the waiting caller. On a failed entry copy the
 * whole result is turned into -ENOMEM and already-copied entries freed. */
int32_t
syncop_readdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                   int32_t op_ret, int32_t op_errno, gf_dirent_t *entries,
                   dict_t *xdata)
{
    struct syncargs *args = NULL;
    gf_dirent_t *entry = NULL;
    gf_dirent_t *tmp = NULL;

    int count = 0;

    args = cookie;

    INIT_LIST_HEAD(&args->entries.list);

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    if (op_ret >= 0) {
        list_for_each_entry(entry, &entries->list, list)
        {
            tmp = entry_copy(entry);
            if (!tmp) {
                args->op_ret = -1;
                args->op_errno = ENOMEM;
                gf_dirent_free(&(args->entries));
                break;
            }
            gf_msg_trace(this->name, 0,
                         "adding "
                         "entry=%s, count=%d",
                         tmp->d_name, count);
            list_add_tail(&tmp->list, &(args->entries.list));
            count++;
        }
    }

    __wake(args);

    return 0;
}

/* Synchronous readdir on 'fd'. On success the copied dirents are spliced
 * onto 'entries' (caller frees them via gf_dirent_free()); if 'entries'
 * is NULL they are freed here. xdata_out (if non-NULL) receives ownership
 * of the returned dict reference.
 * Returns op_ret on success, -errno on failure. */
int
syncop_readdir(xlator_t *subvol, fd_t *fd, size_t size, off_t off,
               gf_dirent_t *entries, dict_t *xdata_in, dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    INIT_LIST_HEAD(&args.entries.list);

    SYNCOP(subvol, (&args), syncop_readdir_cbk, subvol->fops->readdir, fd, size,
           off, xdata_in);

    if (entries)
        list_splice_init(&args.entries.list, &entries->list);
    else
        gf_dirent_free(&args.entries);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
1594

1595
/* Callback for syncop_opendir(): records status/xdata in the syncargs
 * cookie and wakes the waiting caller. */
int32_t
syncop_opendir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                   int32_t op_ret, int32_t op_errno, fd_t *fd, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}

/* Synchronous opendir of 'loc' on the caller-supplied 'fd'. xdata_out
 * (if non-NULL) receives ownership of the returned dict reference.
 * Returns op_ret on success, -errno on failure. */
int
syncop_opendir(xlator_t *subvol, loc_t *loc, fd_t *fd, dict_t *xdata_in,
               dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_opendir_cbk, subvol->fops->opendir, loc, fd,
           xdata_in);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
1633

1634
/* Callback for syncop_fsyncdir(): records status/xdata in the syncargs
 * cookie and wakes the waiting caller. */
int
syncop_fsyncdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                    int op_ret, int op_errno, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}

/* Synchronous fsyncdir on 'fd' ('datasync' non-zero = data-only sync).
 * xdata_out (if non-NULL) receives ownership of the returned dict
 * reference. Returns op_ret on success, -errno on failure. */
int
syncop_fsyncdir(xlator_t *subvol, fd_t *fd, int datasync, dict_t *xdata_in,
                dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_fsyncdir_cbk, subvol->fops->fsyncdir, fd,
           datasync, xdata_in);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
1672

1673
/* Callback for syncop_removexattr(): records status/xdata in the
 * syncargs cookie and wakes the waiting caller. */
int
syncop_removexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                       int op_ret, int op_errno, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}

/* Synchronously remove xattr 'name' from 'loc'. xdata_out (if non-NULL)
 * receives ownership of the returned dict reference.
 * Returns op_ret on success, -errno on failure. */
int
syncop_removexattr(xlator_t *subvol, loc_t *loc, const char *name,
                   dict_t *xdata_in, dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_removexattr_cbk, subvol->fops->removexattr,
           loc, name, xdata_in);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
1711

1712
/* Callback for syncop_fremovexattr(): records status/xdata in the
 * syncargs cookie and wakes the waiting caller. */
int
syncop_fremovexattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                        int op_ret, int op_errno, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}

/* Synchronously remove xattr 'name' via 'fd'. xdata_out (if non-NULL)
 * receives ownership of the returned dict reference.
 * Returns op_ret on success, -errno on failure. */
int
syncop_fremovexattr(xlator_t *subvol, fd_t *fd, const char *name,
                    dict_t *xdata_in, dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_fremovexattr_cbk, subvol->fops->fremovexattr,
           fd, name, xdata_in);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
1750

1751
/* Callback for syncop_setxattr(): records status/xdata in the syncargs
 * cookie and wakes the waiting caller. */
int
syncop_setxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                    int op_ret, int op_errno, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}

/* Synchronously set the xattrs in 'dict' on 'loc' ('flags' as for
 * setxattr(2): XATTR_CREATE/XATTR_REPLACE semantics). xdata_out (if
 * non-NULL) receives ownership of the returned dict reference.
 * Returns op_ret on success, -errno on failure. */
int
syncop_setxattr(xlator_t *subvol, loc_t *loc, dict_t *dict, int32_t flags,
                dict_t *xdata_in, dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_setxattr_cbk, subvol->fops->setxattr, loc,
           dict, flags, xdata_in);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
1789

1790
/* Callback for syncop_fsetxattr(): records status/xdata in the syncargs
 * cookie and wakes the waiting caller. */
int
syncop_fsetxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                     int op_ret, int op_errno, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}

/* Synchronously set the xattrs in 'dict' via 'fd'. xdata_out (if
 * non-NULL) receives ownership of the returned dict reference.
 * Returns op_ret on success, -errno on failure. */
int
syncop_fsetxattr(xlator_t *subvol, fd_t *fd, dict_t *dict, int32_t flags,
                 dict_t *xdata_in, dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_fsetxattr_cbk, subvol->fops->fsetxattr, fd,
           dict, flags, xdata_in);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
1828

1829
/* Shared callback for syncop_getxattr()/syncop_fgetxattr()/
 * syncop_listxattr(): stashes the xattr dict (only on success) and
 * xdata in the syncargs cookie and wakes the waiting caller. */
int
syncop_getxattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                    int op_ret, int op_errno, dict_t *dict, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    if (op_ret >= 0)
        args->xattr = dict_ref(dict);

    __wake(args);

    return 0;
}
1849

1850
/* Synchronously list all xattrs of 'loc' (getxattr with a NULL key).
 * 'dict' (if non-NULL) receives ownership of the xattr dict reference;
 * likewise xdata_out for the returned xdata.
 * Returns op_ret on success, -errno on failure. */
int
syncop_listxattr(xlator_t *subvol, loc_t *loc, dict_t **dict, dict_t *xdata_in,
                 dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_getxattr_cbk, subvol->fops->getxattr, loc,
           NULL, xdata_in);

    if (dict)
        *dict = args.xattr;
    else if (args.xattr)
        dict_unref(args.xattr);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
1875

1876
/* Synchronously get xattr 'key' of 'loc'. 'dict' (if non-NULL) receives
 * ownership of the xattr dict reference; likewise xdata_out for the
 * returned xdata. Returns op_ret on success, -errno on failure. */
int
syncop_getxattr(xlator_t *subvol, loc_t *loc, dict_t **dict, const char *key,
                dict_t *xdata_in, dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_getxattr_cbk, subvol->fops->getxattr, loc,
           key, xdata_in);

    if (dict)
        *dict = args.xattr;
    else if (args.xattr)
        dict_unref(args.xattr);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
1901

1902
/* Synchronously get xattr 'key' via 'fd'. 'dict' (if non-NULL) receives
 * ownership of the xattr dict reference; likewise xdata_out for the
 * returned xdata. Returns op_ret on success, -errno on failure. */
int
syncop_fgetxattr(xlator_t *subvol, fd_t *fd, dict_t **dict, const char *key,
                 dict_t *xdata_in, dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_getxattr_cbk, subvol->fops->fgetxattr, fd,
           key, xdata_in);

    if (dict)
        *dict = args.xattr;
    else if (args.xattr)
        dict_unref(args.xattr);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
1927

1928
/* Callback for syncop_statfs(): copies the statvfs result (on success)
 * and xdata into the syncargs cookie and wakes the waiting caller. */
int
syncop_statfs_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                  int32_t op_ret, int32_t op_errno, struct statvfs *buf,
                  dict_t *xdata)

{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    if (op_ret == 0) {
        args->statvfs_buf = *buf;
    }

    __wake(args);

    return 0;
}

/* Synchronous statfs on 'loc'. On success fills 'buf' (if non-NULL).
 * xdata_out (if non-NULL) receives ownership of the returned dict
 * reference. Returns op_ret on success, -errno on failure. */
int
syncop_statfs(xlator_t *subvol, loc_t *loc, struct statvfs *buf,
              dict_t *xdata_in, dict_t **xdata_out)

{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_statfs_cbk, subvol->fops->statfs, loc,
           xdata_in);

    if (buf)
        *buf = args.statvfs_buf;
    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
1975

1976
/* Shared callback for syncop_setattr()/syncop_fsetattr(): copies the
 * pre/post-op iatts (on success) and xdata into the syncargs cookie and
 * wakes the waiting caller. */
int
syncop_setattr_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                   int op_ret, int op_errno, struct iatt *preop,
                   struct iatt *postop, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    if (op_ret == 0) {
        args->iatt1 = *preop;
        args->iatt2 = *postop;
    }

    __wake(args);

    return 0;
}

/* Synchronous setattr on 'loc'; 'valid' selects which fields of 'iatt'
 * to apply. On success fills 'preop'/'postop' (each may be NULL).
 * xdata_out (if non-NULL) receives ownership of the returned dict
 * reference. Returns op_ret on success, -errno on failure. */
int
syncop_setattr(xlator_t *subvol, loc_t *loc, struct iatt *iatt, int valid,
               struct iatt *preop, struct iatt *postop, dict_t *xdata_in,
               dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_setattr_cbk, subvol->fops->setattr, loc,
           iatt, valid, xdata_in);

    if (preop)
        *preop = args.iatt1;
    if (postop)
        *postop = args.iatt2;

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
2026

2027
/* Synchronous fsetattr on 'fd'; 'valid' selects which fields of 'iatt'
 * to apply. On success fills 'preop'/'postop' (each may be NULL).
 * xdata_out (if non-NULL) receives ownership of the returned dict
 * reference. Returns op_ret on success, -errno on failure.
 * Reuses syncop_setattr_cbk: the callback signatures match. */
int
syncop_fsetattr(xlator_t *subvol, fd_t *fd, struct iatt *iatt, int valid,
                struct iatt *preop, struct iatt *postop, dict_t *xdata_in,
                dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_setattr_cbk, subvol->fops->fsetattr, fd,
           iatt, valid, xdata_in);

    if (preop)
        *preop = args.iatt1;
    if (postop)
        *postop = args.iatt2;

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
2053

2054
/* Callback for syncop_open(): records status/xdata in the syncargs
 * cookie and wakes the waiting caller. */
int32_t
syncop_open_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                int32_t op_ret, int32_t op_errno, fd_t *fd, dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    __wake(args);

    return 0;
}

/* Synchronous open of 'loc' with 'flags' on the caller-supplied 'fd'.
 * xdata_out (if non-NULL) receives ownership of the returned dict
 * reference. Returns op_ret on success, -errno on failure. */
int
syncop_open(xlator_t *subvol, loc_t *loc, int32_t flags, fd_t *fd,
            dict_t *xdata_in, dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_open_cbk, subvol->fops->open, loc, flags, fd,
           xdata_in);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
2092

2093
/* Callback for syncop_readv(): on success duplicates the iovec array,
 * takes a ref on the iobref (keeping the data buffers alive for the
 * caller) and copies the stbuf; then wakes the waiting caller. */
int32_t
syncop_readv_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
                 int32_t op_ret, int32_t op_errno, struct iovec *vector,
                 int32_t count, struct iatt *stbuf, struct iobref *iobref,
                 dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    /* NOTE(review): args->entries is unused on the readv path; this init
     * looks like a copy-paste from the readdir callbacks — harmless. */
    INIT_LIST_HEAD(&args->entries.list);

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    if (args->op_ret >= 0) {
        if (iobref)
            args->iobref = iobref_ref(iobref);
        args->vector = iov_dup(vector, count);
        args->count = count;
        args->iatt1 = *stbuf;
    }

    __wake(args);

    return 0;
}

/* Synchronous readv on 'fd'. On success 'vector' (GF_FREE by caller),
 * 'count', 'iobref' (iobref_unref by caller) and 'iatt' are filled when
 * non-NULL; unwanted results are released here. xdata_out (if non-NULL)
 * receives ownership of the returned dict reference.
 * Returns bytes read (op_ret) on success, -errno on failure. */
int
syncop_readv(xlator_t *subvol, fd_t *fd, size_t size, off_t off, uint32_t flags,
             struct iovec **vector, int *count, struct iobref **iobref,
             struct iatt *iatt, dict_t *xdata_in, dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_readv_cbk, subvol->fops->readv, fd, size,
           off, flags, xdata_in);

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (iatt)
        *iatt = args.iatt1;

    if (args.op_ret < 0)
        goto out;

    if (vector)
        *vector = args.vector;
    else
        GF_FREE(args.vector);

    if (count)
        *count = args.count;

    /* Do we need a 'ref' here? */
    if (iobref)
        *iobref = args.iobref;
    else if (args.iobref)
        iobref_unref(args.iobref);

out:
    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
2165

2166
/* Shared callback for syncop_writev()/syncop_write(): copies the
 * pre/post-op iatts (on success) and xdata into the syncargs cookie and
 * wakes the waiting caller. */
int
syncop_writev_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
                  int op_errno, struct iatt *prebuf, struct iatt *postbuf,
                  dict_t *xdata)
{
    struct syncargs *args = NULL;

    args = cookie;

    args->op_ret = op_ret;
    args->op_errno = op_errno;
    if (xdata)
        args->xdata = dict_ref(xdata);

    if (op_ret >= 0) {
        args->iatt1 = *prebuf;
        args->iatt2 = *postbuf;
    }

    __wake(args);

    return 0;
}

/* Synchronous writev on 'fd'. On success fills 'preiatt'/'postiatt'
 * (each may be NULL). xdata_out (if non-NULL) receives ownership of the
 * returned dict reference.
 * Returns bytes written (op_ret) on success, -errno on failure. */
int
syncop_writev(xlator_t *subvol, fd_t *fd, const struct iovec *vector,
              int32_t count, off_t offset, struct iobref *iobref,
              uint32_t flags, struct iatt *preiatt, struct iatt *postiatt,
              dict_t *xdata_in, dict_t **xdata_out)
{
    struct syncargs args = {
        0,
    };

    SYNCOP(subvol, (&args), syncop_writev_cbk, subvol->fops->writev, fd,
           (struct iovec *)vector, count, offset, flags, iobref, xdata_in);

    if (preiatt)
        *preiatt = args.iatt1;
    if (postiatt)
        *postiatt = args.iatt2;

    if (xdata_out)
        *xdata_out = args.xdata;
    else if (args.xdata)
        dict_unref(args.xdata);

    if (args.op_ret < 0)
        return -args.op_errno;
    return args.op_ret;
}
2217

2218
int
2219
syncop_write(xlator_t *subvol, fd_t *fd, const char *buf, int size,
2220
             off_t offset, struct iobref *iobref, uint32_t flags,
2221
             dict_t *xdata_in, dict_t **xdata_out)
2222
{
2223
    struct syncargs args = {
2224
        0,
2225
    };
2226
    struct iovec vec = {
2227
        0,
2228
    };
2229

2230
    vec.iov_len = size;
2231
    vec.iov_base = (void *)buf;
2232

2233
    SYNCOP(subvol, (&args), syncop_writev_cbk, subvol->fops->writev, fd, &vec,
2234
           1, offset, flags, iobref, xdata_in);
2235

2236
    if (xdata_out)
2237
        *xdata_out = args.xdata;
2238
    else if (args.xdata)
2239
        dict_unref(args.xdata);
2240

2241
    if (args.op_ret < 0)
2242
        return -args.op_errno;
2243
    return args.op_ret;
2244
}
2245

2246
int
2247
syncop_close(fd_t *fd)
2248
{
2249
    if (fd)
2250
        fd_unref(fd);
2251
    return 0;
2252
}
2253

2254
int32_t
2255
syncop_create_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2256
                  int32_t op_ret, int32_t op_errno, fd_t *fd, inode_t *inode,
2257
                  struct iatt *buf, struct iatt *preparent,
2258
                  struct iatt *postparent, dict_t *xdata)
2259
{
2260
    struct syncargs *args = NULL;
2261

2262
    args = cookie;
2263

2264
    args->op_ret = op_ret;
2265
    args->op_errno = op_errno;
2266
    if (xdata)
2267
        args->xdata = dict_ref(xdata);
2268

2269
    if (buf)
2270
        args->iatt1 = *buf;
2271

2272
    __wake(args);
2273

2274
    return 0;
2275
}
2276

2277
int
2278
syncop_create(xlator_t *subvol, loc_t *loc, int32_t flags, mode_t mode,
2279
              fd_t *fd, struct iatt *iatt, dict_t *xdata_in, dict_t **xdata_out)
2280
{
2281
    struct syncargs args = {
2282
        0,
2283
    };
2284

2285
    SYNCOP(subvol, (&args), syncop_create_cbk, subvol->fops->create, loc, flags,
2286
           mode, 0, fd, xdata_in);
2287

2288
    if (iatt)
2289
        *iatt = args.iatt1;
2290

2291
    if (xdata_out)
2292
        *xdata_out = args.xdata;
2293
    else if (args.xdata)
2294
        dict_unref(args.xdata);
2295

2296
    if (args.op_ret < 0)
2297
        return -args.op_errno;
2298
    return args.op_ret;
2299
}
2300

2301
int32_t
2302
syncop_put_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2303
               int32_t op_ret, int32_t op_errno, inode_t *inode,
2304
               struct iatt *buf, struct iatt *preparent,
2305
               struct iatt *postparent, dict_t *xdata)
2306
{
2307
    struct syncargs *args = NULL;
2308

2309
    args = cookie;
2310

2311
    args->op_ret = op_ret;
2312
    args->op_errno = op_errno;
2313
    if (xdata)
2314
        args->xdata = dict_ref(xdata);
2315

2316
    if (buf)
2317
        args->iatt1 = *buf;
2318

2319
    __wake(args);
2320

2321
    return 0;
2322
}
2323

2324
int
2325
syncop_put(xlator_t *subvol, loc_t *loc, mode_t mode, mode_t umask,
2326
           uint32_t flags, struct iovec *vector, int32_t count, off_t offset,
2327
           struct iobref *iobref, dict_t *xattr, struct iatt *iatt,
2328
           dict_t *xdata_in, dict_t **xdata_out)
2329
{
2330
    struct syncargs args = {
2331
        0,
2332
    };
2333

2334
    SYNCOP(subvol, (&args), syncop_put_cbk, subvol->fops->put, loc, mode, umask,
2335
           flags, (struct iovec *)vector, count, offset, iobref, xattr,
2336
           xdata_in);
2337

2338
    if (iatt)
2339
        *iatt = args.iatt1;
2340

2341
    if (xdata_out)
2342
        *xdata_out = args.xdata;
2343
    else if (args.xdata)
2344
        dict_unref(args.xdata);
2345

2346
    if (args.op_ret < 0)
2347
        return -args.op_errno;
2348
    return args.op_ret;
2349
}
2350

2351
int
2352
syncop_unlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
2353
                  int op_errno, struct iatt *preparent, struct iatt *postparent,
2354
                  dict_t *xdata)
2355
{
2356
    struct syncargs *args = NULL;
2357

2358
    args = cookie;
2359

2360
    args->op_ret = op_ret;
2361
    args->op_errno = op_errno;
2362
    if (xdata)
2363
        args->xdata = dict_ref(xdata);
2364

2365
    __wake(args);
2366

2367
    return 0;
2368
}
2369

2370
int
2371
syncop_unlink(xlator_t *subvol, loc_t *loc, dict_t *xdata_in,
2372
              dict_t **xdata_out)
2373
{
2374
    struct syncargs args = {
2375
        0,
2376
    };
2377

2378
    SYNCOP(subvol, (&args), syncop_unlink_cbk, subvol->fops->unlink, loc, 0,
2379
           xdata_in);
2380

2381
    if (xdata_out)
2382
        *xdata_out = args.xdata;
2383
    else if (args.xdata)
2384
        dict_unref(args.xdata);
2385

2386
    if (args.op_ret < 0)
2387
        return -args.op_errno;
2388
    return args.op_ret;
2389
}
2390

2391
int
2392
syncop_rmdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
2393
                 int op_errno, struct iatt *preparent, struct iatt *postparent,
2394
                 dict_t *xdata)
2395
{
2396
    struct syncargs *args = NULL;
2397

2398
    args = cookie;
2399

2400
    args->op_ret = op_ret;
2401
    args->op_errno = op_errno;
2402
    if (xdata)
2403
        args->xdata = dict_ref(xdata);
2404

2405
    __wake(args);
2406

2407
    return 0;
2408
}
2409

2410
int
2411
syncop_rmdir(xlator_t *subvol, loc_t *loc, int flags, dict_t *xdata_in,
2412
             dict_t **xdata_out)
2413
{
2414
    struct syncargs args = {
2415
        0,
2416
    };
2417

2418
    SYNCOP(subvol, (&args), syncop_rmdir_cbk, subvol->fops->rmdir, loc, flags,
2419
           xdata_in);
2420

2421
    if (xdata_out)
2422
        *xdata_out = args.xdata;
2423
    else if (args.xdata)
2424
        dict_unref(args.xdata);
2425

2426
    if (args.op_ret < 0)
2427
        return -args.op_errno;
2428
    return args.op_ret;
2429
}
2430

2431
int
2432
syncop_link_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2433
                int32_t op_ret, int32_t op_errno, inode_t *inode,
2434
                struct iatt *buf, struct iatt *preparent,
2435
                struct iatt *postparent, dict_t *xdata)
2436
{
2437
    struct syncargs *args = NULL;
2438

2439
    args = cookie;
2440

2441
    args->op_ret = op_ret;
2442
    args->op_errno = op_errno;
2443
    if (xdata)
2444
        args->xdata = dict_ref(xdata);
2445

2446
    if (buf)
2447
        args->iatt1 = *buf;
2448

2449
    __wake(args);
2450

2451
    return 0;
2452
}
2453

2454
int
2455
syncop_link(xlator_t *subvol, loc_t *oldloc, loc_t *newloc, struct iatt *iatt,
2456
            dict_t *xdata_in, dict_t **xdata_out)
2457
{
2458
    struct syncargs args = {
2459
        0,
2460
    };
2461

2462
    SYNCOP(subvol, (&args), syncop_link_cbk, subvol->fops->link, oldloc, newloc,
2463
           xdata_in);
2464

2465
    if (iatt)
2466
        *iatt = args.iatt1;
2467

2468
    if (xdata_out)
2469
        *xdata_out = args.xdata;
2470
    else if (args.xdata)
2471
        dict_unref(args.xdata);
2472

2473
    if (args.op_ret < 0)
2474
        return -args.op_errno;
2475

2476
    return args.op_ret;
2477
}
2478

2479
int
2480
syncop_rename_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2481
                  int32_t op_ret, int32_t op_errno, struct iatt *buf,
2482
                  struct iatt *preoldparent, struct iatt *postoldparent,
2483
                  struct iatt *prenewparent, struct iatt *postnewparent,
2484
                  dict_t *xdata)
2485
{
2486
    struct syncargs *args = NULL;
2487

2488
    args = cookie;
2489

2490
    args->op_ret = op_ret;
2491
    args->op_errno = op_errno;
2492
    if (xdata)
2493
        args->xdata = dict_ref(xdata);
2494

2495
    __wake(args);
2496

2497
    return 0;
2498
}
2499

2500
int
2501
syncop_rename(xlator_t *subvol, loc_t *oldloc, loc_t *newloc, dict_t *xdata_in,
2502
              dict_t **xdata_out)
2503
{
2504
    struct syncargs args = {
2505
        0,
2506
    };
2507

2508
    SYNCOP(subvol, (&args), syncop_rename_cbk, subvol->fops->rename, oldloc,
2509
           newloc, xdata_in);
2510

2511
    if (xdata_out)
2512
        *xdata_out = args.xdata;
2513
    else if (args.xdata)
2514
        dict_unref(args.xdata);
2515

2516
    if (args.op_ret < 0)
2517
        return -args.op_errno;
2518

2519
    return args.op_ret;
2520
}
2521

2522
int
2523
syncop_ftruncate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2524
                     int op_ret, int op_errno, struct iatt *prebuf,
2525
                     struct iatt *postbuf, dict_t *xdata)
2526
{
2527
    struct syncargs *args = NULL;
2528

2529
    args = cookie;
2530

2531
    args->op_ret = op_ret;
2532
    args->op_errno = op_errno;
2533
    if (xdata)
2534
        args->xdata = dict_ref(xdata);
2535

2536
    if (op_ret >= 0) {
2537
        args->iatt1 = *prebuf;
2538
        args->iatt2 = *postbuf;
2539
    }
2540

2541
    __wake(args);
2542

2543
    return 0;
2544
}
2545

2546
int
2547
syncop_ftruncate(xlator_t *subvol, fd_t *fd, off_t offset, struct iatt *preiatt,
2548
                 struct iatt *postiatt, dict_t *xdata_in, dict_t **xdata_out)
2549
{
2550
    struct syncargs args = {
2551
        0,
2552
    };
2553

2554
    SYNCOP(subvol, (&args), syncop_ftruncate_cbk, subvol->fops->ftruncate, fd,
2555
           offset, xdata_in);
2556

2557
    if (preiatt)
2558
        *preiatt = args.iatt1;
2559
    if (postiatt)
2560
        *postiatt = args.iatt2;
2561

2562
    if (xdata_out)
2563
        *xdata_out = args.xdata;
2564
    else if (args.xdata)
2565
        dict_unref(args.xdata);
2566

2567
    if (args.op_ret < 0)
2568
        return -args.op_errno;
2569
    return args.op_ret;
2570
}
2571

2572
int
2573
syncop_truncate(xlator_t *subvol, loc_t *loc, off_t offset, dict_t *xdata_in,
2574
                dict_t **xdata_out)
2575
{
2576
    struct syncargs args = {
2577
        0,
2578
    };
2579

2580
    SYNCOP(subvol, (&args), syncop_ftruncate_cbk, subvol->fops->truncate, loc,
2581
           offset, xdata_in);
2582

2583
    if (xdata_out)
2584
        *xdata_out = args.xdata;
2585
    else if (args.xdata)
2586
        dict_unref(args.xdata);
2587

2588
    if (args.op_ret < 0)
2589
        return -args.op_errno;
2590
    return args.op_ret;
2591
}
2592

2593
int
2594
syncop_fsync_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2595
                 int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
2596
                 struct iatt *postbuf, dict_t *xdata)
2597
{
2598
    struct syncargs *args = NULL;
2599

2600
    args = cookie;
2601

2602
    args->op_ret = op_ret;
2603
    args->op_errno = op_errno;
2604
    if (xdata)
2605
        args->xdata = dict_ref(xdata);
2606

2607
    if (op_ret >= 0) {
2608
        args->iatt1 = *prebuf;
2609
        args->iatt2 = *postbuf;
2610
    }
2611

2612
    __wake(args);
2613

2614
    return 0;
2615
}
2616

2617
int
2618
syncop_fsync(xlator_t *subvol, fd_t *fd, int dataonly, struct iatt *preiatt,
2619
             struct iatt *postiatt, dict_t *xdata_in, dict_t **xdata_out)
2620
{
2621
    struct syncargs args = {
2622
        0,
2623
    };
2624

2625
    SYNCOP(subvol, (&args), syncop_fsync_cbk, subvol->fops->fsync, fd, dataonly,
2626
           xdata_in);
2627

2628
    if (preiatt)
2629
        *preiatt = args.iatt1;
2630
    if (postiatt)
2631
        *postiatt = args.iatt2;
2632

2633
    if (xdata_out)
2634
        *xdata_out = args.xdata;
2635
    else if (args.xdata)
2636
        dict_unref(args.xdata);
2637

2638
    if (args.op_ret < 0)
2639
        return -args.op_errno;
2640
    return args.op_ret;
2641
}
2642

2643
int
2644
syncop_flush_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2645
                 int32_t op_ret, int32_t op_errno, dict_t *xdata)
2646
{
2647
    struct syncargs *args = NULL;
2648

2649
    args = cookie;
2650

2651
    args->op_ret = op_ret;
2652
    args->op_errno = op_errno;
2653
    if (xdata)
2654
        args->xdata = dict_ref(xdata);
2655

2656
    __wake(args);
2657

2658
    return 0;
2659
}
2660

2661
int
2662
syncop_flush(xlator_t *subvol, fd_t *fd, dict_t *xdata_in, dict_t **xdata_out)
2663
{
2664
    struct syncargs args = {0};
2665

2666
    SYNCOP(subvol, (&args), syncop_flush_cbk, subvol->fops->flush, fd,
2667
           xdata_in);
2668

2669
    if (xdata_out)
2670
        *xdata_out = args.xdata;
2671
    else if (args.xdata)
2672
        dict_unref(args.xdata);
2673

2674
    if (args.op_ret < 0)
2675
        return -args.op_errno;
2676
    return args.op_ret;
2677
}
2678

2679
int
2680
syncop_fstat_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2681
                 int32_t op_ret, int32_t op_errno, struct iatt *stbuf,
2682
                 dict_t *xdata)
2683
{
2684
    struct syncargs *args = NULL;
2685

2686
    args = cookie;
2687

2688
    args->op_ret = op_ret;
2689
    args->op_errno = op_errno;
2690
    if (xdata)
2691
        args->xdata = dict_ref(xdata);
2692

2693
    if (op_ret == 0)
2694
        args->iatt1 = *stbuf;
2695

2696
    __wake(args);
2697

2698
    return 0;
2699
}
2700

2701
int
2702
syncop_fstat(xlator_t *subvol, fd_t *fd, struct iatt *stbuf, dict_t *xdata_in,
2703
             dict_t **xdata_out)
2704
{
2705
    struct syncargs args = {
2706
        0,
2707
    };
2708

2709
    SYNCOP(subvol, (&args), syncop_fstat_cbk, subvol->fops->fstat, fd,
2710
           xdata_in);
2711

2712
    if (stbuf)
2713
        *stbuf = args.iatt1;
2714

2715
    if (xdata_out)
2716
        *xdata_out = args.xdata;
2717
    else if (args.xdata)
2718
        dict_unref(args.xdata);
2719

2720
    if (args.op_ret < 0)
2721
        return -args.op_errno;
2722
    return args.op_ret;
2723
}
2724

2725
int
2726
syncop_stat(xlator_t *subvol, loc_t *loc, struct iatt *stbuf, dict_t *xdata_in,
2727
            dict_t **xdata_out)
2728
{
2729
    struct syncargs args = {
2730
        0,
2731
    };
2732

2733
    SYNCOP(subvol, (&args), syncop_fstat_cbk, subvol->fops->stat, loc,
2734
           xdata_in);
2735

2736
    if (stbuf)
2737
        *stbuf = args.iatt1;
2738

2739
    if (xdata_out)
2740
        *xdata_out = args.xdata;
2741
    else if (args.xdata)
2742
        dict_unref(args.xdata);
2743

2744
    if (args.op_ret < 0)
2745
        return -args.op_errno;
2746
    return args.op_ret;
2747
}
2748

2749
int32_t
2750
syncop_symlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2751
                   int32_t op_ret, int32_t op_errno, inode_t *inode,
2752
                   struct iatt *buf, struct iatt *preparent,
2753
                   struct iatt *postparent, dict_t *xdata)
2754
{
2755
    struct syncargs *args = NULL;
2756

2757
    args = cookie;
2758

2759
    args->op_ret = op_ret;
2760
    args->op_errno = op_errno;
2761
    if (xdata)
2762
        args->xdata = dict_ref(xdata);
2763

2764
    if (buf)
2765
        args->iatt1 = *buf;
2766

2767
    __wake(args);
2768

2769
    return 0;
2770
}
2771

2772
int
2773
syncop_symlink(xlator_t *subvol, loc_t *loc, const char *newpath,
2774
               struct iatt *iatt, dict_t *xdata_in, dict_t **xdata_out)
2775
{
2776
    struct syncargs args = {
2777
        0,
2778
    };
2779

2780
    SYNCOP(subvol, (&args), syncop_symlink_cbk, subvol->fops->symlink, newpath,
2781
           loc, 0, xdata_in);
2782

2783
    if (iatt)
2784
        *iatt = args.iatt1;
2785

2786
    if (xdata_out)
2787
        *xdata_out = args.xdata;
2788
    else if (args.xdata)
2789
        dict_unref(args.xdata);
2790

2791
    if (args.op_ret < 0)
2792
        return -args.op_errno;
2793
    return args.op_ret;
2794
}
2795

2796
int
2797
syncop_readlink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2798
                    int op_ret, int op_errno, const char *path,
2799
                    struct iatt *stbuf, dict_t *xdata)
2800
{
2801
    struct syncargs *args = NULL;
2802

2803
    args = cookie;
2804

2805
    args->op_ret = op_ret;
2806
    args->op_errno = op_errno;
2807
    if (xdata)
2808
        args->xdata = dict_ref(xdata);
2809

2810
    if ((op_ret != -1) && path)
2811
        args->buffer = gf_strdup(path);
2812

2813
    __wake(args);
2814

2815
    return 0;
2816
}
2817

2818
int
2819
syncop_readlink(xlator_t *subvol, loc_t *loc, char **buffer, size_t size,
2820
                dict_t *xdata_in, dict_t **xdata_out)
2821
{
2822
    struct syncargs args = {
2823
        0,
2824
    };
2825

2826
    SYNCOP(subvol, (&args), syncop_readlink_cbk, subvol->fops->readlink, loc,
2827
           size, xdata_in);
2828

2829
    if (buffer)
2830
        *buffer = args.buffer;
2831
    else
2832
        GF_FREE(args.buffer);
2833

2834
    if (xdata_out)
2835
        *xdata_out = args.xdata;
2836
    else if (args.xdata)
2837
        dict_unref(args.xdata);
2838

2839
    if (args.op_ret < 0)
2840
        return -args.op_errno;
2841
    return args.op_ret;
2842
}
2843

2844
int
2845
syncop_mknod_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2846
                 int32_t op_ret, int32_t op_errno, inode_t *inode,
2847
                 struct iatt *buf, struct iatt *preparent,
2848
                 struct iatt *postparent, dict_t *xdata)
2849
{
2850
    struct syncargs *args = NULL;
2851

2852
    args = cookie;
2853

2854
    args->op_ret = op_ret;
2855
    args->op_errno = op_errno;
2856
    if (xdata)
2857
        args->xdata = dict_ref(xdata);
2858

2859
    if (buf)
2860
        args->iatt1 = *buf;
2861

2862
    __wake(args);
2863

2864
    return 0;
2865
}
2866

2867
int
2868
syncop_mknod(xlator_t *subvol, loc_t *loc, mode_t mode, dev_t rdev,
2869
             struct iatt *iatt, dict_t *xdata_in, dict_t **xdata_out)
2870
{
2871
    struct syncargs args = {
2872
        0,
2873
    };
2874

2875
    SYNCOP(subvol, (&args), syncop_mknod_cbk, subvol->fops->mknod, loc, mode,
2876
           rdev, 0, xdata_in);
2877

2878
    if (iatt)
2879
        *iatt = args.iatt1;
2880

2881
    if (xdata_out)
2882
        *xdata_out = args.xdata;
2883
    else if (args.xdata)
2884
        dict_unref(args.xdata);
2885

2886
    if (args.op_ret < 0)
2887
        return -args.op_errno;
2888
    return args.op_ret;
2889
}
2890

2891
int
2892
syncop_mkdir_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2893
                 int32_t op_ret, int32_t op_errno, inode_t *inode,
2894
                 struct iatt *buf, struct iatt *preparent,
2895
                 struct iatt *postparent, dict_t *xdata)
2896
{
2897
    struct syncargs *args = NULL;
2898

2899
    args = cookie;
2900

2901
    args->op_ret = op_ret;
2902
    args->op_errno = op_errno;
2903
    if (xdata)
2904
        args->xdata = dict_ref(xdata);
2905

2906
    if (buf)
2907
        args->iatt1 = *buf;
2908

2909
    __wake(args);
2910

2911
    return 0;
2912
}
2913

2914
int
2915
syncop_mkdir(xlator_t *subvol, loc_t *loc, mode_t mode, struct iatt *iatt,
2916
             dict_t *xdata_in, dict_t **xdata_out)
2917
{
2918
    struct syncargs args = {
2919
        0,
2920
    };
2921

2922
    SYNCOP(subvol, (&args), syncop_mkdir_cbk, subvol->fops->mkdir, loc, mode, 0,
2923
           xdata_in);
2924

2925
    if (iatt)
2926
        *iatt = args.iatt1;
2927

2928
    if (xdata_out)
2929
        *xdata_out = args.xdata;
2930
    else if (args.xdata)
2931
        dict_unref(args.xdata);
2932

2933
    if (args.op_ret < 0)
2934
        return -args.op_errno;
2935
    return args.op_ret;
2936
}
2937

2938
int
2939
syncop_access_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2940
                  int32_t op_ret, int32_t op_errno, dict_t *xdata)
2941
{
2942
    struct syncargs *args = NULL;
2943

2944
    args = cookie;
2945

2946
    args->op_ret = op_ret;
2947
    args->op_errno = op_errno;
2948
    if (xdata)
2949
        args->xdata = dict_ref(xdata);
2950

2951
    __wake(args);
2952

2953
    return 0;
2954
}
2955

2956
/* posix_acl xlator will respond in different ways for access calls from
2957
   fuse and access calls from nfs. For fuse, checking op_ret is sufficient
2958
   to check whether the access call is successful or not. But for nfs the
2959
   mode of the access that is permitted is put into op_errno before unwind.
2960
   With syncop, the caller of syncop_access will not be able to get the
2961
   mode of the access despite call being successul (since syncop_access
2962
   returns only the op_ret collected in args).
2963
   Now, if access call is failed, then args.op_ret is returned to recognise
2964
   the failure. But if op_ret is zero, then the mode of access which is
2965
   set in args.op_errno is returned. Thus the caller of syncop_access
2966
   has to check whether the return value is less than zero or not. If the
2967
   return value it got is less than zero, then access call is failed.
2968
   If it is not, then the access call is successful and the value the caller
2969
   got is the mode of the access.
2970
*/
2971
int
2972
syncop_access(xlator_t *subvol, loc_t *loc, int32_t mask, dict_t *xdata_in,
2973
              dict_t **xdata_out)
2974
{
2975
    struct syncargs args = {
2976
        0,
2977
    };
2978

2979
    SYNCOP(subvol, (&args), syncop_access_cbk, subvol->fops->access, loc, mask,
2980
           xdata_in);
2981

2982
    if (xdata_out)
2983
        *xdata_out = args.xdata;
2984
    else if (args.xdata)
2985
        dict_unref(args.xdata);
2986

2987
    if (args.op_ret < 0)
2988
        return -args.op_errno;
2989
    return args.op_errno;
2990
}
2991

2992
int
2993
syncop_fallocate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2994
                     int op_ret, int op_errno, struct iatt *prebuf,
2995
                     struct iatt *postbuf, dict_t *xdata)
2996
{
2997
    struct syncargs *args = NULL;
2998

2999
    args = cookie;
3000

3001
    args->op_ret = op_ret;
3002
    args->op_errno = op_errno;
3003
    if (xdata)
3004
        args->xdata = dict_ref(xdata);
3005

3006
    __wake(args);
3007

3008
    return 0;
3009
}
3010

3011
int
3012
syncop_fallocate(xlator_t *subvol, fd_t *fd, int32_t keep_size, off_t offset,
3013
                 size_t len, dict_t *xdata_in, dict_t **xdata_out)
3014
{
3015
    struct syncargs args = {
3016
        0,
3017
    };
3018

3019
    SYNCOP(subvol, (&args), syncop_fallocate_cbk, subvol->fops->fallocate, fd,
3020
           keep_size, offset, len, xdata_in);
3021

3022
    if (xdata_out)
3023
        *xdata_out = args.xdata;
3024
    else if (args.xdata)
3025
        dict_unref(args.xdata);
3026

3027
    if (args.op_ret < 0)
3028
        return -args.op_errno;
3029
    return args.op_ret;
3030
}
3031

3032
int
3033
syncop_discard_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3034
                   int op_ret, int op_errno, struct iatt *prebuf,
3035
                   struct iatt *postbuf, dict_t *xdata)
3036
{
3037
    struct syncargs *args = NULL;
3038

3039
    args = cookie;
3040

3041
    args->op_ret = op_ret;
3042
    args->op_errno = op_errno;
3043
    if (xdata)
3044
        args->xdata = dict_ref(xdata);
3045

3046
    __wake(args);
3047

3048
    return 0;
3049
}
3050

3051
int
3052
syncop_discard(xlator_t *subvol, fd_t *fd, off_t offset, size_t len,
3053
               dict_t *xdata_in, dict_t **xdata_out)
3054
{
3055
    struct syncargs args = {
3056
        0,
3057
    };
3058

3059
    SYNCOP(subvol, (&args), syncop_discard_cbk, subvol->fops->discard, fd,
3060
           offset, len, xdata_in);
3061

3062
    if (xdata_out)
3063
        *xdata_out = args.xdata;
3064
    else if (args.xdata)
3065
        dict_unref(args.xdata);
3066

3067
    if (args.op_ret < 0)
3068
        return -args.op_errno;
3069
    return args.op_ret;
3070
}
3071

3072
int
3073
syncop_zerofill_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3074
                    int op_ret, int op_errno, struct iatt *prebuf,
3075
                    struct iatt *postbuf, dict_t *xdata)
3076
{
3077
    struct syncargs *args = NULL;
3078

3079
    args = cookie;
3080

3081
    args->op_ret = op_ret;
3082
    args->op_errno = op_errno;
3083
    if (xdata)
3084
        args->xdata = dict_ref(xdata);
3085

3086
    __wake(args);
3087

3088
    return 0;
3089
}
3090

3091
int
3092
syncop_zerofill(xlator_t *subvol, fd_t *fd, off_t offset, off_t len,
3093
                dict_t *xdata_in, dict_t **xdata_out)
3094
{
3095
    struct syncargs args = {
3096
        0,
3097
    };
3098

3099
    SYNCOP(subvol, (&args), syncop_zerofill_cbk, subvol->fops->zerofill, fd,
3100
           offset, len, xdata_in);
3101

3102
    if (xdata_out)
3103
        *xdata_out = args.xdata;
3104
    else if (args.xdata)
3105
        dict_unref(args.xdata);
3106

3107
    if (args.op_ret < 0)
3108
        return -args.op_errno;
3109
    return args.op_ret;
3110
}
3111

3112
int
3113
syncop_ipc_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
3114
               int op_errno, dict_t *xdata)
3115
{
3116
    struct syncargs *args = NULL;
3117

3118
    args = cookie;
3119

3120
    args->op_ret = op_ret;
3121
    args->op_errno = op_errno;
3122
    if (xdata)
3123
        args->xdata = dict_ref(xdata);
3124

3125
    __wake(args);
3126

3127
    return 0;
3128
}
3129

3130
int
3131
syncop_ipc(xlator_t *subvol, int32_t op, dict_t *xdata_in, dict_t **xdata_out)
3132
{
3133
    struct syncargs args = {
3134
        0,
3135
    };
3136

3137
    SYNCOP(subvol, (&args), syncop_ipc_cbk, subvol->fops->ipc, op, xdata_in);
3138

3139
    if (args.xdata) {
3140
        if (xdata_out) {
3141
            /*
3142
             * We're passing this reference to the caller, along
3143
             * with the pointer itself.  That means they're
3144
             * responsible for calling dict_unref at some point.
3145
             */
3146
            *xdata_out = args.xdata;
3147
        } else {
3148
            dict_unref(args.xdata);
3149
        }
3150
    }
3151

3152
    if (args.op_ret < 0)
3153
        return -args.op_errno;
3154
    return args.op_ret;
3155
}
3156

3157
int
3158
syncop_seek_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
3159
                int op_errno, off_t offset, dict_t *xdata)
3160
{
3161
    struct syncargs *args = NULL;
3162

3163
    args = cookie;
3164

3165
    args->op_ret = op_ret;
3166
    args->op_errno = op_errno;
3167
    args->offset = offset;
3168
    if (xdata)
3169
        args->xdata = dict_ref(xdata);
3170

3171
    __wake(args);
3172

3173
    return 0;
3174
}
3175

3176
int
3177
syncop_seek(xlator_t *subvol, fd_t *fd, off_t offset, gf_seek_what_t what,
3178
            dict_t *xdata_in, off_t *off)
3179
{
3180
    struct syncargs args = {
3181
        0,
3182
    };
3183

3184
    SYNCOP(subvol, (&args), syncop_seek_cbk, subvol->fops->seek, fd, offset,
3185
           what, xdata_in);
3186

3187
    if (args.op_ret < 0) {
3188
        return -args.op_errno;
3189
    } else {
3190
        if (off)
3191
            *off = args.offset;
3192
        return args.op_ret;
3193
    }
3194
}
3195

3196
int
3197
syncop_lease_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
3198
                 int op_errno, struct gf_lease *lease, dict_t *xdata)
3199
{
3200
    struct syncargs *args = NULL;
3201

3202
    args = cookie;
3203

3204
    args->op_ret = op_ret;
3205
    args->op_errno = op_errno;
3206
    if (xdata)
3207
        args->xdata = dict_ref(xdata);
3208
    if (lease)
3209
        args->lease = *lease;
3210

3211
    __wake(args);
3212

3213
    return 0;
3214
}
3215

3216
int
3217
syncop_lease(xlator_t *subvol, loc_t *loc, struct gf_lease *lease,
3218
             dict_t *xdata_in, dict_t **xdata_out)
3219
{
3220
    struct syncargs args = {
3221
        0,
3222
    };
3223

3224
    SYNCOP(subvol, (&args), syncop_lease_cbk, subvol->fops->lease, loc, lease,
3225
           xdata_in);
3226

3227
    *lease = args.lease;
3228

3229
    if (args.xdata) {
3230
        if (xdata_out) {
3231
            /*
3232
             * We're passing this reference to the caller, along
3233
             * with the pointer itself.  That means they're
3234
             * responsible for calling dict_unref at some point.
3235
             */
3236
            *xdata_out = args.xdata;
3237
        } else {
3238
            dict_unref(args.xdata);
3239
        }
3240
    }
3241

3242
    if (args.op_ret < 0)
3243
        return -args.op_errno;
3244
    return args.op_ret;
3245
}
3246

3247
int
3248
syncop_lk_cbk(call_frame_t *frame, void *cookie, xlator_t *this, int op_ret,
3249
              int op_errno, struct gf_flock *flock, dict_t *xdata)
3250
{
3251
    struct syncargs *args = NULL;
3252

3253
    args = cookie;
3254

3255
    args->op_ret = op_ret;
3256
    args->op_errno = op_errno;
3257
    if (xdata)
3258
        args->xdata = dict_ref(xdata);
3259

3260
    if (flock)
3261
        gf_flock_copy(&args->flock, flock);
3262
    __wake(args);
3263

3264
    return 0;
3265
}
3266

3267
int
3268
syncop_lk(xlator_t *subvol, fd_t *fd, int cmd, struct gf_flock *flock,
3269
          dict_t *xdata_in, dict_t **xdata_out)
3270
{
3271
    struct syncargs args = {
3272
        0,
3273
    };
3274

3275
    SYNCOP(subvol, (&args), syncop_lk_cbk, subvol->fops->lk, fd, cmd, flock,
3276
           xdata_in);
3277

3278
    gf_flock_copy(flock, &args.flock);
3279

3280
    if (xdata_out)
3281
        *xdata_out = args.xdata;
3282
    else if (args.xdata)
3283
        dict_unref(args.xdata);
3284

3285
    if (args.op_ret < 0)
3286
        return -args.op_errno;
3287
    return args.op_ret;
3288
}
3289

3290
int32_t
3291
syncop_inodelk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3292
                   int32_t op_ret, int32_t op_errno, dict_t *xdata)
3293
{
3294
    struct syncargs *args = NULL;
3295

3296
    args = cookie;
3297

3298
    args->op_ret = op_ret;
3299
    args->op_errno = op_errno;
3300
    if (xdata)
3301
        args->xdata = dict_ref(xdata);
3302

3303
    __wake(args);
3304

3305
    return 0;
3306
}
3307

3308
int
3309
syncop_inodelk(xlator_t *subvol, const char *volume, loc_t *loc, int32_t cmd,
3310
               struct gf_flock *lock, dict_t *xdata_in, dict_t **xdata_out)
3311
{
3312
    struct syncargs args = {
3313
        0,
3314
    };
3315

3316
    SYNCOP(subvol, (&args), syncop_inodelk_cbk, subvol->fops->inodelk, volume,
3317
           loc, cmd, lock, xdata_in);
3318

3319
    if (xdata_out)
3320
        *xdata_out = args.xdata;
3321
    else if (args.xdata)
3322
        dict_unref(args.xdata);
3323

3324
    if (args.op_ret < 0)
3325
        return -args.op_errno;
3326

3327
    return args.op_ret;
3328
}
3329

3330
int32_t
3331
syncop_entrylk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3332
                   int32_t op_ret, int32_t op_errno, dict_t *xdata)
3333
{
3334
    struct syncargs *args = NULL;
3335

3336
    args = cookie;
3337
    args->op_ret = op_ret;
3338
    args->op_errno = op_errno;
3339
    if (xdata)
3340
        args->xdata = dict_ref(xdata);
3341

3342
    __wake(args);
3343
    return 0;
3344
}
3345

3346
int
3347
syncop_entrylk(xlator_t *subvol, const char *volume, loc_t *loc,
3348
               const char *basename, entrylk_cmd cmd, entrylk_type type,
3349
               dict_t *xdata_in, dict_t **xdata_out)
3350
{
3351
    struct syncargs args = {
3352
        0,
3353
    };
3354

3355
    SYNCOP(subvol, (&args), syncop_entrylk_cbk, subvol->fops->entrylk, volume,
3356
           loc, basename, cmd, type, xdata_in);
3357

3358
    if (xdata_out)
3359
        *xdata_out = args.xdata;
3360
    else if (args.xdata)
3361
        dict_unref(args.xdata);
3362

3363
    if (args.op_ret < 0)
3364
        return -args.op_errno;
3365

3366
    return args.op_ret;
3367
}
3368

3369
int32_t
3370
syncop_xattrop_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3371
                   int32_t op_ret, int32_t op_errno, dict_t *dict,
3372
                   dict_t *xdata)
3373
{
3374
    struct syncargs *args = NULL;
3375

3376
    args = cookie;
3377

3378
    args->op_ret = op_ret;
3379
    args->op_errno = op_errno;
3380
    if (xdata)
3381
        args->xdata = dict_ref(xdata);
3382
    if (dict)
3383
        args->dict_out = dict_ref(dict);
3384

3385
    __wake(args);
3386

3387
    return 0;
3388
}
3389

3390
int
3391
syncop_xattrop(xlator_t *subvol, loc_t *loc, gf_xattrop_flags_t flags,
3392
               dict_t *dict, dict_t *xdata_in, dict_t **dict_out,
3393
               dict_t **xdata_out)
3394
{
3395
    struct syncargs args = {
3396
        0,
3397
    };
3398

3399
    SYNCOP(subvol, (&args), syncop_xattrop_cbk, subvol->fops->xattrop, loc,
3400
           flags, dict, xdata_in);
3401

3402
    if (xdata_out)
3403
        *xdata_out = args.xdata;
3404
    else if (args.xdata)
3405
        dict_unref(args.xdata);
3406

3407
    if (dict_out)
3408
        *dict_out = args.dict_out;
3409
    else if (args.dict_out)
3410
        dict_unref(args.dict_out);
3411

3412
    if (args.op_ret < 0)
3413
        return -args.op_errno;
3414

3415
    return args.op_ret;
3416
}
3417

3418
int
3419
syncop_fxattrop(xlator_t *subvol, fd_t *fd, gf_xattrop_flags_t flags,
3420
                dict_t *dict, dict_t *xdata_in, dict_t **dict_out,
3421
                dict_t **xdata_out)
3422
{
3423
    struct syncargs args = {
3424
        0,
3425
    };
3426

3427
    SYNCOP(subvol, (&args), syncop_xattrop_cbk, subvol->fops->fxattrop, fd,
3428
           flags, dict, xdata_in);
3429

3430
    if (xdata_out)
3431
        *xdata_out = args.xdata;
3432
    else if (args.xdata)
3433
        dict_unref(args.xdata);
3434

3435
    if (dict_out)
3436
        *dict_out = args.dict_out;
3437
    else if (args.dict_out)
3438
        dict_unref(args.dict_out);
3439

3440
    if (args.op_ret < 0)
3441
        return -args.op_errno;
3442

3443
    return args.op_ret;
3444
}
3445

3446
int32_t
3447
syncop_getactivelk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3448
                       int32_t op_ret, int32_t op_errno,
3449
                       lock_migration_info_t *locklist, dict_t *xdata)
3450
{
3451
    struct syncargs *args = NULL;
3452
    lock_migration_info_t *tmp = NULL;
3453
    lock_migration_info_t *entry = NULL;
3454

3455
    args = cookie;
3456

3457
    INIT_LIST_HEAD(&args->locklist.list);
3458

3459
    args->op_ret = op_ret;
3460
    args->op_errno = op_errno;
3461
    if (xdata)
3462
        args->xdata = dict_ref(xdata);
3463

3464
    if (op_ret > 0) {
3465
        list_for_each_entry(tmp, &locklist->list, list)
3466
        {
3467
            /* TODO: move to GF_MALLOC() */
3468
            entry = GF_CALLOC(1, sizeof(lock_migration_info_t),
3469
                              gf_common_mt_char);
3470

3471
            if (!entry) {
3472
                gf_msg(THIS->name, GF_LOG_ERROR, 0, 0,
3473
                       "lock mem allocation  failed");
3474
                gf_free_mig_locks(&args->locklist);
3475

3476
                break;
3477
            }
3478

3479
            INIT_LIST_HEAD(&entry->list);
3480

3481
            gf_flock_copy(&entry->flock, &tmp->flock);
3482

3483
            entry->lk_flags = tmp->lk_flags;
3484

3485
            entry->client_uid = gf_strdup(tmp->client_uid);
3486

3487
            list_add_tail(&entry->list, &args->locklist.list);
3488
        }
3489
    }
3490

3491
    __wake(args);
3492

3493
    return 0;
3494
}
3495

3496
int
3497
syncop_getactivelk(xlator_t *subvol, loc_t *loc,
3498
                   lock_migration_info_t *locklist, dict_t *xdata_in,
3499
                   dict_t **xdata_out)
3500
{
3501
    struct syncargs args = {
3502
        0,
3503
    };
3504

3505
    INIT_LIST_HEAD(&args.locklist.list);
3506
    SYNCOP(subvol, (&args), syncop_getactivelk_cbk, subvol->fops->getactivelk,
3507
           loc, xdata_in);
3508

3509
    if (locklist)
3510
        list_splice_init(&args.locklist.list, &locklist->list);
3511
    else
3512
        gf_free_mig_locks(&args.locklist);
3513

3514
    if (xdata_out)
3515
        *xdata_out = args.xdata;
3516
    else if (args.xdata)
3517
        dict_unref(args.xdata);
3518

3519
    if (args.op_ret < 0)
3520
        return -args.op_errno;
3521

3522
    return args.op_ret;
3523
}
3524

3525
int
3526
syncop_setactivelk_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3527
                       int32_t op_ret, int32_t op_errno, dict_t *xdata)
3528
{
3529
    struct syncargs *args = NULL;
3530

3531
    args = cookie;
3532

3533
    args->op_ret = op_ret;
3534
    args->op_errno = op_errno;
3535

3536
    if (xdata)
3537
        args->xdata = dict_ref(xdata);
3538

3539
    __wake(args);
3540

3541
    return 0;
3542
}
3543

3544
int
3545
syncop_setactivelk(xlator_t *subvol, loc_t *loc,
3546
                   lock_migration_info_t *locklist, dict_t *xdata_in,
3547
                   dict_t **xdata_out)
3548
{
3549
    struct syncargs args = {
3550
        0,
3551
    };
3552

3553
    SYNCOP(subvol, (&args), syncop_setactivelk_cbk, subvol->fops->setactivelk,
3554
           loc, locklist, xdata_in);
3555

3556
    if (xdata_out)
3557
        *xdata_out = args.xdata;
3558
    else if (args.xdata)
3559
        dict_unref(args.xdata);
3560

3561
    if (args.op_ret < 0)
3562
        return -args.op_errno;
3563

3564
    return args.op_ret;
3565
}
3566

3567
int
3568
syncop_icreate_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3569
                   int32_t op_ret, int32_t op_errno, inode_t *inode,
3570
                   struct iatt *buf, dict_t *xdata)
3571
{
3572
    struct syncargs *args = NULL;
3573

3574
    args = cookie;
3575

3576
    args->op_ret = op_ret;
3577
    args->op_errno = op_errno;
3578
    if (xdata)
3579
        args->xdata = dict_ref(xdata);
3580

3581
    if (buf)
3582
        args->iatt1 = *buf;
3583

3584
    __wake(args);
3585

3586
    return 0;
3587
}
3588

3589
int
3590
syncop_namelink_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3591
                    int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
3592
                    struct iatt *postbuf, dict_t *xdata)
3593
{
3594
    struct syncargs *args = NULL;
3595

3596
    args = cookie;
3597

3598
    args->op_ret = op_ret;
3599
    args->op_errno = op_errno;
3600

3601
    if (xdata)
3602
        args->xdata = dict_ref(xdata);
3603

3604
    __wake(args);
3605

3606
    return 0;
3607
}
3608

3609
int
3610
syncop_copy_file_range(xlator_t *subvol, fd_t *fd_in, off64_t off_in,
3611
                       fd_t *fd_out, off64_t off_out, size_t len,
3612
                       uint32_t flags, struct iatt *stbuf,
3613
                       struct iatt *preiatt_dst, struct iatt *postiatt_dst,
3614
                       dict_t *xdata_in, dict_t **xdata_out)
3615
{
3616
    struct syncargs args = {
3617
        0,
3618
    };
3619

3620
    SYNCOP(subvol, (&args), syncop_copy_file_range_cbk,
3621
           subvol->fops->copy_file_range, fd_in, off_in, fd_out, off_out, len,
3622
           flags, xdata_in);
3623

3624
    if (stbuf) {
3625
        *stbuf = args.iatt1;
3626
    }
3627
    if (preiatt_dst) {
3628
        *preiatt_dst = args.iatt2;
3629
    }
3630
    if (postiatt_dst) {
3631
        *postiatt_dst = args.iatt3;
3632
    }
3633

3634
    if (xdata_out) {
3635
        *xdata_out = args.xdata;
3636
    } else if (args.xdata) {
3637
        dict_unref(args.xdata);
3638
    }
3639

3640
    errno = args.op_errno;
3641
    return args.op_ret;
3642
}
3643

3644
int
3645
syncop_copy_file_range_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3646
                           int op_ret, int op_errno, struct iatt *stbuf,
3647
                           struct iatt *prebuf_dst, struct iatt *postbuf_dst,
3648
                           dict_t *xdata)
3649
{
3650
    struct syncargs *args = NULL;
3651

3652
    args = cookie;
3653

3654
    args->op_ret = op_ret;
3655
    args->op_errno = op_errno;
3656
    if (xdata)
3657
        args->xdata = dict_ref(xdata);
3658

3659
    if (op_ret >= 0) {
3660
        args->iatt1 = *stbuf;
3661
        args->iatt2 = *prebuf_dst;
3662
        args->iatt3 = *postbuf_dst;
3663
    }
3664

3665
    __wake(args);
3666

3667
    return 0;
3668
}
3669

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.