glfs-fops.c
1
/*
2
  Copyright (c) 2012-2018 Red Hat, Inc. <http://www.redhat.com>
3
  This file is part of GlusterFS.
4

5
  This file is licensed to you under your choice of the GNU Lesser
6
  General Public License, version 3 or any later version (LGPLv3 or
7
  later), or the GNU General Public License, version 2 (GPLv2), in all
8
  cases as published by the Free Software Foundation.
9
*/
10

11
/* for SEEK_HOLE and SEEK_DATA */
12
#ifndef _GNU_SOURCE
13
#define _GNU_SOURCE
14
#endif
15

16
#include <unistd.h>
17

18
#include "glfs-internal.h"
19
#include "glfs-mem-types.h"
20
#include "glfs.h"
21
#include "gfapi-messages.h"
22
#include <glusterfs/compat-errno.h>
23
#include <glusterfs/common-utils.h>
24
#include <limits.h>
25
#include "glusterfs3.h"
26

27
#ifdef NAME_MAX
28
#define GF_NAME_MAX NAME_MAX
29
#else
30
#define GF_NAME_MAX 255
31
#endif
32

33
struct upcall_syncop_args {
34
    struct glfs *fs;
35
    struct gf_upcall upcall_data;
36
};
37

38
#define READDIRBUF_SIZE (sizeof(struct dirent) + GF_NAME_MAX + 1)
39

40
typedef void (*glfs_io_cbk34)(glfs_fd_t *fd, ssize_t ret, void *data);
41

42
/*
43
 * This function will mark glfd for deletion and decrement its refcount.
44
 */
45
int
46
glfs_mark_glfd_for_deletion(struct glfs_fd *glfd)
47
{
48
    LOCK(&glfd->lock);
49
    {
50
        glfd->state = GLFD_CLOSE;
51
    }
52
    UNLOCK(&glfd->lock);
53

54
    GF_REF_PUT(glfd);
55

56
    return 0;
57
}
58

59
/* This function is useful for all async fops. There is a chance that the glfd
 * is closed before the async fop completes. When the glfd is closed, we change the
61
 * state to GLFD_CLOSE.
62
 *
63
 * This function will return _gf_true if the glfd is still valid else return
64
 * _gf_false.
65
 */
66
gf_boolean_t
67
glfs_is_glfd_still_valid(struct glfs_fd *glfd)
68
{
69
    gf_boolean_t ret = _gf_false;
70

71
    LOCK(&glfd->lock);
72
    {
73
        if (glfd->state != GLFD_CLOSE)
74
            ret = _gf_true;
75
    }
76
    UNLOCK(&glfd->lock);
77

78
    return ret;
79
}
80

81
void
82
glfd_set_state_bind(struct glfs_fd *glfd)
83
{
84
    LOCK(&glfd->lock);
85
    {
86
        glfd->state = GLFD_OPEN;
87
    }
88
    UNLOCK(&glfd->lock);
89

90
    fd_bind(glfd->fd);
91
    glfs_fd_bind(glfd);
92

93
    return;
94
}
95

96
/*
97
 * This routine is called when an upcall event of type
98
 * 'GF_UPCALL_CACHE_INVALIDATION' is received.
99
 * It makes a copy of the contents of the upcall cache-invalidation
100
 * data received into an entry which is stored in the upcall list
101
 * maintained by gfapi.
102
 */
103
int
104
glfs_get_upcall_cache_invalidation(struct gf_upcall *to_up_data,
105
                                   struct gf_upcall *from_up_data)
106
{
107
    struct gf_upcall_cache_invalidation *ca_data = NULL;
108
    struct gf_upcall_cache_invalidation *f_ca_data = NULL;
109
    int ret = -1;
110

111
    GF_VALIDATE_OR_GOTO(THIS->name, to_up_data, out);
112
    GF_VALIDATE_OR_GOTO(THIS->name, from_up_data, out);
113

114
    f_ca_data = from_up_data->data;
115
    GF_VALIDATE_OR_GOTO(THIS->name, f_ca_data, out);
116

117
    ca_data = GF_CALLOC(1, sizeof(*ca_data), glfs_mt_upcall_entry_t);
118

119
    if (!ca_data) {
120
        gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_ALLOC_FAILED, "entry",
121
                NULL);
122
        goto out;
123
    }
124

125
    to_up_data->data = ca_data;
126

127
    ca_data->flags = f_ca_data->flags;
128
    ca_data->expire_time_attr = f_ca_data->expire_time_attr;
129
    ca_data->stat = f_ca_data->stat;
130
    ca_data->p_stat = f_ca_data->p_stat;
131
    ca_data->oldp_stat = f_ca_data->oldp_stat;
132

133
    ret = 0;
134
out:
135
    return ret;
136
}
137

138
int
139
glfs_get_upcall_lease(struct gf_upcall *to_up_data,
140
                      struct gf_upcall *from_up_data)
141
{
142
    struct gf_upcall_recall_lease *ca_data = NULL;
143
    struct gf_upcall_recall_lease *f_ca_data = NULL;
144
    int ret = -1;
145

146
    GF_VALIDATE_OR_GOTO(THIS->name, to_up_data, out);
147
    GF_VALIDATE_OR_GOTO(THIS->name, from_up_data, out);
148

149
    f_ca_data = from_up_data->data;
150
    GF_VALIDATE_OR_GOTO(THIS->name, f_ca_data, out);
151

152
    ca_data = GF_CALLOC(1, sizeof(*ca_data), glfs_mt_upcall_entry_t);
153

154
    if (!ca_data) {
155
        gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_ALLOC_FAILED, "entry",
156
                NULL);
157
        goto out;
158
    }
159

160
    to_up_data->data = ca_data;
161

162
    ca_data->lease_type = f_ca_data->lease_type;
163
    gf_uuid_copy(ca_data->tid, f_ca_data->tid);
164
    ca_data->dict = f_ca_data->dict;
165

166
    ret = 0;
167
out:
168
    return ret;
169
}
170
int
171
glfs_loc_link(loc_t *loc, struct iatt *iatt)
172
{
173
    int ret = -1;
174
    inode_t *old_inode = NULL;
175
    uint64_t ctx_value = LOOKUP_NOT_NEEDED;
176

177
    if (!loc->inode) {
178
        errno = EINVAL;
179
        return -1;
180
    }
181

182
    old_inode = loc->inode;
183

184
    /* If the inode already exists in the cache, the inode
185
     * returned here points to the existing one. We need
186
     * to update loc.inode accordingly.
187
     */
188
    loc->inode = inode_link(loc->inode, loc->parent, loc->name, iatt);
189
    if (loc->inode) {
190
        inode_ctx_set(loc->inode, THIS, &ctx_value);
191
        inode_lookup(loc->inode);
192
        inode_unref(old_inode);
193
        ret = 0;
194
    } else {
195
        ret = -1;
196
    }
197

198
    return ret;
199
}
200

201
void
202
glfs_iatt_to_stat(struct glfs *fs, struct iatt *iatt, struct stat *stat)
203
{
204
    iatt_to_stat(iatt, stat);
205
    stat->st_dev = fs->dev_id;
206
}
207

208
void
209
glfs_iatt_to_statx(struct glfs *fs, const struct iatt *iatt,
210
                   struct glfs_stat *statx)
211
{
212
    statx->glfs_st_mask = 0;
213

214
    statx->glfs_st_mode = 0;
215
    if (IATT_TYPE_VALID(iatt->ia_flags)) {
216
        statx->glfs_st_mode |= st_mode_type_from_ia(iatt->ia_type);
217
        statx->glfs_st_mask |= GLFS_STAT_TYPE;
218
    }
219

220
    if (IATT_MODE_VALID(iatt->ia_flags)) {
221
        statx->glfs_st_mode |= st_mode_prot_from_ia(iatt->ia_prot);
222
        statx->glfs_st_mask |= GLFS_STAT_MODE;
223
    }
224

225
    if (IATT_NLINK_VALID(iatt->ia_flags)) {
226
        statx->glfs_st_nlink = iatt->ia_nlink;
227
        statx->glfs_st_mask |= GLFS_STAT_NLINK;
228
    }
229

230
    if (IATT_UID_VALID(iatt->ia_flags)) {
231
        statx->glfs_st_uid = iatt->ia_uid;
232
        statx->glfs_st_mask |= GLFS_STAT_UID;
233
    }
234

235
    if (IATT_GID_VALID(iatt->ia_flags)) {
236
        statx->glfs_st_gid = iatt->ia_gid;
237
        statx->glfs_st_mask |= GLFS_STAT_GID;
238
    }
239

240
    if (IATT_ATIME_VALID(iatt->ia_flags)) {
241
        statx->glfs_st_atime.tv_sec = iatt->ia_atime;
242
        statx->glfs_st_atime.tv_nsec = iatt->ia_atime_nsec;
243
        statx->glfs_st_mask |= GLFS_STAT_ATIME;
244
    }
245

246
    if (IATT_MTIME_VALID(iatt->ia_flags)) {
247
        statx->glfs_st_mtime.tv_sec = iatt->ia_mtime;
248
        statx->glfs_st_mtime.tv_nsec = iatt->ia_mtime_nsec;
249
        statx->glfs_st_mask |= GLFS_STAT_MTIME;
250
    }
251

252
    if (IATT_CTIME_VALID(iatt->ia_flags)) {
253
        statx->glfs_st_ctime.tv_sec = iatt->ia_ctime;
254
        statx->glfs_st_ctime.tv_nsec = iatt->ia_ctime_nsec;
255
        statx->glfs_st_mask |= GLFS_STAT_CTIME;
256
    }
257

258
    if (IATT_BTIME_VALID(iatt->ia_flags)) {
259
        statx->glfs_st_btime.tv_sec = iatt->ia_btime;
260
        statx->glfs_st_btime.tv_nsec = iatt->ia_btime_nsec;
261
        statx->glfs_st_mask |= GLFS_STAT_BTIME;
262
    }
263

264
    if (IATT_INO_VALID(iatt->ia_flags)) {
265
        statx->glfs_st_ino = iatt->ia_ino;
266
        statx->glfs_st_mask |= GLFS_STAT_INO;
267
    }
268

269
    if (IATT_SIZE_VALID(iatt->ia_flags)) {
270
        statx->glfs_st_size = iatt->ia_size;
271
        statx->glfs_st_mask |= GLFS_STAT_SIZE;
272
    }
273

274
    if (IATT_BLOCKS_VALID(iatt->ia_flags)) {
275
        statx->glfs_st_blocks = iatt->ia_blocks;
276
        statx->glfs_st_mask |= GLFS_STAT_BLOCKS;
277
    }
278

279
    /* unconditionally present, encode as is */
280
    statx->glfs_st_blksize = iatt->ia_blksize;
281
    statx->glfs_st_rdev_major = ia_major(iatt->ia_rdev);
282
    statx->glfs_st_rdev_minor = ia_minor(iatt->ia_rdev);
283
    statx->glfs_st_dev_major = ia_major(fs->dev_id);
284
    statx->glfs_st_dev_minor = ia_minor(fs->dev_id);
285

286
    /* At present we do not read any localFS attributes and pass them along,
287
     * so this is set to 0. As we start supporting file attributes, we can
     * populate them here as well. */
289
    statx->glfs_st_attributes = 0;
290
    statx->glfs_st_attributes_mask = 0;
291
}
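
/*
 * Consumer-side sketch (illustrative, not part of this file's API): the
 * converter above fills a field only when the corresponding validity bit
 * is set, so callers that receive a struct glfs_stat (for example the
 * poststat handed to a glfs_io_cbk, or the statx-style calls below)
 * should test glfs_st_mask before reading a field:
 *
 *   if (poststat && (poststat->glfs_st_mask & GLFS_STAT_SIZE))
 *       new_size = poststat->glfs_st_size;
 */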
292

293
GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_iatt_from_statx, 6.0)
294
void
295
priv_glfs_iatt_from_statx(struct iatt *iatt, const struct glfs_stat *statx)
296
{
297
    /* Most code in xlators does not check validity flags before accessing
298
    the items. Hence zero everything before setting valid items */
299
    memset(iatt, 0, sizeof(struct iatt));
300

301
    if (GLFS_STAT_TYPE_VALID(statx->glfs_st_mask)) {
302
        iatt->ia_type = ia_type_from_st_mode(statx->glfs_st_mode);
303
        iatt->ia_flags |= IATT_TYPE;
304
    }
305

306
    if (GLFS_STAT_MODE_VALID(statx->glfs_st_mask)) {
307
        iatt->ia_prot = ia_prot_from_st_mode(statx->glfs_st_mode);
308
        iatt->ia_flags |= IATT_MODE;
309
    }
310

311
    if (GLFS_STAT_NLINK_VALID(statx->glfs_st_mask)) {
312
        iatt->ia_nlink = statx->glfs_st_nlink;
313
        iatt->ia_flags |= IATT_NLINK;
314
    }
315

316
    if (GLFS_STAT_UID_VALID(statx->glfs_st_mask)) {
317
        iatt->ia_uid = statx->glfs_st_uid;
318
        iatt->ia_flags |= IATT_UID;
319
    }
320

321
    if (GLFS_STAT_GID_VALID(statx->glfs_st_mask)) {
322
        iatt->ia_gid = statx->glfs_st_gid;
323
        iatt->ia_flags |= IATT_GID;
324
    }
325

326
    if (GLFS_STAT_ATIME_VALID(statx->glfs_st_mask)) {
327
        iatt->ia_atime = statx->glfs_st_atime.tv_sec;
328
        iatt->ia_atime_nsec = statx->glfs_st_atime.tv_nsec;
329
        iatt->ia_flags |= IATT_ATIME;
330
    }
331

332
    if (GLFS_STAT_MTIME_VALID(statx->glfs_st_mask)) {
333
        iatt->ia_mtime = statx->glfs_st_mtime.tv_sec;
334
        iatt->ia_mtime_nsec = statx->glfs_st_mtime.tv_nsec;
335
        iatt->ia_flags |= IATT_MTIME;
336
    }
337

338
    if (GLFS_STAT_CTIME_VALID(statx->glfs_st_mask)) {
339
        iatt->ia_ctime = statx->glfs_st_ctime.tv_sec;
340
        iatt->ia_ctime_nsec = statx->glfs_st_ctime.tv_nsec;
341
        iatt->ia_flags |= IATT_CTIME;
342
    }
343

344
    if (GLFS_STAT_BTIME_VALID(statx->glfs_st_mask)) {
345
        iatt->ia_btime = statx->glfs_st_btime.tv_sec;
346
        iatt->ia_btime_nsec = statx->glfs_st_btime.tv_nsec;
347
        iatt->ia_flags |= IATT_BTIME;
348
    }
349

350
    if (GLFS_STAT_INO_VALID(statx->glfs_st_mask)) {
351
        iatt->ia_ino = statx->glfs_st_ino;
352
        iatt->ia_flags |= IATT_INO;
353
    }
354

355
    if (GLFS_STAT_SIZE_VALID(statx->glfs_st_mask)) {
356
        iatt->ia_size = statx->glfs_st_size;
357
        iatt->ia_flags |= IATT_SIZE;
358
    }
359

360
    if (GLFS_STAT_BLOCKS_VALID(statx->glfs_st_mask)) {
361
        iatt->ia_blocks = statx->glfs_st_blocks;
362
        iatt->ia_flags |= IATT_BLOCKS;
363
    }
364

365
    /* unconditionally present, encode as is */
366
    iatt->ia_blksize = statx->glfs_st_blksize;
367
    iatt->ia_rdev = makedev(statx->glfs_st_rdev_major,
368
                            statx->glfs_st_rdev_minor);
369
    iatt->ia_dev = makedev(statx->glfs_st_dev_major, statx->glfs_st_dev_minor);
370
    iatt->ia_attributes = statx->glfs_st_attributes;
371
    iatt->ia_attributes_mask = statx->glfs_st_attributes_mask;
372
}
373

374
void
375
glfsflags_from_gfapiflags(struct glfs_stat *stat, int *glvalid)
376
{
377
    *glvalid = 0;
378
    if (stat->glfs_st_mask & GLFS_STAT_MODE) {
379
        *glvalid |= GF_SET_ATTR_MODE;
380
    }
381

382
    if (stat->glfs_st_mask & GLFS_STAT_SIZE) {
383
        *glvalid |= GF_SET_ATTR_SIZE;
384
    }
385

386
    if (stat->glfs_st_mask & GLFS_STAT_UID) {
387
        *glvalid |= GF_SET_ATTR_UID;
388
    }
389

390
    if (stat->glfs_st_mask & GLFS_STAT_GID) {
391
        *glvalid |= GF_SET_ATTR_GID;
392
    }
393

394
    if (stat->glfs_st_mask & GLFS_STAT_ATIME) {
395
        *glvalid |= GF_SET_ATTR_ATIME;
396
    }
397

398
    if (stat->glfs_st_mask & GLFS_STAT_MTIME) {
399
        *glvalid |= GF_SET_ATTR_MTIME;
400
    }
401
}
402

403
int
404
glfs_loc_unlink(loc_t *loc)
405
{
406
    inode_unlink(loc->inode, loc->parent, loc->name);
407

408
    /* since glfs_h_* objects hold a reference to inode
409
     * it is safe to keep lookup count to '0' */
410
    if (!inode_has_dentry(loc->inode))
411
        inode_forget(loc->inode, 0);
412

413
    return 0;
414
}
415

416
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_open, 3.4.0)
417
struct glfs_fd *
418
pub_glfs_open(struct glfs *fs, const char *path, int flags)
419
{
420
    int ret = -1;
421
    struct glfs_fd *glfd = NULL;
422
    xlator_t *subvol = NULL;
423
    loc_t loc = {
424
        0,
425
    };
426
    struct iatt iatt = {
427
        0,
428
    };
429
    int reval = 0;
430
    dict_t *fop_attr = NULL;
431

432
    DECLARE_OLD_THIS;
433
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
434

435
    subvol = glfs_active_subvol(fs);
436
    if (!subvol) {
437
        ret = -1;
438
        errno = EIO;
439
        goto out;
440
    }
441

442
    glfd = glfs_fd_new(fs);
443
    if (!glfd)
444
        goto out;
445

446
retry:
447
    ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
448

449
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
450

451
    if (ret)
452
        goto out;
453

454
    ret = validate_open_flags(flags, iatt.ia_type);
455
    if (ret)
456
        goto out;
457

458
    if (glfd->fd) {
459
        /* Retry. Safe to touch glfd->fd as we
460
           have not called glfs_fd_bind() yet.
461
        */
462
        fd_unref(glfd->fd);
463
        glfd->fd = NULL;
464
    }
465

466
    glfd->fd = fd_create(loc.inode, getpid());
467
    if (!glfd->fd) {
468
        ret = -1;
469
        errno = ENOMEM;
470
        goto out;
471
    }
472
    glfd->fd->flags = flags;
473

474
    ret = get_fop_attr_thrd_key(&fop_attr);
475
    if (ret)
476
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
477

478
    if (IA_ISDIR(iatt.ia_type))
479
        ret = syncop_opendir(subvol, &loc, glfd->fd, NULL, NULL);
480
    else
481
        ret = syncop_open(subvol, &loc, flags, glfd->fd, fop_attr, NULL);
482

483
    DECODE_SYNCOP_ERR(ret);
484

485
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
486
out:
487
    loc_wipe(&loc);
488

489
    if (fop_attr)
490
        dict_unref(fop_attr);
491

492
    if (ret && glfd) {
493
        GF_REF_PUT(glfd);
494
        glfd = NULL;
495
    } else if (glfd) {
496
        glfd_set_state_bind(glfd);
497
    }
498

499
    glfs_subvol_done(fs, subvol);
500

501
    __GLFS_EXIT_FS;
502

503
invalid_fs:
504
    return glfd;
505
}
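
/*
 * Usage sketch (illustrative only, not part of this file): a typical
 * open/read/close sequence through the public gfapi entry points. The
 * volume name "testvol", host "server1" and path "/dir/file.txt" are
 * assumptions made for the example.
 *
 *   glfs_t *fs = glfs_new("testvol");
 *   glfs_set_volfile_server(fs, "tcp", "server1", 24007);
 *   if (glfs_init(fs) == 0) {
 *       glfs_fd_t *fd = glfs_open(fs, "/dir/file.txt", O_RDONLY);
 *       if (fd) {
 *           char buf[4096];
 *           ssize_t n = glfs_read(fd, buf, sizeof(buf), 0);
 *           (void)n;
 *           glfs_close(fd);
 *       }
 *   }
 *   glfs_fini(fs);
 */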
506

507
static void
508
cleanup_fopat_args(struct glfs_fd *pglfd, xlator_t *subvol, int ret, loc_t *loc)
509
{
510
    if (loc)
511
        loc_wipe(loc);
512

513
    if (subvol)
514
        glfs_subvol_done(pglfd->fs, subvol);
515

516
    GF_REF_PUT(pglfd);
517
}
518

519
static xlator_t *
520
setup_fopat_args(struct glfs_fd *pglfd, const char *path, gf_boolean_t follow,
521
                 loc_t *loc, struct iatt *iatt, int reval)
522
{
523
    int ret = 0;
524
    xlator_t *subvol = NULL;
525

526
    GF_REF_GET(pglfd);
527

528
    subvol = glfs_active_subvol(pglfd->fs);
529
    if (!subvol) {
530
        ret = -1;
531
        errno = EIO;
532
        goto out;
533
    }
534

535
    glfs_lock(pglfd->fs, _gf_true);
536
    {
537
        ret = glfs_resolve_at(pglfd->fs, subvol, pglfd->fd->inode, path, loc,
538
                              iatt, follow, reval);
539
    }
540
    glfs_unlock(pglfd->fs);
541

542
    if (ret < 0) {
543
        goto out;
544
    }
545

546
    ret = 0;
547
out:
548
    if (ret < 0 && errno != ENOENT) {
549
        cleanup_fopat_args(pglfd, subvol, ret, loc);
550
        subvol = NULL;
551
    }
552

553
    return subvol;
554
}
555

556
static int
557
setup_entry_fopat_args(uuid_t gfid, dict_t **xattr_req, loc_t *loc)
558
{
559
    int ret = 0;
560

561
    if (loc->inode) {
562
        errno = EEXIST;
563
        ret = -1;
564
        goto out;
565
    }
566

567
    /* errno from setup_fopat_args */
568
    if (errno != ENOENT)
569
        /* Any other type of error is fatal */
570
        goto out;
571

572
    /* errno == ENOENT */
573
    if (!loc->parent)
574
        /* The parent directory or an ancestor even
575
           higher does not exist
576
        */
577
        goto out;
578

579
    loc->inode = inode_new(loc->parent->table);
580
    if (!loc->inode) {
581
        ret = -1;
582
        errno = ENOMEM;
583
        goto out;
584
    }
585

586
    *xattr_req = dict_new();
587
    if (!*xattr_req) {
588
        ret = -1;
589
        errno = ENOMEM;
590
        goto out;
591
    }
592

593
    gf_uuid_generate(gfid);
594
    ret = dict_set_gfuuid(*xattr_req, "gfid-req", gfid, true);
595
    if (ret) {
596
        ret = -1;
597
        errno = ENOMEM;
598
        goto out;
599
    }
600

601
    ret = 0;
602

603
out:
604
    if (ret) {
605
        if (*xattr_req)
606
            dict_unref(*xattr_req);
607
    }
608

609
    return ret;
610
}
611

612
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_openat, 11.0)
613
struct glfs_fd *
614
pub_glfs_openat(struct glfs_fd *pglfd, const char *path, int flags, mode_t mode)
615
{
616
    int ret = -1;
617
    struct glfs_fd *glfd = NULL;
618
    xlator_t *subvol = NULL;
619
    loc_t loc = {
620
        0,
621
    };
622
    dict_t *fop_attr = NULL;
623
    struct iatt iatt = {0};
624
    uuid_t gfid;
625
    gf_boolean_t is_create = 0;
626

627
    DECLARE_OLD_THIS;
628
    __GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
629

630
    is_create = !!(flags & O_CREAT);
631
    subvol = setup_fopat_args(pglfd, path, !(flags & O_NOFOLLOW), &loc, &iatt,
632
                              0);
633
    if (!subvol) {
634
        goto out;
635
    }
636

637
    if (is_create && !loc.inode) {
638
        ret = setup_entry_fopat_args(gfid, &fop_attr, &loc);
639
        if (ret) {
640
            goto out;
641
        }
642
    }
643

644
    /* Error is ENOENT but O_CREAT flag is not set */
645
    if (!loc.inode) {
646
        errno = ENOENT;
647
        ret = -1;
648
        goto out;
649
    }
650

651
    glfd = glfs_fd_new(pglfd->fs);
652
    if (!glfd) {
653
        ret = -1;
654
        goto out;
655
    }
656

657
    glfd->fd = fd_create(loc.inode, getpid());
658
    if (!glfd->fd) {
659
        ret = -1;
660
        errno = ENOMEM;
661
        goto out;
662
    }
663
    glfd->fd->flags = flags;
664

665
    ret = get_fop_attr_thrd_key(&fop_attr);
666
    if (ret)
667
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
668

669
    if (!is_create) {
670
        if (IA_ISDIR(iatt.ia_type))
671
            ret = syncop_opendir(subvol, &loc, glfd->fd, NULL, NULL);
672
        else
673
            ret = syncop_open(subvol, &loc, flags, glfd->fd, fop_attr, NULL);
674
    } else
675
        ret = syncop_create(subvol, &loc, flags, mode, glfd->fd, &iatt,
676
                            fop_attr, NULL);
677

678
    DECODE_SYNCOP_ERR(ret);
679

680
    if (is_create && ret == 0)
681
        ret = glfs_loc_link(&loc, &iatt);
682

683
    /* Because it is openat(), no ESTALE expected */
684
out:
685
    if (ret && glfd) {
686
        GF_REF_PUT(glfd);
687
        glfd = NULL;
688
    } else if (glfd) {
689
        glfd_set_state_bind(glfd);
690
    }
691

692
    if (fop_attr)
693
        dict_unref(fop_attr);
694

695
    cleanup_fopat_args(pglfd, subvol, ret, &loc);
696

697
    __GLFS_EXIT_FS;
698

699
invalid_fs:
700
    return glfd;
701
}
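
/*
 * Usage sketch (illustrative only): glfs_openat() resolves "path"
 * relative to an already-open glfs_fd, much like openat(2), and with
 * O_CREAT it creates the entry via syncop_create() as seen above. The
 * directory "/dir" and file name "new.txt" are assumptions for the
 * example.
 *
 *   glfs_fd_t *dirfd = glfs_opendir(fs, "/dir");
 *   glfs_fd_t *fd = glfs_openat(dirfd, "new.txt",
 *                               O_CREAT | O_WRONLY, 0644);
 *   if (fd)
 *       glfs_close(fd);
 *   glfs_closedir(dirfd);
 */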
702

703
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_close, 3.4.0)
704
int
705
pub_glfs_close(struct glfs_fd *glfd)
706
{
707
    xlator_t *subvol = NULL;
708
    int ret = -1;
709
    fd_t *fd = NULL;
710
    struct glfs *fs = NULL;
711
    dict_t *fop_attr = NULL;
712

713
    DECLARE_OLD_THIS;
714
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
715

716
    gf_dirent_free(list_entry(&glfd->entries, gf_dirent_t, list));
717
    subvol = glfs_active_subvol(glfd->fs);
718
    if (!subvol) {
719
        ret = -1;
720
        errno = EIO;
721
        goto out;
722
    }
723

724
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
725
    if (!fd) {
726
        ret = -1;
727
        errno = EBADFD;
728
        goto out;
729
    }
730

731
    if (glfd->lk_owner.len != 0) {
732
        ret = syncopctx_setfslkowner(&glfd->lk_owner);
733
        if (ret)
734
            goto out;
735
    }
736
    ret = get_fop_attr_thrd_key(&fop_attr);
737
    if (ret)
738
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
739

740
    ret = syncop_flush(subvol, fd, fop_attr, NULL);
741
    DECODE_SYNCOP_ERR(ret);
742
out:
743
    fs = glfd->fs;
744

745
    if (fd)
746
        fd_unref(fd);
747
    if (fop_attr)
748
        dict_unref(fop_attr);
749

750
    glfs_mark_glfd_for_deletion(glfd);
751
    glfs_subvol_done(fs, subvol);
752

753
    __GLFS_EXIT_FS;
754

755
invalid_fs:
756
    return ret;
757
}
758

759
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lstat, 3.4.0)
760
int
761
pub_glfs_lstat(struct glfs *fs, const char *path, struct stat *stat)
762
{
763
    int ret = -1;
764
    xlator_t *subvol = NULL;
765
    loc_t loc = {
766
        0,
767
    };
768
    struct iatt iatt = {
769
        0,
770
    };
771
    int reval = 0;
772

773
    DECLARE_OLD_THIS;
774
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
775

776
    subvol = glfs_active_subvol(fs);
777
    if (!subvol) {
778
        ret = -1;
779
        errno = EIO;
780
        goto out;
781
    }
782
retry:
783
    ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
784

785
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
786

787
    if (ret == 0 && stat)
788
        glfs_iatt_to_stat(fs, &iatt, stat);
789
out:
790
    loc_wipe(&loc);
791

792
    glfs_subvol_done(fs, subvol);
793

794
    __GLFS_EXIT_FS;
795

796
invalid_fs:
797
    return ret;
798
}
799

800
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_stat, 3.4.0)
801
int
802
pub_glfs_stat(struct glfs *fs, const char *path, struct stat *stat)
803
{
804
    int ret = -1;
805
    xlator_t *subvol = NULL;
806
    loc_t loc = {
807
        0,
808
    };
809
    struct iatt iatt = {
810
        0,
811
    };
812
    int reval = 0;
813

814
    DECLARE_OLD_THIS;
815
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
816

817
    subvol = glfs_active_subvol(fs);
818
    if (!subvol) {
819
        ret = -1;
820
        errno = EIO;
821
        goto out;
822
    }
823
retry:
824
    ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
825

826
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
827

828
    if (ret == 0 && stat)
829
        glfs_iatt_to_stat(fs, &iatt, stat);
830
out:
831
    loc_wipe(&loc);
832

833
    glfs_subvol_done(fs, subvol);
834

835
    __GLFS_EXIT_FS;
836

837
invalid_fs:
838
    return ret;
839
}
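
/*
 * Usage sketch (illustrative only): glfs_stat() follows symlinks while
 * glfs_lstat() above does not, mirroring stat(2)/lstat(2). st_dev is
 * filled with the virtual device id of this glfs instance (see
 * glfs_iatt_to_stat()). The path below is an assumption.
 *
 *   struct stat st;
 *   if (glfs_stat(fs, "/dir/file.txt", &st) == 0 && S_ISREG(st.st_mode))
 *       total_bytes += st.st_size;
 */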
840

841
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fstatat, 11.0)
842
int
843
pub_glfs_fstatat(struct glfs_fd *pglfd, const char *path, struct stat *stat,
844
                 int flags)
845
{
846
    int ret = -1;
847
    xlator_t *subvol = NULL;
848
    loc_t loc = {
849
        0,
850
    };
851
    struct iatt iatt = {
852
        0,
853
    };
854
    int reval = 0;
855
    int is_path_empty = 0;
856

857
    DECLARE_OLD_THIS;
858
    __GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
859
    fd_t *fd = NULL;
860

861
    is_path_empty = (flags & AT_EMPTY_PATH) == AT_EMPTY_PATH;
862

863
retry:
864
    /* Retry case */
865
    if (subvol) {
866
        cleanup_fopat_args(pglfd, subvol, ret, &loc);
867
    }
868

869
    if (is_path_empty && path[0] == '\0') {
870
        GF_REF_GET(pglfd);
871

872
        subvol = glfs_active_subvol(pglfd->fs);
873
        if (!subvol) {
874
            ret = -1;
875
            errno = EIO;
876
            goto out;
877
        }
878

879
        fd = glfs_resolve_fd(pglfd->fs, subvol, pglfd);
880
        if (!fd) {
881
            ret = -1;
882
            errno = EBADFD;
883
            goto out;
884
        }
885

886
        ret = syncop_fstat(subvol, fd, &iatt, NULL, NULL);
887
        DECODE_SYNCOP_ERR(ret);
888
    } else {
889
        subvol = setup_fopat_args(pglfd, path, !(flags & AT_SYMLINK_NOFOLLOW),
890
                                  &loc, &iatt, reval);
891
    }
892

893
    if (!subvol) {
894
        ret = -1;
895
    }
896

897
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
898

899
    if (!subvol || !stat) {
900
        ret = -1;
901
        goto out;
902
    }
903

904
    if (!loc.inode && !is_path_empty) {
905
        ret = -1;
906
        errno = ENOENT;
907
        goto out;
908
    }
909

910
    glfs_iatt_to_stat(pglfd->fs, &iatt, stat);
911
    ret = 0;
912

913
out:
914
    cleanup_fopat_args(pglfd, subvol, ret, &loc);
915
    __GLFS_EXIT_FS;
916

917
invalid_fs:
918
    return ret;
919
}
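
/*
 * Usage sketch (illustrative only): with AT_EMPTY_PATH and an empty
 * path, glfs_fstatat() stats the fd itself (equivalent to an fstat);
 * otherwise the path is resolved relative to the fd, and
 * AT_SYMLINK_NOFOLLOW stats a symlink instead of its target. "link"
 * is an assumed entry name.
 *
 *   struct stat st;
 *   glfs_fstatat(dirfd, "", &st, AT_EMPTY_PATH);
 *   glfs_fstatat(dirfd, "link", &st, AT_SYMLINK_NOFOLLOW);
 */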
920

921
GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_statx, 6.0)
922
int
923
priv_glfs_statx(struct glfs *fs, const char *path, const unsigned int mask,
924
                struct glfs_stat *statxbuf)
925
{
926
    int ret = -1;
927
    xlator_t *subvol = NULL;
928
    loc_t loc = {
929
        0,
930
    };
931
    struct iatt iatt = {
932
        0,
933
    };
934
    int reval = 0;
935

936
    DECLARE_OLD_THIS;
937
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
938

939
    if (path == NULL) {
940
        ret = -1;
941
        errno = EINVAL;
942
        goto out;
943
    }
944

945
    if (mask & ~GLFS_STAT_ALL) {
946
        ret = -1;
947
        errno = EINVAL;
948
        goto out;
949
    }
950

951
    subvol = glfs_active_subvol(fs);
952
    if (!subvol) {
953
        ret = -1;
954
        errno = EIO;
955
        goto out;
956
    }
957

958
retry:
959
    ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
960
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
961

962
    if (ret == 0 && statxbuf)
963
        glfs_iatt_to_statx(fs, &iatt, statxbuf);
964
out:
965
    loc_wipe(&loc);
966

967
    glfs_subvol_done(fs, subvol);
968

969
    __GLFS_EXIT_FS;
970

971
invalid_fs:
972
    return ret;
973
}
974

975
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fstat, 3.4.0)
976
int
977
pub_glfs_fstat(struct glfs_fd *glfd, struct stat *stat)
978
{
979
    int ret = -1;
980
    xlator_t *subvol = NULL;
981
    struct iatt iatt = {
982
        0,
983
    };
984
    fd_t *fd = NULL;
985

986
    DECLARE_OLD_THIS;
987
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
988

989
    GF_REF_GET(glfd);
990

991
    subvol = glfs_active_subvol(glfd->fs);
992
    if (!subvol) {
993
        ret = -1;
994
        errno = EIO;
995
        goto out;
996
    }
997

998
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
999
    if (!fd) {
1000
        ret = -1;
1001
        errno = EBADFD;
1002
        goto out;
1003
    }
1004

1005
    if (stat) {
1006
        ret = syncop_fstat(subvol, fd, &iatt, NULL, NULL);
1007
        if (ret == 0)
1008
            glfs_iatt_to_stat(glfd->fs, &iatt, stat);
1009
    } else
1010
        ret = syncop_fstat(subvol, fd, NULL, NULL, NULL);
1011
    DECODE_SYNCOP_ERR(ret);
1012

1013
out:
1014
    if (fd)
1015
        fd_unref(fd);
1016
    if (glfd)
1017
        GF_REF_PUT(glfd);
1018

1019
    glfs_subvol_done(glfd->fs, subvol);
1020

1021
    __GLFS_EXIT_FS;
1022

1023
invalid_fs:
1024
    return ret;
1025
}
1026

1027
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_creat, 3.4.0)
1028
struct glfs_fd *
1029
pub_glfs_creat(struct glfs *fs, const char *path, int flags, mode_t mode)
1030
{
1031
    int ret = -1;
1032
    struct glfs_fd *glfd = NULL;
1033
    xlator_t *subvol = NULL;
1034
    loc_t loc = {
1035
        0,
1036
    };
1037
    struct iatt iatt = {
1038
        0,
1039
    };
1040
    uuid_t gfid;
1041
    dict_t *xattr_req = NULL;
1042
    int reval = 0;
1043

1044
    DECLARE_OLD_THIS;
1045
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
1046

1047
    subvol = glfs_active_subvol(fs);
1048
    if (!subvol) {
1049
        ret = -1;
1050
        errno = EIO;
1051
        goto out;
1052
    }
1053

1054
    xattr_req = dict_new();
1055
    if (!xattr_req) {
1056
        ret = -1;
1057
        errno = ENOMEM;
1058
        goto out;
1059
    }
1060

1061
    gf_uuid_generate(gfid);
1062
    ret = dict_set_gfuuid(xattr_req, "gfid-req", gfid, true);
1063
    if (ret) {
1064
        ret = -1;
1065
        errno = ENOMEM;
1066
        goto out;
1067
    }
1068

1069
    glfd = glfs_fd_new(fs);
1070
    if (!glfd)
1071
        goto out;
1072

1073
    /* This must be glfs_resolve() and NOT glfs_lresolve().
1074
       That is because open("name", O_CREAT) where "name"
1075
       is a dangling symlink must create the dangling
1076
       destination.
1077
    */
1078
retry:
1079
    ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
1080

1081
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
1082

1083
    if (ret == -1 && errno != ENOENT)
1084
        /* Any other type of error is fatal */
1085
        goto out;
1086

1087
    if (ret == -1 && errno == ENOENT && !loc.parent)
1088
        /* The parent directory or an ancestor even
1089
           higher does not exist
1090
        */
1091
        goto out;
1092

1093
    if (loc.inode) {
1094
        if (flags & O_EXCL) {
1095
            ret = -1;
1096
            errno = EEXIST;
1097
            goto out;
1098
        }
1099

1100
        if (IA_ISDIR(iatt.ia_type)) {
1101
            ret = -1;
1102
            errno = EISDIR;
1103
            goto out;
1104
        }
1105

1106
        if (!IA_ISREG(iatt.ia_type)) {
1107
            ret = -1;
1108
            errno = EINVAL;
1109
            goto out;
1110
        }
1111
    }
1112

1113
    if (ret == -1 && errno == ENOENT) {
1114
        loc.inode = inode_new(loc.parent->table);
1115
        if (!loc.inode) {
1116
            ret = -1;
1117
            errno = ENOMEM;
1118
            goto out;
1119
        }
1120
    }
1121

1122
    if (glfd->fd) {
1123
        /* Retry. Safe to touch glfd->fd as we
1124
           have not called glfs_fd_bind() yet.
1125
        */
1126
        fd_unref(glfd->fd);
1127
        glfd->fd = NULL;
1128
    }
1129

1130
    glfd->fd = fd_create(loc.inode, getpid());
1131
    if (!glfd->fd) {
1132
        ret = -1;
1133
        errno = ENOMEM;
1134
        goto out;
1135
    }
1136
    glfd->fd->flags = flags;
1137

1138
    if (get_fop_attr_thrd_key(&xattr_req))
1139
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
1140
    if (ret == 0) {
1141
        ret = syncop_open(subvol, &loc, flags, glfd->fd, xattr_req, NULL);
1142
        DECODE_SYNCOP_ERR(ret);
1143
    } else {
1144
        ret = syncop_create(subvol, &loc, flags, mode, glfd->fd, &iatt,
1145
                            xattr_req, NULL);
1146
        DECODE_SYNCOP_ERR(ret);
1147
    }
1148

1149
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
1150

1151
    if (ret == 0)
1152
        ret = glfs_loc_link(&loc, &iatt);
1153
out:
1154
    loc_wipe(&loc);
1155

1156
    if (xattr_req)
1157
        dict_unref(xattr_req);
1158

1159
    if (ret && glfd) {
1160
        GF_REF_PUT(glfd);
1161
        glfd = NULL;
1162
    } else if (glfd) {
1163
        glfd_set_state_bind(glfd);
1164
    }
1165

1166
    glfs_subvol_done(fs, subvol);
1167

1168
    __GLFS_EXIT_FS;
1169

1170
invalid_fs:
1171
    return glfd;
1172
}
1173

1174
#ifdef HAVE_SEEK_HOLE
1175
static int
1176
glfs_seek(struct glfs_fd *glfd, off_t offset, int whence)
1177
{
1178
    int ret = -1;
1179
    xlator_t *subvol = NULL;
1180
    fd_t *fd = NULL;
1181
    gf_seek_what_t what = 0;
1182
    off_t off = -1;
1183

1184
    switch (whence) {
1185
        case SEEK_DATA:
1186
            what = GF_SEEK_DATA;
1187
            break;
1188
        case SEEK_HOLE:
1189
            what = GF_SEEK_HOLE;
1190
            break;
1191
        default:
1192
            /* other SEEK_* do not make sense, all operations get an offset
1193
             * and the position in the fd is not tracked */
1194
            errno = EINVAL;
1195
            goto out;
1196
    }
1197

1198
    subvol = glfs_active_subvol(glfd->fs);
1199
    if (!subvol) {
1200
        errno = EIO;
1201
        goto out;
1202
    }
1203

1204
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
1205
    if (!fd) {
1206
        errno = EBADFD;
1207
        goto done;
1208
    }
1209

1210
    ret = syncop_seek(subvol, fd, offset, what, NULL, &off);
1211
    DECODE_SYNCOP_ERR(ret);
1212

1213
    if (ret != -1)
1214
        glfd->offset = off;
1215

1216
done:
1217
    if (fd)
1218
        fd_unref(fd);
1219

1220
    glfs_subvol_done(glfd->fs, subvol);
1221

1222
out:
1223
    return ret;
1224
}
1225
#endif
1226

1227
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lseek, 3.4.0)
1228
off_t
1229
pub_glfs_lseek(struct glfs_fd *glfd, off_t offset, int whence)
1230
{
1231
    struct stat sb = {
1232
        0,
1233
    };
1234
    int ret = -1;
1235
    off_t off = -1;
1236

1237
    DECLARE_OLD_THIS;
1238
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
1239

1240
    GF_REF_GET(glfd);
1241

1242
    switch (whence) {
1243
        case SEEK_SET:
1244
            glfd->offset = offset;
1245
            ret = 0;
1246
            break;
1247
        case SEEK_CUR:
1248
            glfd->offset += offset;
1249
            ret = 0;
1250
            break;
1251
        case SEEK_END:
1252
            ret = pub_glfs_fstat(glfd, &sb);
1253
            if (ret) {
1254
                /* seek cannot fail :O */
1255
                break;
1256
            }
1257
            glfd->offset = sb.st_size + offset;
1258
            break;
1259
#ifdef HAVE_SEEK_HOLE
1260
        case SEEK_DATA:
1261
        case SEEK_HOLE:
1262
            ret = glfs_seek(glfd, offset, whence);
1263
            break;
1264
#endif
1265
        default:
1266
            errno = EINVAL;
1267
    }
1268

1269
    if (glfd)
1270
        GF_REF_PUT(glfd);
1271

1272
    __GLFS_EXIT_FS;
1273

1274
    if (ret != -1)
1275
        off = glfd->offset;
1276

1277
    return off;
1278

1279
invalid_fs:
1280
    return -1;
1281
}
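
/*
 * Usage sketch (illustrative only): besides the usual SEEK_SET/CUR/END,
 * builds with HAVE_SEEK_HOLE can walk the allocated regions of a sparse
 * file, mirroring lseek(2); unsupported whence values fail with EINVAL.
 *
 *   off_t data = glfs_lseek(fd, 0, SEEK_DATA);
 *   off_t hole = (data >= 0) ? glfs_lseek(fd, data, SEEK_HOLE) : -1;
 *   glfs_lseek(fd, 0, SEEK_SET);
 */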
1282

1283
static ssize_t
1284
glfs_preadv_common(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
1285
                   off_t offset, int flags, struct glfs_stat *poststat)
1286
{
1287
    xlator_t *subvol = NULL;
1288
    ssize_t ret = -1;
1289
    ssize_t size = -1;
1290
    struct iovec *iov = NULL;
1291
    int cnt = 0;
1292
    struct iobref *iobref = NULL;
1293
    fd_t *fd = NULL;
1294
    struct iatt iatt = {
1295
        0,
1296
    };
1297
    dict_t *fop_attr = NULL;
1298

1299
    DECLARE_OLD_THIS;
1300
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
1301

1302
    GF_REF_GET(glfd);
1303

1304
    subvol = glfs_active_subvol(glfd->fs);
1305
    if (!subvol) {
1306
        ret = -1;
1307
        errno = EIO;
1308
        goto out;
1309
    }
1310

1311
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
1312
    if (!fd) {
1313
        ret = -1;
1314
        errno = EBADFD;
1315
        goto out;
1316
    }
1317

1318
    size = iov_length(iovec, iovcnt);
1319

1320
    ret = get_fop_attr_thrd_key(&fop_attr);
1321
    if (ret)
1322
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
1323

1324
    if (poststat) {
1325
        ret = syncop_readv(subvol, fd, size, offset, 0, &iov, &cnt, &iobref,
1326
                           &iatt, fop_attr, NULL);
1327
        if (ret >= 0)
1328
            glfs_iatt_to_statx(glfd->fs, &iatt, poststat);
1329
    } else
1330
        ret = syncop_readv(subvol, fd, size, offset, 0, &iov, &cnt, &iobref,
1331
                           NULL, fop_attr, NULL);
1332

1333
    DECODE_SYNCOP_ERR(ret);
1334

1335
    if (ret <= 0)
1336
        goto out;
1337

1338
    size = iov_copy(iovec, iovcnt, iov, cnt); /* FIXME!!! */
1339

1340
    glfd->offset = (offset + size);
1341

1342
    ret = size;
1343
out:
1344
    if (iov)
1345
        GF_FREE(iov);
1346
    if (iobref)
1347
        iobref_unref(iobref);
1348

1349
    if (fd)
1350
        fd_unref(fd);
1351
    if (glfd)
1352
        GF_REF_PUT(glfd);
1353
    if (fop_attr)
1354
        dict_unref(fop_attr);
1355

1356
    glfs_subvol_done(glfd->fs, subvol);
1357

1358
    __GLFS_EXIT_FS;
1359

1360
invalid_fs:
1361
    return ret;
1362
}
1363

1364
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_preadv, 3.4.0)
1365
ssize_t
1366
pub_glfs_preadv(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
1367
                off_t offset, int flags)
1368
{
1369
    return glfs_preadv_common(glfd, iovec, iovcnt, offset, flags, NULL);
1370
}
1371

1372
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_read, 3.4.0)
1373
ssize_t
1374
pub_glfs_read(struct glfs_fd *glfd, void *buf, size_t count, int flags)
1375
{
1376
    struct iovec iov = {
1377
        0,
1378
    };
1379
    ssize_t ret = 0;
1380

1381
    if (glfd == NULL) {
1382
        errno = EBADF;
1383
        return -1;
1384
    }
1385

1386
    iov.iov_base = buf;
1387
    iov.iov_len = count;
1388

1389
    ret = pub_glfs_preadv(glfd, &iov, 1, glfd->offset, flags);
1390

1391
    return ret;
1392
}
1393

1394
GFAPI_SYMVER_PUBLIC(glfs_pread34, glfs_pread, 3.4.0)
1395
ssize_t
1396
pub_glfs_pread34(struct glfs_fd *glfd, void *buf, size_t count, off_t offset,
1397
                 int flags)
1398
{
1399
    struct iovec iov = {
1400
        0,
1401
    };
1402
    ssize_t ret = 0;
1403

1404
    iov.iov_base = buf;
1405
    iov.iov_len = count;
1406

1407
    ret = pub_glfs_preadv(glfd, &iov, 1, offset, flags);
1408

1409
    return ret;
1410
}
1411

1412
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pread, 6.0)
1413
ssize_t
1414
pub_glfs_pread(struct glfs_fd *glfd, void *buf, size_t count, off_t offset,
1415
               int flags, struct glfs_stat *poststat)
1416
{
1417
    struct iovec iov = {
1418
        0,
1419
    };
1420
    ssize_t ret = 0;
1421

1422
    iov.iov_base = buf;
1423
    iov.iov_len = count;
1424

1425
    ret = glfs_preadv_common(glfd, &iov, 1, offset, flags, poststat);
1426

1427
    return ret;
1428
}
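
/*
 * Usage sketch (illustrative only): the versioned glfs_pread() above can
 * return post-operation attributes along with the data, avoiding a
 * separate fstat; only mask-validated fields should be consumed.
 *
 *   char buf[4096];
 *   struct glfs_stat poststat;
 *   ssize_t n = glfs_pread(fd, buf, sizeof(buf), 0, 0, &poststat);
 *   if (n >= 0 && (poststat.glfs_st_mask & GLFS_STAT_SIZE))
 *       current_size = poststat.glfs_st_size;
 */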
1429

1430
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readv, 3.4.0)
1431
ssize_t
1432
pub_glfs_readv(struct glfs_fd *glfd, const struct iovec *iov, int count,
1433
               int flags)
1434
{
1435
    ssize_t ret = 0;
1436

1437
    if (glfd == NULL) {
1438
        errno = EBADF;
1439
        return -1;
1440
    }
1441

1442
    ret = pub_glfs_preadv(glfd, iov, count, glfd->offset, flags);
1443

1444
    return ret;
1445
}
1446

1447
struct glfs_io {
1448
    struct glfs_fd *glfd;
1449
    int op;
1450
    off_t offset;
1451
    int count;
1452
    int flags;
1453
    gf_boolean_t oldcb;
1454
    union {
1455
        glfs_io_cbk34 fn34;
1456
        glfs_io_cbk fn;
1457
    };
1458
    void *data;
1459
    struct iovec iov[];
1460
};
1461

1462
static int
1463
glfs_io_async_cbk(int op_ret, int op_errno, call_frame_t *frame, void *cookie,
1464
                  struct iovec *iovec, int count, struct iatt *prebuf,
1465
                  struct iatt *postbuf)
1466
{
1467
    struct glfs_io *gio = NULL;
1468
    xlator_t *subvol = NULL;
1469
    struct glfs *fs = NULL;
1470
    struct glfs_fd *glfd = NULL;
1471
    int ret = -1;
1472
    struct glfs_stat prestat = {}, *prestatp = NULL;
1473
    struct glfs_stat poststat = {}, *poststatp = NULL;
1474

1475
    GF_VALIDATE_OR_GOTO("gfapi", frame, inval);
1476
    GF_VALIDATE_OR_GOTO("gfapi", cookie, inval);
1477

1478
    gio = frame->local;
1479
    frame->local = NULL;
1480
    subvol = cookie;
1481
    glfd = gio->glfd;
1482
    fs = glfd->fs;
1483

1484
    if (!glfs_is_glfd_still_valid(glfd))
1485
        goto err;
1486

1487
    if (op_ret <= 0) {
1488
        goto out;
1489
    } else if (gio->op == GF_FOP_READ) {
1490
        if (!iovec) {
1491
            op_ret = -1;
1492
            op_errno = EINVAL;
1493
            goto out;
1494
        }
1495

1496
        op_ret = iov_copy(gio->iov, gio->count, iovec, count);
1497
        glfd->offset = gio->offset + op_ret;
1498
    } else if (gio->op == GF_FOP_WRITE) {
1499
        glfd->offset = gio->offset + gio->iov->iov_len;
1500
    }
1501

1502
out:
1503
    errno = op_errno;
1504
    if (gio->oldcb) {
1505
        gio->fn34(gio->glfd, op_ret, gio->data);
1506
    } else {
1507
        if (prebuf) {
1508
            prestatp = &prestat;
1509
            glfs_iatt_to_statx(fs, prebuf, prestatp);
1510
        }
1511

1512
        if (postbuf) {
1513
            poststatp = &poststat;
1514
            glfs_iatt_to_statx(fs, postbuf, poststatp);
1515
        }
1516

1517
        gio->fn(gio->glfd, op_ret, prestatp, poststatp, gio->data);
1518
    }
1519
err:
1520
    fd_unref(glfd->fd);
1521
    /* Since the async operation is complete
1522
     * release the ref taken during the start
1523
     * of async operation
1524
     */
1525
    GF_REF_PUT(glfd);
1526

1527
    GF_FREE(gio);
1528
    STACK_DESTROY(frame->root);
1529
    glfs_subvol_done(fs, subvol);
1530

1531
    ret = 0;
1532
inval:
1533
    return ret;
1534
}
1535

1536
static int
1537
glfs_preadv_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
1538
                      int op_ret, int op_errno, struct iovec *iovec, int count,
1539
                      struct iatt *stbuf, struct iobref *iobref, dict_t *xdata)
1540
{
1541
    glfs_io_async_cbk(op_ret, op_errno, frame, cookie, iovec, count, NULL,
1542
                      stbuf);
1543

1544
    return 0;
1545
}
1546

1547
static int
1548
glfs_preadv_async_common(struct glfs_fd *glfd, const struct iovec *iovec,
1549
                         int count, off_t offset, int flags, gf_boolean_t oldcb,
1550
                         glfs_io_cbk fn, void *data)
1551
{
1552
    struct glfs_io *gio = NULL;
1553
    int ret = 0;
1554
    call_frame_t *frame = NULL;
1555
    xlator_t *subvol = NULL;
1556
    struct glfs *fs = NULL;
1557
    fd_t *fd = NULL;
1558
    dict_t *fop_attr = NULL;
1559

1560
    DECLARE_OLD_THIS;
1561
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
1562

1563
    GF_REF_GET(glfd);
1564

1565
    subvol = glfs_active_subvol(glfd->fs);
1566
    if (!subvol) {
1567
        ret = -1;
1568
        errno = EIO;
1569
        goto out;
1570
    }
1571

1572
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
1573
    if (!fd) {
1574
        ret = -1;
1575
        errno = EBADFD;
1576
        goto out;
1577
    }
1578

1579
    fs = glfd->fs;
1580

1581
    frame = syncop_create_frame(THIS);
1582
    if (!frame) {
1583
        ret = -1;
1584
        errno = ENOMEM;
1585
        goto out;
1586
    }
1587

1588
    gio = GF_MALLOC(sizeof(*gio) + (count * sizeof(struct iovec)),
1589
                    glfs_mt_glfs_io_t);
1590
    if (!gio) {
1591
        ret = -1;
1592
        errno = ENOMEM;
1593
        goto out;
1594
    }
1595
    gio->glfd = glfd;
1596
    gio->op = GF_FOP_READ;
1597
    gio->offset = offset;
1598
    gio->count = count;
1599
    gio->flags = flags;
1600
    gio->oldcb = oldcb;
1601
    gio->fn = fn;
1602
    gio->data = data;
1603
    memcpy(gio->iov, iovec, sizeof(struct iovec) * count);
1604

1605
    frame->local = gio;
1606

1607
    ret = get_fop_attr_thrd_key(&fop_attr);
1608
    if (ret)
1609
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
1610

1611
    STACK_WIND_COOKIE(frame, glfs_preadv_async_cbk, subvol, subvol,
1612
                      subvol->fops->readv, fd, iov_length(iovec, count), offset,
1613
                      flags, fop_attr);
1614

1615
out:
1616
    if (ret) {
1617
        if (fd)
1618
            fd_unref(fd);
1619
        if (glfd)
1620
            GF_REF_PUT(glfd);
1621
        if (gio) {
1622
            GF_FREE(gio);
1623
        }
1624
        if (frame) {
1625
            STACK_DESTROY(frame->root);
1626
        }
1627
        glfs_subvol_done(fs, subvol);
1628
    }
1629
    if (fop_attr)
1630
        dict_unref(fop_attr);
1631

1632
    __GLFS_EXIT_FS;
1633

1634
    return ret;
1635

1636
invalid_fs:
1637
    return -1;
1638
}
1639

1640
GFAPI_SYMVER_PUBLIC(glfs_preadv_async34, glfs_preadv_async, 3.4.0)
1641
int
1642
pub_glfs_preadv_async34(struct glfs_fd *glfd, const struct iovec *iovec,
1643
                        int count, off_t offset, int flags, glfs_io_cbk34 fn,
1644
                        void *data)
1645
{
1646
    return glfs_preadv_async_common(glfd, iovec, count, offset, flags, _gf_true,
1647
                                    (void *)fn, data);
1648
}
1649

1650
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_preadv_async, 6.0)
1651
int
1652
pub_glfs_preadv_async(struct glfs_fd *glfd, const struct iovec *iovec,
1653
                      int count, off_t offset, int flags, glfs_io_cbk fn,
1654
                      void *data)
1655
{
1656
    return glfs_preadv_async_common(glfd, iovec, count, offset, flags,
1657
                                    _gf_false, fn, data);
1658
}
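
/*
 * Usage sketch (illustrative only): the async variants complete through
 * a glfs_io_cbk callback, which typically runs on a GlusterFS-internal
 * thread; the caller's iovec buffers must stay valid until the callback
 * fires, since the data is copied into them at completion.
 *
 *   static void
 *   read_done(glfs_fd_t *fd, ssize_t ret, struct glfs_stat *prestat,
 *             struct glfs_stat *poststat, void *data)
 *   {
 *       if (ret < 0)
 *           fprintf(stderr, "async read failed, errno %d\n", errno);
 *   }
 *
 *   struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *   glfs_preadv_async(fd, &iov, 1, 0, 0, read_done, NULL);
 */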
1659

1660
GFAPI_SYMVER_PUBLIC(glfs_read_async34, glfs_read_async, 3.4.0)
1661
int
1662
pub_glfs_read_async34(struct glfs_fd *glfd, void *buf, size_t count, int flags,
1663
                      glfs_io_cbk34 fn, void *data)
1664
{
1665
    struct iovec iov = {
1666
        0,
1667
    };
1668
    ssize_t ret = 0;
1669

1670
    if (glfd == NULL) {
1671
        errno = EBADF;
1672
        return -1;
1673
    }
1674

1675
    iov.iov_base = buf;
1676
    iov.iov_len = count;
1677

1678
    ret = glfs_preadv_async_common(glfd, &iov, 1, glfd->offset, flags, _gf_true,
1679
                                   (void *)fn, data);
1680

1681
    return ret;
1682
}
1683

1684
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_read_async, 6.0)
1685
int
1686
pub_glfs_read_async(struct glfs_fd *glfd, void *buf, size_t count, int flags,
1687
                    glfs_io_cbk fn, void *data)
1688
{
1689
    struct iovec iov = {
1690
        0,
1691
    };
1692
    ssize_t ret = 0;
1693

1694
    if (glfd == NULL) {
1695
        errno = EBADF;
1696
        return -1;
1697
    }
1698

1699
    iov.iov_base = buf;
1700
    iov.iov_len = count;
1701

1702
    ret = glfs_preadv_async_common(glfd, &iov, 1, glfd->offset, flags,
1703
                                   _gf_false, fn, data);
1704

1705
    return ret;
1706
}
1707

1708
GFAPI_SYMVER_PUBLIC(glfs_pread_async34, glfs_pread_async, 3.4.0)
1709
int
1710
pub_glfs_pread_async34(struct glfs_fd *glfd, void *buf, size_t count,
1711
                       off_t offset, int flags, glfs_io_cbk34 fn, void *data)
1712
{
1713
    struct iovec iov = {
1714
        0,
1715
    };
1716
    ssize_t ret = 0;
1717

1718
    iov.iov_base = buf;
1719
    iov.iov_len = count;
1720

1721
    ret = glfs_preadv_async_common(glfd, &iov, 1, offset, flags, _gf_true,
1722
                                   (void *)fn, data);
1723

1724
    return ret;
1725
}
1726

1727
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pread_async, 6.0)
1728
int
1729
pub_glfs_pread_async(struct glfs_fd *glfd, void *buf, size_t count,
1730
                     off_t offset, int flags, glfs_io_cbk fn, void *data)
1731
{
1732
    struct iovec iov = {
1733
        0,
1734
    };
1735
    ssize_t ret = 0;
1736

1737
    iov.iov_base = buf;
1738
    iov.iov_len = count;
1739

1740
    ret = glfs_preadv_async_common(glfd, &iov, 1, offset, flags, _gf_false, fn,
1741
                                   data);
1742

1743
    return ret;
1744
}
1745

1746
GFAPI_SYMVER_PUBLIC(glfs_readv_async34, glfs_readv_async, 3.4.0)
1747
int
1748
pub_glfs_readv_async34(struct glfs_fd *glfd, const struct iovec *iov, int count,
1749
                       int flags, glfs_io_cbk34 fn, void *data)
1750
{
1751
    ssize_t ret = 0;
1752

1753
    if (glfd == NULL) {
1754
        errno = EBADF;
1755
        return -1;
1756
    }
1757

1758
    ret = glfs_preadv_async_common(glfd, iov, count, glfd->offset, flags,
1759
                                   _gf_true, (void *)fn, data);
1760
    return ret;
1761
}
1762

1763
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readv_async, 6.0)
1764
int
1765
pub_glfs_readv_async(struct glfs_fd *glfd, const struct iovec *iov, int count,
1766
                     int flags, glfs_io_cbk fn, void *data)
1767
{
1768
    ssize_t ret = 0;
1769

1770
    if (glfd == NULL) {
1771
        errno = EBADF;
1772
        return -1;
1773
    }
1774

1775
    ret = glfs_preadv_async_common(glfd, iov, count, glfd->offset, flags,
1776
                                   _gf_false, fn, data);
1777
    return ret;
1778
}
1779

1780
static ssize_t
1781
glfs_pwritev_common(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
1782
                    off_t offset, int flags, struct glfs_stat *prestat,
1783
                    struct glfs_stat *poststat)
1784
{
1785
    xlator_t *subvol = NULL;
1786
    int ret = -1;
1787
    struct iobref *iobref = NULL;
1788
    struct iobuf *iobuf = NULL;
1789
    struct iovec iov = {
1790
        0,
1791
    };
1792
    fd_t *fd = NULL;
1793
    struct iatt preiatt =
1794
                    {
1795
                        0,
1796
                    },
1797
                postiatt = {
1798
                    0,
1799
                };
1800
    dict_t *fop_attr = NULL;
1801

1802
    DECLARE_OLD_THIS;
1803
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
1804

1805
    GF_REF_GET(glfd);
1806

1807
    if (iovec->iov_len >= GF_UNIT_GB) {
1808
        ret = -1;
1809
        errno = EINVAL;
1810
        gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
1811
                "Data size too large", "size = %llu", GF_UNIT_GB, NULL);
1812
        goto out;
1813
    }
1814

1815
    subvol = glfs_active_subvol(glfd->fs);
1816
    if (!subvol) {
1817
        ret = -1;
1818
        errno = EIO;
1819
        goto out;
1820
    }
1821

1822
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
1823
    if (!fd) {
1824
        ret = -1;
1825
        errno = EBADFD;
1826
        goto out;
1827
    }
1828

1829
    ret = iobuf_copy(subvol->ctx->iobuf_pool, iovec, iovcnt, &iobref, &iobuf,
1830
                     &iov);
1831
    if (ret)
1832
        goto out;
1833

1834
    ret = get_fop_attr_thrd_key(&fop_attr);
1835
    if (ret)
1836
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
1837

1838
    ret = syncop_writev(subvol, fd, &iov, 1, offset, iobref, flags, &preiatt,
1839
                        &postiatt, fop_attr, NULL);
1840
    DECODE_SYNCOP_ERR(ret);
1841

1842
    if (ret >= 0) {
1843
        if (prestat)
1844
            glfs_iatt_to_statx(glfd->fs, &preiatt, prestat);
1845
        if (poststat)
1846
            glfs_iatt_to_statx(glfd->fs, &postiatt, poststat);
1847
    }
1848

1849
    if (ret <= 0)
1850
        goto out;
1851

1852
    glfd->offset = (offset + iov.iov_len);
1853
out:
1854
    if (iobuf)
1855
        iobuf_unref(iobuf);
1856
    if (iobref)
1857
        iobref_unref(iobref);
1858
    if (fd)
1859
        fd_unref(fd);
1860
    if (glfd)
1861
        GF_REF_PUT(glfd);
1862
    if (fop_attr)
1863
        dict_unref(fop_attr);
1864

1865
    glfs_subvol_done(glfd->fs, subvol);
1866

1867
    __GLFS_EXIT_FS;
1868

1869
invalid_fs:
1870
    return ret;
1871
}
1872

1873
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_copy_file_range, 6.0)
1874
ssize_t
1875
pub_glfs_copy_file_range(struct glfs_fd *glfd_in, off64_t *off_in,
1876
                         struct glfs_fd *glfd_out, off64_t *off_out, size_t len,
1877
                         unsigned int flags, struct glfs_stat *statbuf,
1878
                         struct glfs_stat *prestat, struct glfs_stat *poststat)
1879
{
1880
    xlator_t *subvol = NULL;
1881
    int ret = -1;
1882
    fd_t *fd_in = NULL;
1883
    fd_t *fd_out = NULL;
1884
    struct iatt preiatt =
1885
                    {
1886
                        0,
1887
                    },
1888
                iattbuf =
1889
                    {
1890
                        0,
1891
                    },
1892
                postiatt = {
1893
                    0,
1894
                };
1895
    dict_t *fop_attr = NULL;
1896
    off64_t pos_in;
1897
    off64_t pos_out;
1898

1899
    DECLARE_OLD_THIS;
1900
    __GLFS_ENTRY_VALIDATE_FD(glfd_in, invalid_fs);
1901
    __GLFS_ENTRY_VALIDATE_FD(glfd_out, invalid_fs);
1902

1903
    GF_REF_GET(glfd_in);
1904
    GF_REF_GET(glfd_out);
1905

1906
    if (glfd_in->fs != glfd_out->fs) {
1907
        ret = -1;
1908
        errno = EXDEV;
1909
        goto out;
1910
    }
1911

1912
    subvol = glfs_active_subvol(glfd_in->fs);
1913
    if (!subvol) {
1914
        ret = -1;
1915
        errno = EIO;
1916
        goto out;
1917
    }
1918

1919
    fd_in = glfs_resolve_fd(glfd_in->fs, subvol, glfd_in);
1920
    if (!fd_in) {
1921
        ret = -1;
1922
        errno = EBADFD;
1923
        goto out;
1924
    }
1925

1926
    fd_out = glfs_resolve_fd(glfd_out->fs, subvol, glfd_out);
1927
    if (!fd_out) {
1928
        ret = -1;
1929
        errno = EBADFD;
1930
        goto out;
1931
    }
1932

1933
    /*
1934
     * This is based on how the vfs layer in the kernel handles
1935
     * the copy_file_range call. Upon receiving the call, it uses the
     * following method to determine the offset:
1937
     * if (off_in != NULL)
1938
     *    use the value off_in to perform the op
1939
     * else if off_in == NULL
1940
     *    use the current file offset position to perform the op
1941
     *
1942
     * For gfapi, glfd->offset is used. For a freshly opened
1943
     * fd, the offset is set to 0.
1944
     */
1945
    if (off_in)
1946
        pos_in = *off_in;
1947
    else
1948
        pos_in = glfd_in->offset;
1949

1950
    if (off_out)
1951
        pos_out = *off_out;
1952
    else
1953
        pos_out = glfd_out->offset;
1954

1955
    ret = get_fop_attr_thrd_key(&fop_attr);
1956
    if (ret)
1957
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
1958

1959
    ret = syncop_copy_file_range(subvol, fd_in, pos_in, fd_out, pos_out, len,
1960
                                 flags, &iattbuf, &preiatt, &postiatt, fop_attr,
1961
                                 NULL);
1962
    DECODE_SYNCOP_ERR(ret);
1963

1964
    if (ret >= 0) {
1965
        pos_in += ret;
1966
        pos_out += ret;
1967

1968
        if (off_in)
1969
            *off_in = pos_in;
1970
        if (off_out)
1971
            *off_out = pos_out;
1972

1973
        if (statbuf)
1974
            glfs_iatt_to_statx(glfd_in->fs, &iattbuf, statbuf);
1975
        if (prestat)
1976
            glfs_iatt_to_statx(glfd_in->fs, &preiatt, prestat);
1977
        if (poststat)
1978
            glfs_iatt_to_statx(glfd_in->fs, &postiatt, poststat);
1979
    }
1980

1981
    if (ret <= 0)
1982
        goto out;
1983

1984
    /*
1985
     * If off_in is NULL, then there is no offset info that can be
     * obtained from the input argument. Hence the following method is
     * used:
     *  If off_in is NULL, then
1988
     *     glfd->offset = offset + ret;
1989
     * else
1990
     *     do nothing.
1991
     *
1992
     * According to the man page of copy_file_range, if off_in is
1993
     * NULL, then the offset of the source file is advanced by
1994
     * the return value of the fop. The same applies to off_out as
1995
     * well. Otherwise, if off_in is not NULL, then the offset
1996
     * is not advanced by the filesystem. The entity which sends
1997
     * the copy_file_range call is supposed to advance the offset
1998
     * value in its buffer (pointed to by *off_in or *off_out)
1999
     * by the return value of copy_file_range.
2000
     */
2001
    if (!off_in)
2002
        glfd_in->offset += ret;
2003

2004
    if (!off_out)
2005
        glfd_out->offset += ret;
2006

2007
out:
2008
    if (fd_in)
2009
        fd_unref(fd_in);
2010
    if (fd_out)
2011
        fd_unref(fd_out);
2012
    if (glfd_in)
2013
        GF_REF_PUT(glfd_in);
2014
    if (glfd_out)
2015
        GF_REF_PUT(glfd_out);
2016
    if (fop_attr)
2017
        dict_unref(fop_attr);
2018

2019
    glfs_subvol_done(glfd_in->fs, subvol);
2020

2021
    __GLFS_EXIT_FS;
2022

2023
invalid_fs:
2024
    return ret;
2025
}
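
/*
 * Usage sketch (illustrative only): both fds must belong to the same
 * glfs instance (EXDEV otherwise). Passing NULL for off_in/off_out uses
 * and advances the fds' own offsets, mirroring copy_file_range(2); the
 * optional stat arguments may be NULL when the attributes are not
 * needed.
 *
 *   ssize_t copied = glfs_copy_file_range(fd_in, NULL, fd_out, NULL,
 *                                         len, 0, NULL, NULL, NULL);
 */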
2026

2027
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwritev, 3.4.0)
2028
ssize_t
2029
pub_glfs_pwritev(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
2030
                 off_t offset, int flags)
2031
{
2032
    return glfs_pwritev_common(glfd, iovec, iovcnt, offset, flags, NULL, NULL);
2033
}
2034

2035
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_write, 3.4.0)
2036
ssize_t
2037
pub_glfs_write(struct glfs_fd *glfd, const void *buf, size_t count, int flags)
2038
{
2039
    struct iovec iov = {
2040
        0,
2041
    };
2042
    ssize_t ret = 0;
2043

2044
    if (glfd == NULL) {
2045
        errno = EBADF;
2046
        return -1;
2047
    }
2048

2049
    iov.iov_base = (void *)buf;
2050
    iov.iov_len = count;
2051

2052
    ret = pub_glfs_pwritev(glfd, &iov, 1, glfd->offset, flags);
2053

2054
    return ret;
2055
}
2056

2057
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_writev, 3.4.0)
2058
ssize_t
2059
pub_glfs_writev(struct glfs_fd *glfd, const struct iovec *iov, int count,
2060
                int flags)
2061
{
2062
    ssize_t ret = 0;
2063

2064
    if (glfd == NULL) {
2065
        errno = EBADF;
2066
        return -1;
2067
    }
2068

2069
    ret = pub_glfs_pwritev(glfd, iov, count, glfd->offset, flags);
2070

2071
    return ret;
2072
}
2073
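/*
 * Illustrative usage sketch (not part of the original file): gathering two
 * caller buffers into one write at the descriptor's current offset via
 * glfs_writev(). The helper name example_gather_write() is hypothetical;
 * struct iovec comes from <sys/uio.h>, which the public header already
 * pulls in.
 */
static ssize_t
example_gather_write(struct glfs_fd *glfd, const void *hdr, size_t hdr_len,
                     const void *body, size_t body_len)
{
    struct iovec iov[2];

    iov[0].iov_base = (void *)hdr;
    iov[0].iov_len = hdr_len;
    iov[1].iov_base = (void *)body;
    iov[1].iov_len = body_len;

    /* Both segments are written back-to-back starting at glfd->offset. */
    return glfs_writev(glfd, iov, 2, 0);
}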

2074
GFAPI_SYMVER_PUBLIC(glfs_pwrite34, glfs_pwrite, 3.4.0)
2075
ssize_t
2076
pub_glfs_pwrite34(struct glfs_fd *glfd, const void *buf, size_t count,
2077
                  off_t offset, int flags)
2078
{
2079
    struct iovec iov = {
2080
        0,
2081
    };
2082
    ssize_t ret = 0;
2083

2084
    iov.iov_base = (void *)buf;
2085
    iov.iov_len = count;
2086

2087
    ret = pub_glfs_pwritev(glfd, &iov, 1, offset, flags);
2088

2089
    return ret;
2090
}
2091

2092
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwrite, 6.0)
2093
ssize_t
2094
pub_glfs_pwrite(struct glfs_fd *glfd, const void *buf, size_t count,
2095
                off_t offset, int flags, struct glfs_stat *prestat,
2096
                struct glfs_stat *poststat)
2097
{
2098
    struct iovec iov = {
2099
        0,
2100
    };
2101
    ssize_t ret = 0;
2102

2103
    iov.iov_base = (void *)buf;
2104
    iov.iov_len = count;
2105

2106
    ret = glfs_pwritev_common(glfd, &iov, 1, offset, flags, prestat, poststat);
2107

2108
    return ret;
2109
}
2110
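/*
 * Illustrative usage sketch (not part of the original file): a positional
 * write through the 6.0 glfs_pwrite() variant defined above, collecting
 * the pre-/post-operation attributes that the write path converts from
 * iatt to glfs_stat. The helper name example_pwrite_with_stats() is
 * hypothetical.
 */
static ssize_t
example_pwrite_with_stats(struct glfs_fd *glfd, const void *buf, size_t count,
                          off_t offset)
{
    struct glfs_stat prestat = {
        0,
    };
    struct glfs_stat poststat = {
        0,
    };
    ssize_t ret;

    ret = glfs_pwrite(glfd, buf, count, offset, 0, &prestat, &poststat);
    if (ret < 0)
        return ret; /* errno carries the failure reason */

    /* prestat/poststat now describe the file before and after the write
     * and can feed a caller-side attribute cache. */
    return ret;
}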

2111
extern glfs_t *
2112
pub_glfs_from_glfd(glfs_fd_t *);
2113

2114
static int
2115
glfs_pwritev_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2116
                       int op_ret, int op_errno, struct iatt *prebuf,
2117
                       struct iatt *postbuf, dict_t *xdata)
2118
{
2119
    glfs_io_async_cbk(op_ret, op_errno, frame, cookie, NULL, 0, prebuf,
2120
                      postbuf);
2121

2122
    return 0;
2123
}
2124

2125
static int
2126
glfs_pwritev_async_common(struct glfs_fd *glfd, const struct iovec *iovec,
2127
                          int count, off_t offset, int flags,
2128
                          gf_boolean_t oldcb, glfs_io_cbk fn, void *data)
2129
{
2130
    struct glfs_io *gio = NULL;
2131
    int ret = -1;
2132
    call_frame_t *frame = NULL;
2133
    xlator_t *subvol = NULL;
2134
    fd_t *fd = NULL;
2135
    struct iobref *iobref = NULL;
2136
    struct iobuf *iobuf = NULL;
2137
    dict_t *fop_attr = NULL;
2138

2139
    DECLARE_OLD_THIS;
2140
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2141

2142
    /* Need to take explicit ref so that the fd
2143
     * is not destroyed before the fop is complete
2144
     */
2145
    GF_REF_GET(glfd);
2146

2147
    subvol = glfs_active_subvol(glfd->fs);
2148
    if (!subvol) {
2149
        errno = EIO;
2150
        goto out;
2151
    }
2152

2153
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
2154
    if (!fd) {
2155
        errno = EBADFD;
2156
        goto out;
2157
    }
2158

2159
    gio = GF_MALLOC(sizeof(*gio) + (1 * (sizeof(struct iovec))),
2160
                    glfs_mt_glfs_io_t);
2161
    if (caa_unlikely(!gio)) {
2162
        errno = ENOMEM;
2163
        goto out;
2164
    }
2165

2166
    gio->glfd = glfd;
2167
    gio->op = GF_FOP_WRITE;
2168
    gio->offset = offset;
2169
    gio->count = 1;
2170
    gio->flags = flags;
2171
    gio->oldcb = oldcb;
2172
    gio->fn = fn;
2173
    gio->data = data;
2174

2175
    ret = iobuf_copy(subvol->ctx->iobuf_pool, iovec, count, &iobref, &iobuf,
2176
                     gio->iov);
2177
    if (ret)
2178
        goto out;
2179

2180
    frame = syncop_create_frame(THIS);
2181
    if (!frame) {
2182
        errno = ENOMEM;
2183
        ret = -1;
2184
        goto out;
2185
    }
2186

2187
    frame->local = gio;
2188

2189
    ret = get_fop_attr_thrd_key(&fop_attr);
2190
    if (ret)
2191
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
2192

2193
    STACK_WIND_COOKIE(frame, glfs_pwritev_async_cbk, subvol, subvol,
2194
                      subvol->fops->writev, fd, gio->iov, gio->count, offset,
2195
                      flags, iobref, fop_attr);
2196

2197
    ret = 0;
2198
out:
2199
    if (ret) {
2200
        if (fd)
2201
            fd_unref(fd);
2202
        if (glfd)
2203
            GF_REF_PUT(glfd);
2204
        GF_FREE(gio);
2205
        /*
2206
         * If any error occurs after the frame has been created,
2207
         * the frame root has to be destroyed.
2208
         */
2209
        glfs_subvol_done(glfd->fs, subvol);
2210
    }
2211
    if (fop_attr)
2212
        dict_unref(fop_attr);
2213

2214
    if (iobuf)
2215
        iobuf_unref(iobuf);
2216
    if (iobref)
2217
        iobref_unref(iobref);
2218

2219
    __GLFS_EXIT_FS;
2220

2221
invalid_fs:
2222
    return ret;
2223
}
2224

2225
GFAPI_SYMVER_PUBLIC(glfs_pwritev_async34, glfs_pwritev_async, 3.4.0)
2226
int
2227
pub_glfs_pwritev_async34(struct glfs_fd *glfd, const struct iovec *iovec,
2228
                         int count, off_t offset, int flags, glfs_io_cbk34 fn,
2229
                         void *data)
2230
{
2231
    return glfs_pwritev_async_common(glfd, iovec, count, offset, flags,
2232
                                     _gf_true, (void *)fn, data);
2233
}
2234

2235
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwritev_async, 6.0)
2236
int
2237
pub_glfs_pwritev_async(struct glfs_fd *glfd, const struct iovec *iovec,
2238
                       int count, off_t offset, int flags, glfs_io_cbk fn,
2239
                       void *data)
2240
{
2241
    return glfs_pwritev_async_common(glfd, iovec, count, offset, flags,
2242
                                     _gf_false, fn, data);
2243
}
2244
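/*
 * Illustrative usage sketch (not part of the original file): an
 * asynchronous write, assuming the 6.0 glfs_io_cbk signature
 * (fd, ret, prestat, poststat, data) that the common routine above
 * dispatches to when oldcb is false. The names example_write_done() and
 * example_async_write() are hypothetical.
 */
static void
example_write_done(glfs_fd_t *fd, ssize_t ret, struct glfs_stat *prestat,
                   struct glfs_stat *poststat, void *data)
{
    /* Runs from a gluster worker thread once the write completes; "data"
     * is the cookie handed to glfs_pwritev_async() below. */
    (void)fd;
    (void)prestat;
    (void)poststat;
    *(ssize_t *)data = ret;
}

static int
example_async_write(struct glfs_fd *glfd, const struct iovec *iov, int count,
                    off_t offset, ssize_t *result)
{
    /* A return of 0 only means the fop was queued; completion and the
     * byte count are reported through the callback. */
    return glfs_pwritev_async(glfd, iov, count, offset, 0, example_write_done,
                              result);
}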

2245
GFAPI_SYMVER_PUBLIC(glfs_write_async34, glfs_write_async, 3.4.0)
2246
int
2247
pub_glfs_write_async34(struct glfs_fd *glfd, const void *buf, size_t count,
2248
                       int flags, glfs_io_cbk34 fn, void *data)
2249
{
2250
    struct iovec iov = {
2251
        0,
2252
    };
2253
    ssize_t ret = 0;
2254

2255
    if (glfd == NULL) {
2256
        errno = EBADF;
2257
        return -1;
2258
    }
2259

2260
    iov.iov_base = (void *)buf;
2261
    iov.iov_len = count;
2262

2263
    ret = glfs_pwritev_async_common(glfd, &iov, 1, glfd->offset, flags,
2264
                                    _gf_true, (void *)fn, data);
2265

2266
    return ret;
2267
}
2268

2269
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_write_async, 6.0)
2270
int
2271
pub_glfs_write_async(struct glfs_fd *glfd, const void *buf, size_t count,
2272
                     int flags, glfs_io_cbk fn, void *data)
2273
{
2274
    struct iovec iov = {
2275
        0,
2276
    };
2277
    ssize_t ret = 0;
2278

2279
    if (glfd == NULL) {
2280
        errno = EBADF;
2281
        return -1;
2282
    }
2283

2284
    iov.iov_base = (void *)buf;
2285
    iov.iov_len = count;
2286

2287
    ret = glfs_pwritev_async_common(glfd, &iov, 1, glfd->offset, flags,
2288
                                    _gf_false, fn, data);
2289

2290
    return ret;
2291
}
2292

2293
GFAPI_SYMVER_PUBLIC(glfs_pwrite_async34, glfs_pwrite_async, 3.4.0)
2294
int
2295
pub_glfs_pwrite_async34(struct glfs_fd *glfd, const void *buf, int count,
2296
                        off_t offset, int flags, glfs_io_cbk34 fn, void *data)
2297
{
2298
    struct iovec iov = {
2299
        0,
2300
    };
2301
    ssize_t ret = 0;
2302

2303
    iov.iov_base = (void *)buf;
2304
    iov.iov_len = count;
2305

2306
    ret = glfs_pwritev_async_common(glfd, &iov, 1, offset, flags, _gf_true,
2307
                                    (void *)fn, data);
2308

2309
    return ret;
2310
}
2311

2312
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwrite_async, 6.0)
2313
int
2314
pub_glfs_pwrite_async(struct glfs_fd *glfd, const void *buf, int count,
2315
                      off_t offset, int flags, glfs_io_cbk fn, void *data)
2316
{
2317
    struct iovec iov = {
2318
        0,
2319
    };
2320
    ssize_t ret = 0;
2321

2322
    iov.iov_base = (void *)buf;
2323
    iov.iov_len = count;
2324

2325
    ret = glfs_pwritev_async_common(glfd, &iov, 1, offset, flags, _gf_false, fn,
2326
                                    data);
2327

2328
    return ret;
2329
}
2330

2331
GFAPI_SYMVER_PUBLIC(glfs_writev_async34, glfs_writev_async, 3.4.0)
2332
int
2333
pub_glfs_writev_async34(struct glfs_fd *glfd, const struct iovec *iov,
2334
                        int count, int flags, glfs_io_cbk34 fn, void *data)
2335
{
2336
    ssize_t ret = 0;
2337

2338
    if (glfd == NULL) {
2339
        errno = EBADF;
2340
        return -1;
2341
    }
2342

2343
    ret = glfs_pwritev_async_common(glfd, iov, count, glfd->offset, flags,
2344
                                    _gf_true, (void *)fn, data);
2345
    return ret;
2346
}
2347

2348
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_writev_async, 6.0)
2349
int
2350
pub_glfs_writev_async(struct glfs_fd *glfd, const struct iovec *iov, int count,
2351
                      int flags, glfs_io_cbk fn, void *data)
2352
{
2353
    ssize_t ret = 0;
2354

2355
    if (glfd == NULL) {
2356
        errno = EBADF;
2357
        return -1;
2358
    }
2359

2360
    ret = glfs_pwritev_async_common(glfd, iov, count, glfd->offset, flags,
2361
                                    _gf_false, fn, data);
2362
    return ret;
2363
}
2364

2365
static int
2366
glfs_fsync_common(struct glfs_fd *glfd, struct glfs_stat *prestat,
2367
                  struct glfs_stat *poststat)
2368
{
2369
    int ret = -1;
2370
    xlator_t *subvol = NULL;
2371
    fd_t *fd = NULL;
2372
    struct iatt preiatt =
2373
                    {
2374
                        0,
2375
                    },
2376
                postiatt = {
2377
                    0,
2378
                };
2379
    dict_t *fop_attr = NULL;
2380

2381
    DECLARE_OLD_THIS;
2382
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2383

2384
    GF_REF_GET(glfd);
2385

2386
    subvol = glfs_active_subvol(glfd->fs);
2387
    if (!subvol) {
2388
        ret = -1;
2389
        errno = EIO;
2390
        goto out;
2391
    }
2392

2393
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
2394
    if (!fd) {
2395
        ret = -1;
2396
        errno = EBADFD;
2397
        goto out;
2398
    }
2399

2400
    ret = get_fop_attr_thrd_key(&fop_attr);
2401
    if (ret)
2402
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
2403

2404
    ret = syncop_fsync(subvol, fd, 0, &preiatt, &postiatt, fop_attr, NULL);
2405
    DECODE_SYNCOP_ERR(ret);
2406

2407
    if (ret >= 0) {
2408
        if (prestat)
2409
            glfs_iatt_to_statx(glfd->fs, &preiatt, prestat);
2410
        if (poststat)
2411
            glfs_iatt_to_statx(glfd->fs, &postiatt, poststat);
2412
    }
2413
out:
2414
    if (fd)
2415
        fd_unref(fd);
2416
    if (glfd)
2417
        GF_REF_PUT(glfd);
2418
    if (fop_attr)
2419
        dict_unref(fop_attr);
2420

2421
    glfs_subvol_done(glfd->fs, subvol);
2422

2423
    __GLFS_EXIT_FS;
2424

2425
invalid_fs:
2426
    return ret;
2427
}
2428

2429
GFAPI_SYMVER_PUBLIC(glfs_fsync34, glfs_fsync, 3.4.0)
2430
int
2431
pub_glfs_fsync34(struct glfs_fd *glfd)
2432
{
2433
    return glfs_fsync_common(glfd, NULL, NULL);
2434
}
2435

2436
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsync, 6.0)
2437
int
2438
pub_glfs_fsync(struct glfs_fd *glfd, struct glfs_stat *prestat,
2439
               struct glfs_stat *poststat)
2440
{
2441
    return glfs_fsync_common(glfd, prestat, poststat);
2442
}
2443
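/*
 * Illustrative usage sketch (not part of the original file): flushing a
 * descriptor with the 6.0 glfs_fsync() variant defined above and asking
 * only for the post-operation attributes. The helper name example_flush()
 * is hypothetical.
 */
static int
example_flush(struct glfs_fd *glfd)
{
    struct glfs_stat poststat = {
        0,
    };

    /* Either stat pointer may be NULL when the caller does not need it. */
    return glfs_fsync(glfd, NULL, &poststat);
}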

2444
static int
2445
glfs_fsync_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2446
                     int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
2447
                     struct iatt *postbuf, dict_t *xdata)
2448
{
2449
    glfs_io_async_cbk(op_ret, op_errno, frame, cookie, NULL, 0, prebuf,
2450
                      postbuf);
2451

2452
    return 0;
2453
}
2454

2455
static int
2456
glfs_fsync_async_common(struct glfs_fd *glfd, gf_boolean_t oldcb,
2457
                        glfs_io_cbk fn, void *data, int dataonly)
2458
{
2459
    struct glfs_io *gio = NULL;
2460
    int ret = 0;
2461
    call_frame_t *frame = NULL;
2462
    xlator_t *subvol = NULL;
2463
    fd_t *fd = NULL;
2464

2465
    /* Need to take explicit ref so that the fd
2466
     * is not destroyed before the fop is complete
2467
     */
2468
    GF_REF_GET(glfd);
2469

2470
    subvol = glfs_active_subvol(glfd->fs);
2471
    if (!subvol) {
2472
        ret = -1;
2473
        errno = EIO;
2474
        goto out;
2475
    }
2476

2477
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
2478
    if (!fd) {
2479
        ret = -1;
2480
        errno = EBADFD;
2481
        goto out;
2482
    }
2483

2484
    frame = syncop_create_frame(THIS);
2485
    if (!frame) {
2486
        ret = -1;
2487
        errno = ENOMEM;
2488
        goto out;
2489
    }
2490

2491
    gio = GF_CALLOC(1, sizeof(*gio), glfs_mt_glfs_io_t);
2492
    if (!gio) {
2493
        errno = ENOMEM;
2494
        ret = -1;
2495
        goto out;
2496
    }
2497

2498
    gio->op = GF_FOP_FSYNC;
2499
    gio->glfd = glfd;
2500
    gio->flags = dataonly;
2501
    gio->oldcb = oldcb;
2502
    gio->fn = fn;
2503
    gio->data = data;
2504

2505
    frame->local = gio;
2506

2507
    STACK_WIND_COOKIE(frame, glfs_fsync_async_cbk, subvol, subvol,
2508
                      subvol->fops->fsync, fd, dataonly, NULL);
2509

2510
out:
2511
    if (ret) {
2512
        if (fd)
2513
            fd_unref(fd);
2514
        GF_REF_PUT(glfd);
2515
        GF_FREE(gio);
2516
        if (frame)
2517
            STACK_DESTROY(frame->root);
2518
        glfs_subvol_done(glfd->fs, subvol);
2519
    }
2520

2521
    return ret;
2522
}
2523

2524
GFAPI_SYMVER_PUBLIC(glfs_fsync_async34, glfs_fsync_async, 3.4.0)
2525
int
2526
pub_glfs_fsync_async34(struct glfs_fd *glfd, glfs_io_cbk34 fn, void *data)
2527
{
2528
    int ret = -1;
2529

2530
    DECLARE_OLD_THIS;
2531
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2532

2533
    ret = glfs_fsync_async_common(glfd, _gf_true, (void *)fn, data, 0);
2534

2535
    __GLFS_EXIT_FS;
2536

2537
invalid_fs:
2538
    return ret;
2539
}
2540

2541
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsync_async, 6.0)
2542
int
2543
pub_glfs_fsync_async(struct glfs_fd *glfd, glfs_io_cbk fn, void *data)
2544
{
2545
    int ret = -1;
2546

2547
    DECLARE_OLD_THIS;
2548
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2549

2550
    ret = glfs_fsync_async_common(glfd, _gf_false, fn, data, 0);
2551

2552
    __GLFS_EXIT_FS;
2553

2554
invalid_fs:
2555
    return ret;
2556
}
2557

2558
static int
2559
glfs_fdatasync_common(struct glfs_fd *glfd, struct glfs_stat *prestat,
2560
                      struct glfs_stat *poststat)
2561
{
2562
    int ret = -1;
2563
    xlator_t *subvol = NULL;
2564
    fd_t *fd = NULL;
2565
    struct iatt preiatt =
2566
                    {
2567
                        0,
2568
                    },
2569
                postiatt = {
2570
                    0,
2571
                };
2572
    dict_t *fop_attr = NULL;
2573

2574
    DECLARE_OLD_THIS;
2575
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2576

2577
    GF_REF_GET(glfd);
2578

2579
    subvol = glfs_active_subvol(glfd->fs);
2580
    if (!subvol) {
2581
        ret = -1;
2582
        errno = EIO;
2583
        goto out;
2584
    }
2585

2586
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
2587
    if (!fd) {
2588
        ret = -1;
2589
        errno = EBADFD;
2590
        goto out;
2591
    }
2592

2593
    ret = get_fop_attr_thrd_key(&fop_attr);
2594
    if (ret)
2595
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
2596

2597
    ret = syncop_fsync(subvol, fd, 1, &preiatt, &postiatt, fop_attr, NULL);
2598
    DECODE_SYNCOP_ERR(ret);
2599

2600
    if (ret >= 0) {
2601
        if (prestat)
2602
            glfs_iatt_to_statx(glfd->fs, &preiatt, prestat);
2603
        if (poststat)
2604
            glfs_iatt_to_statx(glfd->fs, &postiatt, poststat);
2605
    }
2606
out:
2607
    if (fd)
2608
        fd_unref(fd);
2609
    if (glfd)
2610
        GF_REF_PUT(glfd);
2611
    if (fop_attr)
2612
        dict_unref(fop_attr);
2613

2614
    glfs_subvol_done(glfd->fs, subvol);
2615

2616
    __GLFS_EXIT_FS;
2617

2618
invalid_fs:
2619
    return ret;
2620
}
2621

2622
GFAPI_SYMVER_PUBLIC(glfs_fdatasync34, glfs_fdatasync, 3.4.0)
2623
int
2624
pub_glfs_fdatasync34(struct glfs_fd *glfd)
2625
{
2626
    return glfs_fdatasync_common(glfd, NULL, NULL);
2627
}
2628

2629
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fdatasync, 6.0)
2630
int
2631
pub_glfs_fdatasync(struct glfs_fd *glfd, struct glfs_stat *prestat,
2632
                   struct glfs_stat *poststat)
2633
{
2634
    return glfs_fdatasync_common(glfd, prestat, poststat);
2635
}
2636

2637
GFAPI_SYMVER_PUBLIC(glfs_fdatasync_async34, glfs_fdatasync_async, 3.4.0)
2638
int
2639
pub_glfs_fdatasync_async34(struct glfs_fd *glfd, glfs_io_cbk34 fn, void *data)
2640
{
2641
    int ret = -1;
2642

2643
    DECLARE_OLD_THIS;
2644
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2645

2646
    ret = glfs_fsync_async_common(glfd, _gf_true, (void *)fn, data, 1);
2647

2648
    __GLFS_EXIT_FS;
2649

2650
invalid_fs:
2651
    return ret;
2652
}
2653

2654
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fdatasync_async, 6.0)
2655
int
2656
pub_glfs_fdatasync_async(struct glfs_fd *glfd, glfs_io_cbk fn, void *data)
2657
{
2658
    int ret = -1;
2659

2660
    DECLARE_OLD_THIS;
2661
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2662

2663
    ret = glfs_fsync_async_common(glfd, _gf_false, fn, data, 1);
2664

2665
    __GLFS_EXIT_FS;
2666

2667
invalid_fs:
2668
    return ret;
2669
}
2670

2671
static int
2672
glfs_ftruncate_common(struct glfs_fd *glfd, off_t offset,
2673
                      struct glfs_stat *prestat, struct glfs_stat *poststat)
2674
{
2675
    int ret = -1;
2676
    xlator_t *subvol = NULL;
2677
    fd_t *fd = NULL;
2678
    struct iatt preiatt =
2679
                    {
2680
                        0,
2681
                    },
2682
                postiatt = {
2683
                    0,
2684
                };
2685
    dict_t *fop_attr = NULL;
2686

2687
    DECLARE_OLD_THIS;
2688
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2689

2690
    GF_REF_GET(glfd);
2691

2692
    subvol = glfs_active_subvol(glfd->fs);
2693
    if (!subvol) {
2694
        ret = -1;
2695
        errno = EIO;
2696
        goto out;
2697
    }
2698

2699
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
2700
    if (!fd) {
2701
        ret = -1;
2702
        errno = EBADFD;
2703
        goto out;
2704
    }
2705

2706
    ret = get_fop_attr_thrd_key(&fop_attr);
2707
    if (ret)
2708
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
2709

2710
    ret = syncop_ftruncate(subvol, fd, offset, &preiatt, &postiatt, fop_attr,
2711
                           NULL);
2712
    DECODE_SYNCOP_ERR(ret);
2713

2714
    if (ret >= 0) {
2715
        if (prestat)
2716
            glfs_iatt_to_statx(glfd->fs, &preiatt, prestat);
2717
        if (poststat)
2718
            glfs_iatt_to_statx(glfd->fs, &postiatt, poststat);
2719
    }
2720
out:
2721
    if (fd)
2722
        fd_unref(fd);
2723
    if (glfd)
2724
        GF_REF_PUT(glfd);
2725
    if (fop_attr)
2726
        dict_unref(fop_attr);
2727

2728
    glfs_subvol_done(glfd->fs, subvol);
2729

2730
    __GLFS_EXIT_FS;
2731

2732
invalid_fs:
2733
    return ret;
2734
}
2735

2736
GFAPI_SYMVER_PUBLIC(glfs_ftruncate34, glfs_ftruncate, 3.4.0)
2737
int
2738
pub_glfs_ftruncate34(struct glfs_fd *glfd, off_t offset)
2739
{
2740
    return glfs_ftruncate_common(glfd, offset, NULL, NULL);
2741
}
2742

2743
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_ftruncate, 6.0)
2744
int
2745
pub_glfs_ftruncate(struct glfs_fd *glfd, off_t offset,
2746
                   struct glfs_stat *prestat, struct glfs_stat *poststat)
2747
{
2748
    return glfs_ftruncate_common(glfd, offset, prestat, poststat);
2749
}
2750
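/*
 * Illustrative usage sketch (not part of the original file): growing or
 * shrinking an open file with the 6.0 glfs_ftruncate() variant defined
 * above. The helper name example_resize() is hypothetical.
 */
static int
example_resize(struct glfs_fd *glfd, off_t new_size)
{
    struct glfs_stat prestat = {
        0,
    };
    struct glfs_stat poststat = {
        0,
    };

    /* On success the file size becomes exactly new_size; any extended
     * region reads back as zeroes. */
    return glfs_ftruncate(glfd, new_size, &prestat, &poststat);
}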

2751
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_truncate, 3.7.15)
2752
int
2753
pub_glfs_truncate(struct glfs *fs, const char *path, off_t length)
2754
{
2755
    int ret = -1;
2756
    xlator_t *subvol = NULL;
2757
    loc_t loc = {
2758
        0,
2759
    };
2760
    struct iatt iatt = {
2761
        0,
2762
    };
2763
    int reval = 0;
2764

2765
    DECLARE_OLD_THIS;
2766
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
2767

2768
    subvol = glfs_active_subvol(fs);
2769
    if (!subvol) {
2770
        ret = -1;
2771
        errno = EIO;
2772
        goto out;
2773
    }
2774
retry:
2775
    ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
2776

2777
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
2778

2779
    if (ret)
2780
        goto out;
2781

2782
    ret = syncop_truncate(subvol, &loc, length, NULL, NULL);
2783
    DECODE_SYNCOP_ERR(ret);
2784

2785
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
2786
out:
2787
    loc_wipe(&loc);
2788

2789
    glfs_subvol_done(fs, subvol);
2790

2791
    __GLFS_EXIT_FS;
2792

2793
invalid_fs:
2794
    return ret;
2795
}
2796

2797
static int
2798
glfs_ftruncate_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2799
                         int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
2800
                         struct iatt *postbuf, dict_t *xdata)
2801
{
2802
    glfs_io_async_cbk(op_ret, op_errno, frame, cookie, NULL, 0, prebuf,
2803
                      postbuf);
2804

2805
    return 0;
2806
}
2807

2808
static int
2809
glfs_ftruncate_async_common(struct glfs_fd *glfd, off_t offset,
2810
                            gf_boolean_t oldcb, glfs_io_cbk fn, void *data)
2811
{
2812
    struct glfs_io *gio = NULL;
2813
    int ret = -1;
2814
    call_frame_t *frame = NULL;
2815
    xlator_t *subvol = NULL;
2816
    fd_t *fd = NULL;
2817
    dict_t *fop_attr = NULL;
2818

2819
    DECLARE_OLD_THIS;
2820
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2821

2822
    /* Need to take explicit ref so that the fd
2823
     * is not destroyed before the fop is complete
2824
     */
2825
    GF_REF_GET(glfd);
2826

2827
    subvol = glfs_active_subvol(glfd->fs);
2828
    if (!subvol) {
2829
        errno = EIO;
2830
        goto out;
2831
    }
2832

2833
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
2834
    if (!fd) {
2835
        errno = EBADFD;
2836
        goto out;
2837
    }
2838

2839
    frame = syncop_create_frame(THIS);
2840
    if (!frame) {
2841
        errno = ENOMEM;
2842
        goto out;
2843
    }
2844

2845
    gio = GF_CALLOC(1, sizeof(*gio), glfs_mt_glfs_io_t);
2846
    if (!gio) {
2847
        errno = ENOMEM;
2848
        goto out;
2849
    }
2850

2851
    gio->op = GF_FOP_FTRUNCATE;
2852
    gio->glfd = glfd;
2853
    gio->offset = offset;
2854
    gio->oldcb = oldcb;
2855
    gio->fn = fn;
2856
    gio->data = data;
2857

2858
    frame->local = gio;
2859

2860
    ret = get_fop_attr_thrd_key(&fop_attr);
2861
    if (ret)
2862
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
2863

2864
    STACK_WIND_COOKIE(frame, glfs_ftruncate_async_cbk, subvol, subvol,
2865
                      subvol->fops->ftruncate, fd, offset, fop_attr);
2866

2867
    ret = 0;
2868

2869
out:
2870
    if (ret) {
2871
        if (fd)
2872
            fd_unref(fd);
2873
        if (glfd)
2874
            GF_REF_PUT(glfd);
2875
        GF_FREE(gio);
2876
        if (frame)
2877
            STACK_DESTROY(frame->root);
2878
        glfs_subvol_done(glfd->fs, subvol);
2879
    }
2880
    if (fop_attr)
2881
        dict_unref(fop_attr);
2882

2883
    __GLFS_EXIT_FS;
2884

2885
invalid_fs:
2886
    return ret;
2887
}
2888

2889
GFAPI_SYMVER_PUBLIC(glfs_ftruncate_async34, glfs_ftruncate_async, 3.4.0)
2890
int
2891
pub_glfs_ftruncate_async34(struct glfs_fd *glfd, off_t offset, glfs_io_cbk34 fn,
2892
                           void *data)
2893
{
2894
    return glfs_ftruncate_async_common(glfd, offset, _gf_true, (void *)fn,
2895
                                       data);
2896
}
2897

2898
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_ftruncate_async, 6.0)
2899
int
2900
pub_glfs_ftruncate_async(struct glfs_fd *glfd, off_t offset, glfs_io_cbk fn,
2901
                         void *data)
2902
{
2903
    return glfs_ftruncate_async_common(glfd, offset, _gf_false, fn, data);
2904
}
2905

2906
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_access, 3.4.0)
2907
int
2908
pub_glfs_access(struct glfs *fs, const char *path, int mode)
2909
{
2910
    int ret = -1;
2911
    xlator_t *subvol = NULL;
2912
    loc_t loc = {
2913
        0,
2914
    };
2915
    struct iatt iatt = {
2916
        0,
2917
    };
2918
    int reval = 0;
2919

2920
    DECLARE_OLD_THIS;
2921
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
2922

2923
    subvol = glfs_active_subvol(fs);
2924
    if (!subvol) {
2925
        ret = -1;
2926
        errno = EIO;
2927
        goto out;
2928
    }
2929
retry:
2930
    ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
2931

2932
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
2933

2934
    if (ret)
2935
        goto out;
2936

2937
    ret = syncop_access(subvol, &loc, mode, NULL, NULL);
2938
    DECODE_SYNCOP_ERR(ret);
2939

2940
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
2941
out:
2942
    loc_wipe(&loc);
2943

2944
    glfs_subvol_done(fs, subvol);
2945

2946
    __GLFS_EXIT_FS;
2947

2948
invalid_fs:
2949
    return ret;
2950
}
2951
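/*
 * Illustrative usage sketch (not part of the original file): a permission
 * probe with glfs_access(), which mirrors access(2) semantics (mode is
 * F_OK or an OR of R_OK/W_OK/X_OK from <unistd.h>). The helper name
 * example_can_modify() is hypothetical.
 */
static int
example_can_modify(struct glfs *fs, const char *path)
{
    /* 0 when the path exists and is readable and writable by the caller,
     * -1 with errno set otherwise. */
    return glfs_access(fs, path, R_OK | W_OK);
}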

2952
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_symlink, 3.4.0)
2953
int
2954
pub_glfs_symlink(struct glfs *fs, const char *data, const char *path)
2955
{
2956
    int ret = -1;
2957
    xlator_t *subvol = NULL;
2958
    loc_t loc = {
2959
        0,
2960
    };
2961
    struct iatt iatt = {
2962
        0,
2963
    };
2964
    uuid_t gfid;
2965
    dict_t *xattr_req = NULL;
2966
    int reval = 0;
2967

2968
    DECLARE_OLD_THIS;
2969
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
2970

2971
    subvol = glfs_active_subvol(fs);
2972
    if (!subvol) {
2973
        ret = -1;
2974
        errno = EIO;
2975
        goto out;
2976
    }
2977

2978
    xattr_req = dict_new();
2979
    if (!xattr_req) {
2980
        ret = -1;
2981
        errno = ENOMEM;
2982
        goto out;
2983
    }
2984

2985
    gf_uuid_generate(gfid);
2986
    ret = dict_set_gfuuid(xattr_req, "gfid-req", gfid, true);
2987
    if (ret) {
2988
        ret = -1;
2989
        errno = ENOMEM;
2990
        goto out;
2991
    }
2992
retry:
2993
    ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
2994

2995
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
2996

2997
    if (loc.inode) {
2998
        errno = EEXIST;
2999
        ret = -1;
3000
        goto out;
3001
    }
3002

3003
    if (ret == -1 && errno != ENOENT)
3004
        /* Any other type of error is fatal */
3005
        goto out;
3006

3007
    if (ret == -1 && errno == ENOENT && !loc.parent)
3008
        /* The parent directory or an ancestor even
3009
           higher does not exist
3010
        */
3011
        goto out;
3012

3013
    /* ret == -1 && errno == ENOENT */
3014
    loc.inode = inode_new(loc.parent->table);
3015
    if (!loc.inode) {
3016
        ret = -1;
3017
        errno = ENOMEM;
3018
        goto out;
3019
    }
3020

3021
    ret = syncop_symlink(subvol, &loc, data, &iatt, xattr_req, NULL);
3022
    DECODE_SYNCOP_ERR(ret);
3023

3024
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3025

3026
    if (ret == 0)
3027
        ret = glfs_loc_link(&loc, &iatt);
3028
out:
3029
    loc_wipe(&loc);
3030

3031
    if (xattr_req)
3032
        dict_unref(xattr_req);
3033

3034
    glfs_subvol_done(fs, subvol);
3035

3036
    __GLFS_EXIT_FS;
3037

3038
invalid_fs:
3039
    return ret;
3040
}
3041

3042
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readlink, 3.4.0)
3043
int
3044
pub_glfs_readlink(struct glfs *fs, const char *path, char *buf, size_t bufsiz)
3045
{
3046
    int ret = -1;
3047
    xlator_t *subvol = NULL;
3048
    loc_t loc = {
3049
        0,
3050
    };
3051
    struct iatt iatt = {
3052
        0,
3053
    };
3054
    int reval = 0;
3055
    char *linkval = NULL;
3056

3057
    DECLARE_OLD_THIS;
3058
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3059

3060
    subvol = glfs_active_subvol(fs);
3061
    if (!subvol) {
3062
        ret = -1;
3063
        errno = EIO;
3064
        goto out;
3065
    }
3066
retry:
3067
    ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
3068

3069
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3070

3071
    if (ret)
3072
        goto out;
3073

3074
    if (iatt.ia_type != IA_IFLNK) {
3075
        ret = -1;
3076
        errno = EINVAL;
3077
        goto out;
3078
    }
3079

3080
    ret = syncop_readlink(subvol, &loc, &linkval, bufsiz, NULL, NULL);
3081
    DECODE_SYNCOP_ERR(ret);
3082
    if (ret > 0) {
3083
        memcpy(buf, linkval, ret);
3084
        GF_FREE(linkval);
3085
    }
3086

3087
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3088
out:
3089
    loc_wipe(&loc);
3090

3091
    glfs_subvol_done(fs, subvol);
3092

3093
    __GLFS_EXIT_FS;
3094

3095
invalid_fs:
3096
    return ret;
3097
}
3098
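/*
 * Illustrative usage sketch (not part of the original file): creating a
 * symlink and reading it back. Note the glfs_symlink() argument order
 * (link target first, link path second) and that glfs_readlink(), like
 * readlink(2), does not NUL-terminate the buffer. The helper name
 * example_symlink_roundtrip() is hypothetical; buf must hold at least
 * bufsiz + 1 bytes so the terminator below always fits.
 */
static int
example_symlink_roundtrip(struct glfs *fs, const char *target,
                          const char *linkpath, char *buf, size_t bufsiz)
{
    int ret;

    ret = glfs_symlink(fs, target, linkpath);
    if (ret < 0)
        return ret;

    ret = glfs_readlink(fs, linkpath, buf, bufsiz);
    if (ret < 0)
        return ret;

    buf[ret] = '\0'; /* terminate the returned link target ourselves */
    return 0;
}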

3099
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mknod, 3.4.0)
3100
int
3101
pub_glfs_mknod(struct glfs *fs, const char *path, mode_t mode, dev_t dev)
3102
{
3103
    int ret = -1;
3104
    xlator_t *subvol = NULL;
3105
    loc_t loc = {
3106
        0,
3107
    };
3108
    struct iatt iatt = {
3109
        0,
3110
    };
3111
    uuid_t gfid;
3112
    dict_t *xattr_req = NULL;
3113
    int reval = 0;
3114

3115
    DECLARE_OLD_THIS;
3116
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3117

3118
    subvol = glfs_active_subvol(fs);
3119
    if (!subvol) {
3120
        ret = -1;
3121
        errno = EIO;
3122
        goto out;
3123
    }
3124

3125
    xattr_req = dict_new();
3126
    if (!xattr_req) {
3127
        ret = -1;
3128
        errno = ENOMEM;
3129
        goto out;
3130
    }
3131

3132
    gf_uuid_generate(gfid);
3133
    ret = dict_set_gfuuid(xattr_req, "gfid-req", gfid, true);
3134
    if (ret) {
3135
        ret = -1;
3136
        errno = ENOMEM;
3137
        goto out;
3138
    }
3139
retry:
3140
    ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
3141

3142
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3143

3144
    if (loc.inode) {
3145
        errno = EEXIST;
3146
        ret = -1;
3147
        goto out;
3148
    }
3149

3150
    if (ret == -1 && errno != ENOENT)
3151
        /* Any other type of error is fatal */
3152
        goto out;
3153

3154
    if (ret == -1 && errno == ENOENT && !loc.parent)
3155
        /* The parent directory or an ancestor even
3156
           higher does not exist
3157
        */
3158
        goto out;
3159

3160
    /* ret == -1 && errno == ENOENT */
3161
    loc.inode = inode_new(loc.parent->table);
3162
    if (!loc.inode) {
3163
        ret = -1;
3164
        errno = ENOMEM;
3165
        goto out;
3166
    }
3167

3168
    ret = syncop_mknod(subvol, &loc, mode, dev, &iatt, xattr_req, NULL);
3169
    DECODE_SYNCOP_ERR(ret);
3170

3171
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3172

3173
    if (ret == 0)
3174
        ret = glfs_loc_link(&loc, &iatt);
3175
out:
3176
    loc_wipe(&loc);
3177

3178
    if (xattr_req)
3179
        dict_unref(xattr_req);
3180

3181
    glfs_subvol_done(fs, subvol);
3182

3183
    __GLFS_EXIT_FS;
3184

3185
invalid_fs:
3186
    return ret;
3187
}
3188

3189
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mkdir, 3.4.0)
3190
int
3191
pub_glfs_mkdir(struct glfs *fs, const char *path, mode_t mode)
3192
{
3193
    int ret = -1;
3194
    xlator_t *subvol = NULL;
3195
    loc_t loc = {
3196
        0,
3197
    };
3198
    struct iatt iatt = {
3199
        0,
3200
    };
3201
    uuid_t gfid;
3202
    dict_t *xattr_req = NULL;
3203
    int reval = 0;
3204

3205
    DECLARE_OLD_THIS;
3206
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3207

3208
    subvol = glfs_active_subvol(fs);
3209
    if (!subvol) {
3210
        ret = -1;
3211
        errno = EIO;
3212
        goto out;
3213
    }
3214

3215
    xattr_req = dict_new();
3216
    if (!xattr_req) {
3217
        ret = -1;
3218
        errno = ENOMEM;
3219
        goto out;
3220
    }
3221

3222
    gf_uuid_generate(gfid);
3223
    ret = dict_set_gfuuid(xattr_req, "gfid-req", gfid, true);
3224
    if (ret) {
3225
        ret = -1;
3226
        errno = ENOMEM;
3227
        goto out;
3228
    }
3229
retry:
3230
    ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
3231

3232
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3233

3234
    if (loc.inode) {
3235
        errno = EEXIST;
3236
        ret = -1;
3237
        goto out;
3238
    }
3239

3240
    if (ret == -1 && errno != ENOENT)
3241
        /* Any other type of error is fatal */
3242
        goto out;
3243

3244
    if (ret == -1 && errno == ENOENT && !loc.parent)
3245
        /* The parent directory or an ancestor even
3246
           higher does not exist
3247
        */
3248
        goto out;
3249

3250
    /* ret == -1 && errno == ENOENT */
3251
    loc.inode = inode_new(loc.parent->table);
3252
    if (!loc.inode) {
3253
        ret = -1;
3254
        errno = ENOMEM;
3255
        goto out;
3256
    }
3257

3258
    ret = syncop_mkdir(subvol, &loc, mode, &iatt, xattr_req, NULL);
3259
    DECODE_SYNCOP_ERR(ret);
3260

3261
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3262

3263
    if (ret == 0)
3264
        ret = glfs_loc_link(&loc, &iatt);
3265
out:
3266
    loc_wipe(&loc);
3267

3268
    if (xattr_req)
3269
        dict_unref(xattr_req);
3270

3271
    glfs_subvol_done(fs, subvol);
3272

3273
    __GLFS_EXIT_FS;
3274

3275
invalid_fs:
3276
    return ret;
3277
}
3278
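/*
 * Illustrative usage sketch (not part of the original file): creating a
 * scratch directory while tolerating one that already exists. The helper
 * name example_ensure_dir() is hypothetical and 0700 is an arbitrary mode.
 */
static int
example_ensure_dir(struct glfs *fs, const char *path)
{
    int ret;

    ret = glfs_mkdir(fs, path, 0700);
    if (ret < 0 && errno == EEXIST)
        ret = 0; /* already present: treat as success */

    return ret;
}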

3279
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_unlink, 3.4.0)
3280
int
3281
pub_glfs_unlink(struct glfs *fs, const char *path)
3282
{
3283
    int ret = -1;
3284
    xlator_t *subvol = NULL;
3285
    loc_t loc = {
3286
        0,
3287
    };
3288
    struct iatt iatt = {
3289
        0,
3290
    };
3291
    int reval = 0;
3292

3293
    DECLARE_OLD_THIS;
3294
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3295

3296
    subvol = glfs_active_subvol(fs);
3297
    if (!subvol) {
3298
        ret = -1;
3299
        errno = EIO;
3300
        goto out;
3301
    }
3302
retry:
3303
    ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
3304

3305
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3306

3307
    if (ret)
3308
        goto out;
3309

3310
    if (iatt.ia_type == IA_IFDIR) {
3311
        ret = -1;
3312
        errno = EISDIR;
3313
        goto out;
3314
    }
3315

3316
    /* TODO: Add leaseid */
3317
    ret = syncop_unlink(subvol, &loc, NULL, NULL);
3318
    DECODE_SYNCOP_ERR(ret);
3319

3320
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3321

3322
    if (ret == 0)
3323
        ret = glfs_loc_unlink(&loc);
3324
out:
3325
    loc_wipe(&loc);
3326

3327
    glfs_subvol_done(fs, subvol);
3328

3329
    __GLFS_EXIT_FS;
3330

3331
invalid_fs:
3332
    return ret;
3333
}
3334

3335
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_rmdir, 3.4.0)
3336
int
3337
pub_glfs_rmdir(struct glfs *fs, const char *path)
3338
{
3339
    int ret = -1;
3340
    xlator_t *subvol = NULL;
3341
    loc_t loc = {
3342
        0,
3343
    };
3344
    struct iatt iatt = {
3345
        0,
3346
    };
3347
    int reval = 0;
3348

3349
    DECLARE_OLD_THIS;
3350
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3351

3352
    subvol = glfs_active_subvol(fs);
3353
    if (!subvol) {
3354
        ret = -1;
3355
        errno = EIO;
3356
        goto out;
3357
    }
3358
retry:
3359
    ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
3360

3361
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3362

3363
    if (ret)
3364
        goto out;
3365

3366
    if (iatt.ia_type != IA_IFDIR) {
3367
        ret = -1;
3368
        errno = ENOTDIR;
3369
        goto out;
3370
    }
3371

3372
    ret = syncop_rmdir(subvol, &loc, 0, NULL, NULL);
3373
    DECODE_SYNCOP_ERR(ret);
3374

3375
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3376

3377
    if (ret == 0)
3378
        ret = glfs_loc_unlink(&loc);
3379
out:
3380
    loc_wipe(&loc);
3381

3382
    glfs_subvol_done(fs, subvol);
3383

3384
    __GLFS_EXIT_FS;
3385

3386
invalid_fs:
3387
    return ret;
3388
}
3389

3390
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_rename, 3.4.0)
3391
int
3392
pub_glfs_rename(struct glfs *fs, const char *oldpath, const char *newpath)
3393
{
3394
    int ret = -1;
3395
    xlator_t *subvol = NULL;
3396
    loc_t oldloc = {
3397
        0,
3398
    };
3399
    loc_t newloc = {
3400
        0,
3401
    };
3402
    struct iatt oldiatt = {
3403
        0,
3404
    };
3405
    struct iatt newiatt = {
3406
        0,
3407
    };
3408
    int reval = 0;
3409

3410
    DECLARE_OLD_THIS;
3411
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3412

3413
    subvol = glfs_active_subvol(fs);
3414
    if (!subvol) {
3415
        ret = -1;
3416
        errno = EIO;
3417
        goto out;
3418
    }
3419
retry:
3420
    ret = glfs_lresolve(fs, subvol, oldpath, &oldloc, &oldiatt, reval);
3421

3422
    ESTALE_RETRY(ret, errno, reval, &oldloc, retry);
3423

3424
    if (ret)
3425
        goto out;
3426
retrynew:
3427
    ret = glfs_lresolve(fs, subvol, newpath, &newloc, &newiatt, reval);
3428

3429
    ESTALE_RETRY(ret, errno, reval, &newloc, retrynew);
3430

3431
    if (ret && errno != ENOENT && newloc.parent)
3432
        goto out;
3433

3434
    if (newiatt.ia_type != IA_INVAL) {
3435
        if ((oldiatt.ia_type == IA_IFDIR) != (newiatt.ia_type == IA_IFDIR)) {
3436
            /* Either both old and new must be dirs,
3437
             * or both must be non-dirs. Else, fail.
3438
             */
3439
            ret = -1;
3440
            errno = EISDIR;
3441
            goto out;
3442
        }
3443
    }
3444

3445
    /* TODO: - check if new or old is a prefix of the other, and fail EINVAL
3446
     *       - Add leaseid */
3447

3448
    ret = syncop_rename(subvol, &oldloc, &newloc, NULL, NULL);
3449
    DECODE_SYNCOP_ERR(ret);
3450

3451
    if (ret == -1 && errno == ESTALE) {
3452
        if (reval < DEFAULT_REVAL_COUNT) {
3453
            reval++;
3454
            loc_wipe(&oldloc);
3455
            loc_wipe(&newloc);
3456
            goto retry;
3457
        }
3458
    }
3459

3460
    if (ret == 0) {
3461
        inode_rename(oldloc.parent->table, oldloc.parent, oldloc.name,
3462
                     newloc.parent, newloc.name, oldloc.inode, &oldiatt);
3463

3464
        if (newloc.inode && !inode_has_dentry(newloc.inode))
3465
            inode_forget(newloc.inode, 0);
3466
    }
3467
out:
3468
    loc_wipe(&oldloc);
3469
    loc_wipe(&newloc);
3470

3471
    glfs_subvol_done(fs, subvol);
3472

3473
    __GLFS_EXIT_FS;
3474

3475
invalid_fs:
3476
    return ret;
3477
}
3478

3479
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_link, 3.4.0)
3480
int
3481
pub_glfs_link(struct glfs *fs, const char *oldpath, const char *newpath)
3482
{
3483
    int ret = -1;
3484
    xlator_t *subvol = NULL;
3485
    loc_t oldloc = {
3486
        0,
3487
    };
3488
    loc_t newloc = {
3489
        0,
3490
    };
3491
    struct iatt oldiatt = {
3492
        0,
3493
    };
3494
    struct iatt newiatt = {
3495
        0,
3496
    };
3497
    int reval = 0;
3498

3499
    DECLARE_OLD_THIS;
3500
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3501

3502
    subvol = glfs_active_subvol(fs);
3503
    if (!subvol) {
3504
        ret = -1;
3505
        errno = EIO;
3506
        goto out;
3507
    }
3508
retry:
3509
    ret = glfs_lresolve(fs, subvol, oldpath, &oldloc, &oldiatt, reval);
3510

3511
    ESTALE_RETRY(ret, errno, reval, &oldloc, retry);
3512

3513
    if (ret)
3514
        goto out;
3515
retrynew:
3516
    ret = glfs_lresolve(fs, subvol, newpath, &newloc, &newiatt, reval);
3517

3518
    ESTALE_RETRY(ret, errno, reval, &newloc, retrynew);
3519

3520
    if (ret == 0) {
3521
        ret = -1;
3522
        errno = EEXIST;
3523
        goto out;
3524
    }
3525

3526
    if (oldiatt.ia_type == IA_IFDIR) {
3527
        ret = -1;
3528
        errno = EISDIR;
3529
        goto out;
3530
    }
3531

3532
    /* Filling the inode of the hard link to be same as that of the
3533
       original file
3534
    */
3535
    if (newloc.inode) {
3536
        inode_unref(newloc.inode);
3537
        newloc.inode = NULL;
3538
    }
3539
    newloc.inode = inode_ref(oldloc.inode);
3540

3541
    ret = syncop_link(subvol, &oldloc, &newloc, &newiatt, NULL, NULL);
3542
    DECODE_SYNCOP_ERR(ret);
3543

3544
    if (ret == -1 && errno == ESTALE) {
3545
        loc_wipe(&oldloc);
3546
        loc_wipe(&newloc);
3547
        if (reval--)
3548
            goto retry;
3549
    }
3550

3551
    if (ret == 0)
3552
        ret = glfs_loc_link(&newloc, &newiatt);
3553
out:
3554
    loc_wipe(&oldloc);
3555
    loc_wipe(&newloc);
3556

3557
    glfs_subvol_done(fs, subvol);
3558

3559
    __GLFS_EXIT_FS;
3560

3561
invalid_fs:
3562
    return ret;
3563
}
3564

3565
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_opendir, 3.4.0)
3566
struct glfs_fd *
3567
pub_glfs_opendir(struct glfs *fs, const char *path)
3568
{
3569
    int ret = -1;
3570
    struct glfs_fd *glfd = NULL;
3571
    xlator_t *subvol = NULL;
3572
    loc_t loc = {
3573
        0,
3574
    };
3575
    struct iatt iatt = {
3576
        0,
3577
    };
3578
    int reval = 0;
3579

3580
    DECLARE_OLD_THIS;
3581
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3582

3583
    subvol = glfs_active_subvol(fs);
3584
    if (!subvol) {
3585
        ret = -1;
3586
        errno = EIO;
3587
        goto out;
3588
    }
3589

3590
    glfd = glfs_fd_new(fs);
3591
    if (!glfd)
3592
        goto out;
3593

3594
retry:
3595
    ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
3596

3597
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3598

3599
    if (ret)
3600
        goto out;
3601

3602
    if (!IA_ISDIR(iatt.ia_type)) {
3603
        ret = -1;
3604
        errno = ENOTDIR;
3605
        goto out;
3606
    }
3607

3608
    if (glfd->fd) {
3609
        /* Retry. Safe to touch glfd->fd as we
3610
           have not called glfs_fd_bind() yet.
3611
        */
3612
        fd_unref(glfd->fd);
3613
        glfd->fd = NULL;
3614
    }
3615

3616
    glfd->fd = fd_create(loc.inode, getpid());
3617
    if (!glfd->fd) {
3618
        ret = -1;
3619
        errno = ENOMEM;
3620
        goto out;
3621
    }
3622

3623
    ret = syncop_opendir(subvol, &loc, glfd->fd, NULL, NULL);
3624
    DECODE_SYNCOP_ERR(ret);
3625

3626
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
3627
out:
3628
    loc_wipe(&loc);
3629

3630
    if (ret && glfd) {
3631
        GF_REF_PUT(glfd);
3632
        glfd = NULL;
3633
    } else if (glfd) {
3634
        glfd_set_state_bind(glfd);
3635
    }
3636

3637
    glfs_subvol_done(fs, subvol);
3638

3639
    __GLFS_EXIT_FS;
3640

3641
invalid_fs:
3642
    return glfd;
3643
}
3644

3645
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_closedir, 3.4.0)
3646
int
3647
pub_glfs_closedir(struct glfs_fd *glfd)
3648
{
3649
    int ret = -1;
3650

3651
    DECLARE_OLD_THIS;
3652
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
3653

3654
    gf_dirent_free(list_entry(&glfd->entries, gf_dirent_t, list));
3655

3656
    glfs_mark_glfd_for_deletion(glfd);
3657

3658
    __GLFS_EXIT_FS;
3659

3660
    ret = 0;
3661

3662
invalid_fs:
3663
    return ret;
3664
}
3665

3666
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_telldir, 3.4.0)
3667
long
3668
pub_glfs_telldir(struct glfs_fd *fd)
3669
{
3670
    if (fd == NULL) {
3671
        errno = EBADF;
3672
        return -1;
3673
    }
3674

3675
    return fd->offset;
3676
}
3677

3678
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_seekdir, 3.4.0)
3679
void
3680
pub_glfs_seekdir(struct glfs_fd *fd, long offset)
3681
{
3682
    gf_dirent_t *entry = NULL;
3683
    gf_dirent_t *tmp = NULL;
3684

3685
    if (fd == NULL) {
3686
        errno = EBADF;
3687
        return;
3688
    }
3689

3690
    if (fd->offset == offset)
3691
        return;
3692

3693
    fd->offset = offset;
3694
    fd->next = NULL;
3695

3696
    list_for_each_entry_safe(entry, tmp, &fd->entries, list)
3697
    {
3698
        if (entry->d_off != offset)
3699
            continue;
3700

3701
        if (&tmp->list != &fd->entries) {
3702
            /* found! */
3703
            fd->next = tmp;
3704
            return;
3705
        }
3706
    }
3707
    /* Could not find an entry at the requested offset in the cache;
3708
       the next readdir_r() will trigger glfd_entry_refresh().
3709
    */
3710
}
3711
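/*
 * Illustrative usage sketch (not part of the original file): saving a
 * directory position with glfs_telldir() and rewinding to it with
 * glfs_seekdir(), which either finds the offset in the cached entry list
 * above or forces a refresh on the next read. The helper name
 * example_rewind_dir() is hypothetical.
 */
static void
example_rewind_dir(struct glfs_fd *dirfd)
{
    long pos;
    struct dirent *entry;

    pos = glfs_telldir(dirfd);   /* remember the current position */
    entry = glfs_readdir(dirfd); /* consume one entry */
    (void)entry;

    glfs_seekdir(dirfd, pos); /* the next glfs_readdir() re-reads it */
}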

3712
static int
3713
glfs_discard_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3714
                       int32_t op_ret, int32_t op_errno,
3715
                       struct iatt *preop_stbuf, struct iatt *postop_stbuf,
3716
                       dict_t *xdata)
3717
{
3718
    glfs_io_async_cbk(op_ret, op_errno, frame, cookie, NULL, 0, preop_stbuf,
3719
                      postop_stbuf);
3720

3721
    return 0;
3722
}
3723

3724
static int
3725
glfs_discard_async_common(struct glfs_fd *glfd, off_t offset, size_t len,
3726
                          gf_boolean_t oldcb, glfs_io_cbk fn, void *data)
3727
{
3728
    struct glfs_io *gio = NULL;
3729
    int ret = -1;
3730
    call_frame_t *frame = NULL;
3731
    xlator_t *subvol = NULL;
3732
    fd_t *fd = NULL;
3733
    dict_t *fop_attr = NULL;
3734

3735
    DECLARE_OLD_THIS;
3736
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
3737

3738
    /* Need to take explicit ref so that the fd
3739
     * is not destroyed before the fop is complete
3740
     */
3741
    GF_REF_GET(glfd);
3742

3743
    subvol = glfs_active_subvol(glfd->fs);
3744
    if (!subvol) {
3745
        errno = EIO;
3746
        goto out;
3747
    }
3748

3749
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
3750
    if (!fd) {
3751
        errno = EBADFD;
3752
        goto out;
3753
    }
3754

3755
    frame = syncop_create_frame(THIS);
3756
    if (!frame) {
3757
        errno = ENOMEM;
3758
        goto out;
3759
    }
3760

3761
    gio = GF_CALLOC(1, sizeof(*gio), glfs_mt_glfs_io_t);
3762
    if (!gio) {
3763
        errno = ENOMEM;
3764
        goto out;
3765
    }
3766

3767
    gio->op = GF_FOP_DISCARD;
3768
    gio->glfd = glfd;
3769
    gio->offset = offset;
3770
    gio->count = len;
3771
    gio->oldcb = oldcb;
3772
    gio->fn = fn;
3773
    gio->data = data;
3774

3775
    frame->local = gio;
3776
    ret = get_fop_attr_thrd_key(&fop_attr);
3777
    if (ret)
3778
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
3779

3780
    STACK_WIND_COOKIE(frame, glfs_discard_async_cbk, subvol, subvol,
3781
                      subvol->fops->discard, fd, offset, len, fop_attr);
3782

3783
    ret = 0;
3784
out:
3785
    if (fop_attr)
3786
        dict_unref(fop_attr);
3787

3788
    if (ret) {
3789
        if (fd)
3790
            fd_unref(fd);
3791
        if (glfd)
3792
            GF_REF_PUT(glfd);
3793
        GF_FREE(gio);
3794
        if (frame)
3795
            STACK_DESTROY(frame->root);
3796
        glfs_subvol_done(glfd->fs, subvol);
3797
    }
3798

3799
    __GLFS_EXIT_FS;
3800

3801
invalid_fs:
3802
    return ret;
3803
}
3804

3805
GFAPI_SYMVER_PUBLIC(glfs_discard_async35, glfs_discard_async, 3.5.0)
3806
int
3807
pub_glfs_discard_async35(struct glfs_fd *glfd, off_t offset, size_t len,
3808
                         glfs_io_cbk34 fn, void *data)
3809
{
3810
    return glfs_discard_async_common(glfd, offset, len, _gf_true, (void *)fn,
3811
                                     data);
3812
}
3813

3814
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_discard_async, 6.0)
3815
int
3816
pub_glfs_discard_async(struct glfs_fd *glfd, off_t offset, size_t len,
3817
                       glfs_io_cbk fn, void *data)
3818
{
3819
    return glfs_discard_async_common(glfd, offset, len, _gf_false, fn, data);
3820
}
3821

3822
static int
3823
glfs_zerofill_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3824
                        int32_t op_ret, int32_t op_errno,
3825
                        struct iatt *preop_stbuf, struct iatt *postop_stbuf,
3826
                        dict_t *xdata)
3827
{
3828
    glfs_io_async_cbk(op_ret, op_errno, frame, cookie, NULL, 0, preop_stbuf,
3829
                      postop_stbuf);
3830

3831
    return 0;
3832
}
3833

3834
static int
3835
glfs_zerofill_async_common(struct glfs_fd *glfd, off_t offset, off_t len,
3836
                           gf_boolean_t oldcb, glfs_io_cbk fn, void *data)
3837
{
3838
    struct glfs_io *gio = NULL;
3839
    int ret = -1;
3840
    call_frame_t *frame = NULL;
3841
    xlator_t *subvol = NULL;
3842
    fd_t *fd = NULL;
3843
    dict_t *fop_attr = NULL;
3844

3845
    DECLARE_OLD_THIS;
3846
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
3847

3848
    /* Need to take explicit ref so that the fd
3849
     * is not destroyed before the fop is complete
3850
     */
3851
    GF_REF_GET(glfd);
3852

3853
    subvol = glfs_active_subvol(glfd->fs);
3854
    if (!subvol) {
3855
        errno = EIO;
3856
        goto out;
3857
    }
3858

3859
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
3860
    if (!fd) {
3861
        errno = EBADFD;
3862
        goto out;
3863
    }
3864

3865
    frame = syncop_create_frame(THIS);
3866
    if (!frame) {
3867
        errno = ENOMEM;
3868
        goto out;
3869
    }
3870

3871
    gio = GF_CALLOC(1, sizeof(*gio), glfs_mt_glfs_io_t);
3872
    if (!gio) {
3873
        errno = ENOMEM;
3874
        goto out;
3875
    }
3876

3877
    gio->op = GF_FOP_ZEROFILL;
3878
    gio->glfd = glfd;
3879
    gio->offset = offset;
3880
    gio->count = len;
3881
    gio->oldcb = oldcb;
3882
    gio->fn = fn;
3883
    gio->data = data;
3884

3885
    frame->local = gio;
3886

3887
    ret = get_fop_attr_thrd_key(&fop_attr);
3888
    if (ret)
3889
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
3890

3891
    STACK_WIND_COOKIE(frame, glfs_zerofill_async_cbk, subvol, subvol,
3892
                      subvol->fops->zerofill, fd, offset, len, fop_attr);
3893
    ret = 0;
3894
out:
3895
    if (ret) {
3896
        if (fd)
3897
            fd_unref(fd);
3898
        if (glfd)
3899
            GF_REF_PUT(glfd);
3900
        GF_FREE(gio);
3901
        if (frame)
3902
            STACK_DESTROY(frame->root);
3903
        glfs_subvol_done(glfd->fs, subvol);
3904
    }
3905
    if (fop_attr)
3906
        dict_unref(fop_attr);
3907

3908
    __GLFS_EXIT_FS;
3909

3910
invalid_fs:
3911
    return ret;
3912
}
3913

3914
GFAPI_SYMVER_PUBLIC(glfs_zerofill_async35, glfs_zerofill_async, 3.5.0)
3915
int
3916
pub_glfs_zerofill_async35(struct glfs_fd *glfd, off_t offset, off_t len,
3917
                          glfs_io_cbk34 fn, void *data)
3918
{
3919
    return glfs_zerofill_async_common(glfd, offset, len, _gf_true, (void *)fn,
3920
                                      data);
3921
}
3922

3923
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_zerofill_async, 6.0)
3924
int
3925
pub_glfs_zerofill_async(struct glfs_fd *glfd, off_t offset, off_t len,
3926
                        glfs_io_cbk fn, void *data)
3927
{
3928
    return glfs_zerofill_async_common(glfd, offset, len, _gf_false, fn, data);
3929
}
3930
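/*
 * Illustrative usage sketch (not part of the original file): zero-filling
 * a region asynchronously, assuming the 6.0 glfs_io_cbk signature
 * (fd, ret, prestat, poststat, data). glfs_discard_async() takes the same
 * shape of call when deallocating the range is preferred. The names
 * example_range_done() and example_clear_range() are hypothetical.
 */
static void
example_range_done(glfs_fd_t *fd, ssize_t ret, struct glfs_stat *prestat,
                   struct glfs_stat *poststat, void *data)
{
    (void)fd;
    (void)prestat;
    (void)poststat;
    *(ssize_t *)data = ret; /* completion status for the caller */
}

static int
example_clear_range(struct glfs_fd *glfd, off_t offset, off_t len,
                    ssize_t *status)
{
    /* Zero-fill keeps the blocks allocated; the region reads back as
     * zeroes once the callback reports success. */
    return glfs_zerofill_async(glfd, offset, len, example_range_done, status);
}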

3931
void
3932
gf_dirent_to_dirent(gf_dirent_t *gf_dirent, struct dirent *dirent)
3933
{
3934
    dirent->d_ino = gf_dirent->d_ino;
3935

3936
#ifdef _DIRENT_HAVE_D_OFF
3937
    dirent->d_off = gf_dirent->d_off;
3938
#endif
3939

3940
#ifdef _DIRENT_HAVE_D_TYPE
3941
    dirent->d_type = gf_dirent->d_type;
3942
#endif
3943

3944
#ifdef _DIRENT_HAVE_D_NAMLEN
3945
    dirent->d_namlen = strlen(gf_dirent->d_name);
3946
#endif
3947

3948
    snprintf(dirent->d_name, NAME_MAX + 1, "%s", gf_dirent->d_name);
3949
}
3950

3951
int
3952
glfd_entry_refresh(struct glfs_fd *glfd, int plus)
3953
{
3954
    xlator_t *subvol = NULL;
3955
    gf_dirent_t entries;
3956
    gf_dirent_t old;
3957
    gf_dirent_t *entry = NULL;
3958
    int ret = -1;
3959
    fd_t *fd = NULL;
3960

3961
    subvol = glfs_active_subvol(glfd->fs);
3962
    if (!subvol) {
3963
        ret = -1;
3964
        errno = EIO;
3965
        goto out;
3966
    }
3967

3968
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
3969
    if (!fd) {
3970
        ret = -1;
3971
        errno = EBADFD;
3972
        goto out;
3973
    }
3974

3975
    if (fd->inode->ia_type != IA_IFDIR) {
3976
        ret = -1;
3977
        errno = EBADF;
3978
        goto out;
3979
    }
3980

3981
    INIT_LIST_HEAD(&entries.list);
3982
    INIT_LIST_HEAD(&old.list);
3983

3984
    if (plus)
3985
        ret = syncop_readdirp(subvol, fd, 131072, glfd->offset, &entries, NULL,
3986
                              NULL);
3987
    else
3988
        ret = syncop_readdir(subvol, fd, 131072, glfd->offset, &entries, NULL,
3989
                             NULL);
3990
    DECODE_SYNCOP_ERR(ret);
3991
    if (ret >= 0) {
3992
        if (plus) {
3993
            list_for_each_entry(entry, &entries.list, list)
3994
            {
3995
                if ((!entry->inode && (!IA_ISDIR(entry->d_stat.ia_type))) ||
3996
                    ((entry->d_stat.ia_ctime == 0) &&
3997
                     !inode_dir_or_parentdir(entry))) {
3998
                    /* entry->inode for directories is always
3999
                     * set to NULL to force a lookup on the
4000
                     * dentry. Hence, to avoid degrading readdir
4001
                     * performance, we skip lookups for directory
4002
                     * entries. We will also have a proper stat
4003
                     * if the directory is present on the hashed
4004
                     * subvolume.
4005
                     *
4006
                     * In addition, if the stat is invalid, force
4007
                     * lookup to fetch proper stat.
4008
                     */
4009
                    gf_fill_iatt_for_dirent(entry, fd->inode, subvol);
4010
                }
4011
            }
4012

4013
            gf_link_inodes_from_dirent(fd->inode, &entries);
4014
        }
4015

4016
        list_splice_init(&glfd->entries, &old.list);
4017
        list_splice_init(&entries.list, &glfd->entries);
4018

4019
        /* spurious errno is dangerous for glfd_entry_next() */
4020
        errno = 0;
4021
    }
4022

4023
    if ((ret > 0) && !list_empty(&glfd->entries)) {
4024
        glfd->next = list_entry(glfd->entries.next, gf_dirent_t, list);
4025
    }
4026

4027
    gf_dirent_free(&old);
4028
out:
4029
    if (fd)
4030
        fd_unref(fd);
4031

4032
    glfs_subvol_done(glfd->fs, subvol);
4033

4034
    return ret;
4035
}
4036

4037
gf_dirent_t *
4038
glfd_entry_next(struct glfs_fd *glfd, int plus)
4039
{
4040
    gf_dirent_t *entry = NULL;
4041
    int ret = -1;
4042

4043
    if (!glfd->offset || !glfd->next) {
4044
        ret = glfd_entry_refresh(glfd, plus);
4045
        if (ret < 0)
4046
            return NULL;
4047
    }
4048

4049
    entry = glfd->next;
4050
    if (!entry)
4051
        return NULL;
4052

4053
    if (&entry->next->list == &glfd->entries)
4054
        glfd->next = NULL;
4055
    else
4056
        glfd->next = entry->next;
4057

4058
    glfd->offset = entry->d_off;
4059

4060
    return entry;
4061
}
4062

4063
struct dirent *
4064
glfs_readdirbuf_get(struct glfs_fd *glfd)
4065
{
4066
    struct dirent *buf = NULL;
4067

4068
    LOCK(&glfd->fd->lock);
4069
    {
4070
        buf = glfd->readdirbuf;
4071
        if (buf) {
4072
            memset(buf, 0, READDIRBUF_SIZE);
4073
            goto unlock;
4074
        }
4075

4076
        buf = GF_CALLOC(1, READDIRBUF_SIZE, glfs_mt_readdirbuf_t);
4077
        if (!buf) {
4078
            errno = ENOMEM;
4079
            goto unlock;
4080
        }
4081

4082
        glfd->readdirbuf = buf;
4083
    }
4084
unlock:
4085
    UNLOCK(&glfd->fd->lock);
4086

4087
    return buf;
4088
}
4089

4090
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdirplus_r, 3.4.0)
4091
int
4092
pub_glfs_readdirplus_r(struct glfs_fd *glfd, struct stat *stat,
4093
                       struct dirent *ext, struct dirent **res)
4094
{
4095
    int ret = 0;
4096
    gf_dirent_t *entry = NULL;
4097
    struct dirent *buf = NULL;
4098

4099
    DECLARE_OLD_THIS;
4100
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
4101

4102
    GF_REF_GET(glfd);
4103

4104
    errno = 0;
4105

4106
    if (ext)
4107
        buf = ext;
4108
    else
4109
        buf = glfs_readdirbuf_get(glfd);
4110

4111
    if (!buf) {
4112
        errno = ENOMEM;
4113
        ret = -1;
4114
        goto out;
4115
    }
4116

4117
    entry = glfd_entry_next(glfd, !!stat);
4118
    if (errno)
4119
        ret = -1;
4120

4121
    if (res) {
4122
        if (entry)
4123
            *res = buf;
4124
        else
4125
            *res = NULL;
4126
    }
4127

4128
    if (entry) {
4129
        gf_dirent_to_dirent(entry, buf);
4130
        if (stat)
4131
            glfs_iatt_to_stat(glfd->fs, &entry->d_stat, stat);
4132
    }
4133

4134
out:
4135
    if (glfd)
4136
        GF_REF_PUT(glfd);
4137

4138
    __GLFS_EXIT_FS;
4139

4140
    return ret;
4141

4142
invalid_fs:
4143
    return -1;
4144
}
4145

4146
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdir_r, 3.4.0)
4147
int
4148
pub_glfs_readdir_r(struct glfs_fd *glfd, struct dirent *buf,
4149
                   struct dirent **res)
4150
{
4151
    return pub_glfs_readdirplus_r(glfd, 0, buf, res);
4152
}
4153

4154
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdirplus, 3.5.0)
4155
struct dirent *
4156
pub_glfs_readdirplus(struct glfs_fd *glfd, struct stat *stat)
4157
{
4158
    struct dirent *res = NULL;
4159
    int ret = -1;
4160

4161
    ret = pub_glfs_readdirplus_r(glfd, stat, NULL, &res);
4162
    if (ret)
4163
        return NULL;
4164

4165
    return res;
4166
}
4167

4168
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdir, 3.5.0)
4169
struct dirent *
4170
pub_glfs_readdir(struct glfs_fd *glfd)
4171
{
4172
    return pub_glfs_readdirplus(glfd, NULL);
4173
}
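/* Illustrative usage sketch, not part of the original source: how an
 * application typically drives the readdir wrappers above.  glfs_readdir()
 * returns NULL with errno left at 0 on end-of-directory and NULL with errno
 * set on error (see pub_glfs_readdirplus_r()).  The GFAPI_USAGE_EXAMPLES
 * guard is illustrative only; an external program would include
 * <glusterfs/api/glfs.h>, <stdio.h> and <string.h> and link with -lgfapi.
 */
#ifdef GFAPI_USAGE_EXAMPLES
static int
example_list_dir(struct glfs *fs, const char *path)
{
    struct glfs_fd *dfd = glfs_opendir(fs, path);
    struct dirent *de = NULL;

    if (!dfd)
        return -1;

    errno = 0;
    while ((de = glfs_readdir(dfd)) != NULL) {
        printf("%s\n", de->d_name);
        errno = 0;
    }

    if (errno) /* NULL with errno set is a real error, not end-of-stream */
        fprintf(stderr, "readdir failed: %s\n", strerror(errno));

    glfs_closedir(dfd);
    return 0;
}
#endif /* GFAPI_USAGE_EXAMPLES */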
4174

4175
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_statvfs, 3.4.0)
4176
int
4177
pub_glfs_statvfs(struct glfs *fs, const char *path, struct statvfs *buf)
4178
{
4179
    int ret = -1;
4180
    xlator_t *subvol = NULL;
4181
    loc_t loc = {
4182
        0,
4183
    };
4184
    struct iatt iatt = {
4185
        0,
4186
    };
4187
    int reval = 0;
4188

4189
    DECLARE_OLD_THIS;
4190
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
4191

4192
    subvol = glfs_active_subvol(fs);
4193
    if (!subvol) {
4194
        ret = -1;
4195
        errno = EIO;
4196
        goto out;
4197
    }
4198
retry:
4199
    ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
4200

4201
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
4202

4203
    if (ret)
4204
        goto out;
4205

4206
    ret = syncop_statfs(subvol, &loc, buf, NULL, NULL);
4207
    DECODE_SYNCOP_ERR(ret);
4208

4209
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
4210
out:
4211
    loc_wipe(&loc);
4212

4213
    glfs_subvol_done(fs, subvol);
4214

4215
    __GLFS_EXIT_FS;
4216

4217
invalid_fs:
4218
    return ret;
4219
}
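/* Illustrative usage sketch, not part of the original source: querying free
 * space through glfs_statvfs() above.  Field semantics follow statvfs(3);
 * the "/" path is just an example.  Assumes <stdio.h>.
 * GFAPI_USAGE_EXAMPLES is an illustrative-only guard.
 */
#ifdef GFAPI_USAGE_EXAMPLES
static void
example_print_free_space(struct glfs *fs)
{
    struct statvfs sv = {
        0,
    };

    if (glfs_statvfs(fs, "/", &sv) == 0) {
        unsigned long long free_bytes = (unsigned long long)sv.f_bavail *
                                        sv.f_frsize;
        printf("free bytes: %llu\n", free_bytes);
    }
}
#endif /* GFAPI_USAGE_EXAMPLES */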
4220

4221
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setattr, 6.0)
4222
int
4223
pub_glfs_setattr(struct glfs *fs, const char *path, struct glfs_stat *stat,
4224
                 int follow)
4225
{
4226
    int ret = -1;
4227
    int glvalid;
4228
    xlator_t *subvol = NULL;
4229
    loc_t loc = {
4230
        0,
4231
    };
4232
    struct iatt riatt = {
4233
        0,
4234
    };
4235
    struct iatt iatt = {
4236
        0,
4237
    };
4238
    int reval = 0;
4239

4240
    DECLARE_OLD_THIS;
4241
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
4242

4243
    GF_VALIDATE_OR_GOTO("glfs_setattr", stat, out);
4244

4245
    subvol = glfs_active_subvol(fs);
4246
    if (!subvol) {
4247
        ret = -1;
4248
        errno = EIO;
4249
        goto out;
4250
    }
4251
retry:
4252
    if (follow)
4253
        ret = glfs_resolve(fs, subvol, path, &loc, &riatt, reval);
4254
    else
4255
        ret = glfs_lresolve(fs, subvol, path, &loc, &riatt, reval);
4256

4257
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
4258

4259
    if (ret)
4260
        goto out;
4261

4262
    glfs_iatt_from_statx(&iatt, stat);
4263
    glfsflags_from_gfapiflags(stat, &glvalid);
4264

4265
    /* TODO : Add leaseid */
4266
    ret = syncop_setattr(subvol, &loc, &iatt, glvalid, 0, 0, NULL, NULL);
4267
    DECODE_SYNCOP_ERR(ret);
4268

4269
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
4270
out:
4271
    loc_wipe(&loc);
4272

4273
    glfs_subvol_done(fs, subvol);
4274

4275
    __GLFS_EXIT_FS;
4276

4277
invalid_fs:
4278
    return ret;
4279
}
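/* Illustrative usage sketch, not part of the original source: because
 * glfs_setattr() takes a glfs_st_mask, several attributes can be changed in
 * one call instead of the single-attribute chmod/chown/utimens wrappers
 * further below.  The mode/uid/gid values are placeholders.
 * GFAPI_USAGE_EXAMPLES is an illustrative-only guard.
 */
#ifdef GFAPI_USAGE_EXAMPLES
static int
example_set_mode_and_owner(struct glfs *fs, const char *path)
{
    struct glfs_stat st = {
        0,
    };

    st.glfs_st_mode = 0640;
    st.glfs_st_uid = 1000; /* placeholder uid */
    st.glfs_st_gid = 1000; /* placeholder gid */
    st.glfs_st_mask = GLFS_STAT_MODE | GLFS_STAT_UID | GLFS_STAT_GID;

    /* follow = 1 resolves symlinks, as pub_glfs_chmod()/pub_glfs_chown() do */
    return glfs_setattr(fs, path, &st, 1);
}
#endif /* GFAPI_USAGE_EXAMPLES */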
4280

4281
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsetattr, 6.0)
4282
int
4283
pub_glfs_fsetattr(struct glfs_fd *glfd, struct glfs_stat *stat)
4284
{
4285
    int ret = -1;
4286
    int glvalid;
4287
    struct iatt iatt = {
4288
        0,
4289
    };
4290
    xlator_t *subvol = NULL;
4291
    fd_t *fd = NULL;
4292

4293
    DECLARE_OLD_THIS;
4294
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
4295

4296
    GF_REF_GET(glfd);
4297

4298
    GF_VALIDATE_OR_GOTO("glfs_fsetattr", stat, out);
4299

4300
    subvol = glfs_active_subvol(glfd->fs);
4301
    if (!subvol) {
4302
        ret = -1;
4303
        errno = EIO;
4304
        goto out;
4305
    }
4306

4307
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
4308
    if (!fd) {
4309
        ret = -1;
4310
        errno = EBADFD;
4311
        goto out;
4312
    }
4313

4314
    glfs_iatt_from_statx(&iatt, stat);
4315
    glfsflags_from_gfapiflags(stat, &glvalid);
4316

4317
    /* TODO : Add leaseid */
4318
    ret = syncop_fsetattr(subvol, fd, &iatt, glvalid, 0, 0, NULL, NULL);
4319
    DECODE_SYNCOP_ERR(ret);
4320
out:
4321
    if (fd)
4322
        fd_unref(fd);
4323
    if (glfd)
4324
        GF_REF_PUT(glfd);
4325

4326
    glfs_subvol_done(glfd->fs, subvol);
4327

4328
    __GLFS_EXIT_FS;
4329

4330
invalid_fs:
4331
    return ret;
4332
}
4333

4334
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chmod, 3.4.0)
4335
int
4336
pub_glfs_chmod(struct glfs *fs, const char *path, mode_t mode)
4337
{
4338
    int ret = -1;
4339
    struct glfs_stat stat = {
4340
        0,
4341
    };
4342

4343
    stat.glfs_st_mode = mode;
4344
    stat.glfs_st_mask = GLFS_STAT_MODE;
4345

4346
    ret = glfs_setattr(fs, path, &stat, 1);
4347

4348
    return ret;
4349
}
4350

4351
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchmod, 3.4.0)
4352
int
4353
pub_glfs_fchmod(struct glfs_fd *glfd, mode_t mode)
4354
{
4355
    int ret = -1;
4356
    struct glfs_stat stat = {
4357
        0,
4358
    };
4359

4360
    stat.glfs_st_mode = mode;
4361
    stat.glfs_st_mask = GLFS_STAT_MODE;
4362

4363
    ret = glfs_fsetattr(glfd, &stat);
4364

4365
    return ret;
4366
}
4367

4368
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chown, 3.4.0)
4369
int
4370
pub_glfs_chown(struct glfs *fs, const char *path, uid_t uid, gid_t gid)
4371
{
4372
    int ret = 0;
4373
    struct glfs_stat stat = {
4374
        0,
4375
    };
4376

4377
    if (uid != (uid_t)-1) {
4378
        stat.glfs_st_uid = uid;
4379
        stat.glfs_st_mask = GLFS_STAT_UID;
4380
    }
4381

4382
    if (gid != (gid_t)-1) {
4383
        stat.glfs_st_gid = gid;
4384
        stat.glfs_st_mask = stat.glfs_st_mask | GLFS_STAT_GID;
4385
    }
4386

4387
    if (stat.glfs_st_mask)
4388
        ret = glfs_setattr(fs, path, &stat, 1);
4389

4390
    return ret;
4391
}
4392

4393
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lchown, 3.4.0)
4394
int
4395
pub_glfs_lchown(struct glfs *fs, const char *path, uid_t uid, gid_t gid)
4396
{
4397
    int ret = 0;
4398
    struct glfs_stat stat = {
4399
        0,
4400
    };
4401

4402
    if (uid != (uid_t)-1) {
4403
        stat.glfs_st_uid = uid;
4404
        stat.glfs_st_mask = GLFS_STAT_UID;
4405
    }
4406

4407
    if (gid != (gid_t)-1) {
4408
        stat.glfs_st_gid = gid;
4409
        stat.glfs_st_mask = stat.glfs_st_mask | GLFS_STAT_GID;
4410
    }
4411

4412
    if (stat.glfs_st_mask)
4413
        ret = glfs_setattr(fs, path, &stat, 0);
4414

4415
    return ret;
4416
}
4417

4418
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchown, 3.4.0)
4419
int
4420
pub_glfs_fchown(struct glfs_fd *glfd, uid_t uid, gid_t gid)
4421
{
4422
    int ret = 0;
4423
    struct glfs_stat stat = {
4424
        0,
4425
    };
4426

4427
    if (uid != (uid_t)-1) {
4428
        stat.glfs_st_uid = uid;
4429
        stat.glfs_st_mask = GLFS_STAT_UID;
4430
    }
4431

4432
    if (gid != (gid_t)-1) {
4433
        stat.glfs_st_gid = gid;
4434
        stat.glfs_st_mask = stat.glfs_st_mask | GLFS_STAT_GID;
4435
    }
4436

4437
    if (stat.glfs_st_mask)
4438
        ret = glfs_fsetattr(glfd, &stat);
4439

4440
    return ret;
4441
}
4442

4443
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_utimens, 3.4.0)
4444
int
4445
pub_glfs_utimens(struct glfs *fs, const char *path,
4446
                 const struct timespec times[2])
4447
{
4448
    int ret = -1;
4449
    struct glfs_stat stat = {
4450
        0,
4451
    };
4452

4453
    stat.glfs_st_atime = times[0];
4454
    stat.glfs_st_mtime = times[1];
4455

4456
    stat.glfs_st_mask = GLFS_STAT_ATIME | GLFS_STAT_MTIME;
4457

4458
    ret = glfs_setattr(fs, path, &stat, 1);
4459

4460
    return ret;
4461
}
4462

4463
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lutimens, 3.4.0)
4464
int
4465
pub_glfs_lutimens(struct glfs *fs, const char *path,
4466
                  const struct timespec times[2])
4467
{
4468
    int ret = -1;
4469
    struct glfs_stat stat = {
4470
        0,
4471
    };
4472

4473
    stat.glfs_st_atime = times[0];
4474
    stat.glfs_st_mtime = times[1];
4475

4476
    stat.glfs_st_mask = GLFS_STAT_ATIME | GLFS_STAT_MTIME;
4477

4478
    ret = glfs_setattr(fs, path, &stat, 0);
4479

4480
    return ret;
4481
}
4482

4483
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_futimens, 3.4.0)
4484
int
4485
pub_glfs_futimens(struct glfs_fd *glfd, const struct timespec times[2])
4486
{
4487
    int ret = -1;
4488
    struct glfs_stat stat = {
4489
        0,
4490
    };
4491

4492
    stat.glfs_st_atime = times[0];
4493
    stat.glfs_st_mtime = times[1];
4494

4495
    stat.glfs_st_mask = GLFS_STAT_ATIME | GLFS_STAT_MTIME;
4496

4497
    ret = glfs_fsetattr(glfd, &stat);
4498

4499
    return ret;
4500
}
4501

4502
int
4503
glfs_getxattr_process(void *value, size_t size, dict_t *xattr, const char *name)
4504
{
4505
    data_t *data = NULL;
4506
    int ret = -1;
4507

4508
    data = dict_get(xattr, (char *)name);
4509
    if (!data) {
4510
        errno = ENODATA;
4511
        ret = -1;
4512
        goto out;
4513
    }
4514

4515
    ret = data->len;
4516
    if (!value || !size)
4517
        goto out;
4518

4519
    if (size < ret) {
4520
        ret = -1;
4521
        errno = ERANGE;
4522
        goto out;
4523
    }
4524

4525
    memcpy(value, data->data, ret);
4526
out:
4527
    return ret;
4528
}
4529

4530
ssize_t
4531
glfs_getxattr_common(struct glfs *fs, const char *path, const char *name,
4532
                     void *value, size_t size, int follow)
4533
{
4534
    int ret = -1;
4535
    xlator_t *subvol = NULL;
4536
    loc_t loc = {
4537
        0,
4538
    };
4539
    struct iatt iatt = {
4540
        0,
4541
    };
4542
    dict_t *xattr = NULL;
4543
    int reval = 0;
4544

4545
    DECLARE_OLD_THIS;
4546
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
4547

4548
    if (!name || *name == '\0') {
4549
        ret = -1;
4550
        errno = EINVAL;
4551
        goto out;
4552
    }
4553

4554
    if (strlen(name) > GF_XATTR_NAME_MAX) {
4555
        ret = -1;
4556
        errno = ENAMETOOLONG;
4557
        goto out;
4558
    }
4559

4560
    subvol = glfs_active_subvol(fs);
4561
    if (!subvol) {
4562
        ret = -1;
4563
        errno = EIO;
4564
        goto out;
4565
    }
4566

4567
retry:
4568
    if (follow)
4569
        ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
4570
    else
4571
        ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
4572

4573
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
4574

4575
    if (ret)
4576
        goto out;
4577

4578
    ret = syncop_getxattr(subvol, &loc, &xattr, name, NULL, NULL);
4579
    DECODE_SYNCOP_ERR(ret);
4580

4581
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
4582

4583
    if (ret)
4584
        goto out;
4585

4586
    ret = glfs_getxattr_process(value, size, xattr, name);
4587
out:
4588
    loc_wipe(&loc);
4589

4590
    if (xattr)
4591
        dict_unref(xattr);
4592

4593
    glfs_subvol_done(fs, subvol);
4594

4595
    __GLFS_EXIT_FS;
4596

4597
invalid_fs:
4598
    return ret;
4599
}
4600

4601
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_getxattr, 3.4.0)
4602
ssize_t
4603
pub_glfs_getxattr(struct glfs *fs, const char *path, const char *name,
4604
                  void *value, size_t size)
4605
{
4606
    return glfs_getxattr_common(fs, path, name, value, size, 1);
4607
}
4608

4609
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lgetxattr, 3.4.0)
4610
ssize_t
4611
pub_glfs_lgetxattr(struct glfs *fs, const char *path, const char *name,
4612
                   void *value, size_t size)
4613
{
4614
    return glfs_getxattr_common(fs, path, name, value, size, 0);
4615
}
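/* Illustrative usage sketch, not part of the original source: the usual
 * two-step pattern for reading an xattr of unknown size.  Passing a NULL/0
 * buffer makes glfs_getxattr() return just the value length (see
 * glfs_getxattr_process() above), and a short buffer fails with ERANGE.
 * The xattr name is caller-chosen; assumes <stdlib.h>.
 * GFAPI_USAGE_EXAMPLES is an illustrative-only guard.
 */
#ifdef GFAPI_USAGE_EXAMPLES
static char *
example_get_xattr(struct glfs *fs, const char *path, const char *name)
{
    ssize_t len = glfs_getxattr(fs, path, name, NULL, 0);
    char *buf = NULL;

    if (len < 0)
        return NULL;

    buf = malloc(len + 1);
    if (!buf)
        return NULL;

    len = glfs_getxattr(fs, path, name, buf, len);
    if (len < 0) {
        free(buf);
        return NULL;
    }

    buf[len] = '\0'; /* xattr values are not necessarily NUL-terminated */
    return buf;
}
#endif /* GFAPI_USAGE_EXAMPLES */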
4616

4617
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fgetxattr, 3.4.0)
4618
ssize_t
4619
pub_glfs_fgetxattr(struct glfs_fd *glfd, const char *name, void *value,
4620
                   size_t size)
4621
{
4622
    int ret = -1;
4623
    xlator_t *subvol = NULL;
4624
    dict_t *xattr = NULL;
4625
    fd_t *fd = NULL;
4626

4627
    DECLARE_OLD_THIS;
4628
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
4629

4630
    GF_REF_GET(glfd);
4631

4632
    if (!name || *name == '\0') {
4633
        ret = -1;
4634
        errno = EINVAL;
4635
        goto out;
4636
    }
4637

4638
    if (strlen(name) > GF_XATTR_NAME_MAX) {
4639
        ret = -1;
4640
        errno = ENAMETOOLONG;
4641
        goto out;
4642
    }
4643

4644
    subvol = glfs_active_subvol(glfd->fs);
4645
    if (!subvol) {
4646
        ret = -1;
4647
        errno = EIO;
4648
        goto out;
4649
    }
4650

4651
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
4652
    if (!fd) {
4653
        ret = -1;
4654
        errno = EBADFD;
4655
        goto out;
4656
    }
4657

4658
    ret = syncop_fgetxattr(subvol, fd, &xattr, name, NULL, NULL);
4659
    DECODE_SYNCOP_ERR(ret);
4660
    if (ret)
4661
        goto out;
4662

4663
    ret = glfs_getxattr_process(value, size, xattr, name);
4664
out:
4665
    if (fd)
4666
        fd_unref(fd);
4667
    if (glfd)
4668
        GF_REF_PUT(glfd);
4669
    if (xattr)
4670
        dict_unref(xattr);
4671

4672
    glfs_subvol_done(glfd->fs, subvol);
4673

4674
    __GLFS_EXIT_FS;
4675

4676
invalid_fs:
4677
    return ret;
4678
}
4679

4680
/* Filter out xattrs that should not be visible to the
 * client application.
 */
static int
4684
gfapi_filter_xattr(char *key)
4685
{
4686
    int need_filter = 0;
4687

4688
    /* If there are by chance any internal virtual xattrs (those starting with
4689
     * 'glusterfs.'), filter them */
4690
    if (strncmp("glusterfs.", key, SLEN("glusterfs.")) == 0)
4691
        need_filter = 1;
4692

4693
    return need_filter;
4694
}
4695

4696
int
4697
glfs_listxattr_process(void *value, size_t size, dict_t *xattr)
4698
{
4699
    int ret = -1;
4700

4701
    if (!xattr)
4702
        goto out;
4703

4704
    ret = dict_keys_join(NULL, 0, xattr, gfapi_filter_xattr);
4705

4706
    if (!value || !size)
4707
        goto out;
4708

4709
    if (size < ret) {
4710
        ret = -1;
4711
        errno = ERANGE;
4712
    } else {
4713
        dict_keys_join(value, size, xattr, gfapi_filter_xattr);
4714
    }
4715

4716
out:
4717
    return ret;
4718
}
4719

4720
ssize_t
4721
glfs_listxattr_common(struct glfs *fs, const char *path, void *value,
4722
                      size_t size, int follow)
4723
{
4724
    int ret = -1;
4725
    xlator_t *subvol = NULL;
4726
    loc_t loc = {
4727
        0,
4728
    };
4729
    struct iatt iatt = {
4730
        0,
4731
    };
4732
    dict_t *xattr = NULL;
4733
    int reval = 0;
4734

4735
    DECLARE_OLD_THIS;
4736
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
4737

4738
    subvol = glfs_active_subvol(fs);
4739
    if (!subvol) {
4740
        ret = -1;
4741
        errno = EIO;
4742
        goto out;
4743
    }
4744

4745
retry:
4746
    if (follow)
4747
        ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
4748
    else
4749
        ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
4750

4751
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
4752

4753
    if (ret)
4754
        goto out;
4755

4756
    ret = syncop_getxattr(subvol, &loc, &xattr, NULL, NULL, NULL);
4757
    DECODE_SYNCOP_ERR(ret);
4758

4759
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
4760

4761
    if (ret)
4762
        goto out;
4763

4764
    ret = glfs_listxattr_process(value, size, xattr);
4765
out:
4766
    loc_wipe(&loc);
4767

4768
    if (xattr)
4769
        dict_unref(xattr);
4770

4771
    glfs_subvol_done(fs, subvol);
4772

4773
    __GLFS_EXIT_FS;
4774

4775
invalid_fs:
4776
    return ret;
4777
}
4778

4779
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_listxattr, 3.4.0)
4780
ssize_t
4781
pub_glfs_listxattr(struct glfs *fs, const char *path, void *value, size_t size)
4782
{
4783
    return glfs_listxattr_common(fs, path, value, size, 1);
4784
}
4785

4786
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_llistxattr, 3.4.0)
4787
ssize_t
4788
pub_glfs_llistxattr(struct glfs *fs, const char *path, void *value, size_t size)
4789
{
4790
    return glfs_listxattr_common(fs, path, value, size, 0);
4791
}
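/* Illustrative usage sketch, not part of the original source: walking a
 * listxattr result, which - as with listxattr(2) - is assumed to be a
 * sequence of NUL-terminated names packed back to back (built by
 * dict_keys_join() in glfs_listxattr_process() above).  Assumes <stdio.h>,
 * <stdlib.h> and <string.h>.  GFAPI_USAGE_EXAMPLES is an illustrative-only
 * guard.
 */
#ifdef GFAPI_USAGE_EXAMPLES
static void
example_list_xattrs(struct glfs *fs, const char *path)
{
    ssize_t len = glfs_listxattr(fs, path, NULL, 0);
    char *buf = NULL;
    char *key = NULL;

    if (len <= 0)
        return;

    buf = malloc(len);
    if (!buf)
        return;

    len = glfs_listxattr(fs, path, buf, len);
    for (key = buf; len > 0 && key < buf + len; key += strlen(key) + 1)
        printf("xattr: %s\n", key);

    free(buf);
}
#endif /* GFAPI_USAGE_EXAMPLES */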
4792

4793
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_flistxattr, 3.4.0)
4794
ssize_t
4795
pub_glfs_flistxattr(struct glfs_fd *glfd, void *value, size_t size)
4796
{
4797
    int ret = -1;
4798
    xlator_t *subvol = NULL;
4799
    dict_t *xattr = NULL;
4800
    fd_t *fd = NULL;
4801

4802
    DECLARE_OLD_THIS;
4803
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
4804

4805
    GF_REF_GET(glfd);
4806

4807
    subvol = glfs_active_subvol(glfd->fs);
4808
    if (!subvol) {
4809
        ret = -1;
4810
        errno = EIO;
4811
        goto out;
4812
    }
4813

4814
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
4815
    if (!fd) {
4816
        ret = -1;
4817
        errno = EBADFD;
4818
        goto out;
4819
    }
4820

4821
    ret = syncop_fgetxattr(subvol, fd, &xattr, NULL, NULL, NULL);
4822
    DECODE_SYNCOP_ERR(ret);
4823
    if (ret)
4824
        goto out;
4825

4826
    ret = glfs_listxattr_process(value, size, xattr);
4827
out:
4828
    if (fd)
4829
        fd_unref(fd);
4830
    if (glfd)
4831
        GF_REF_PUT(glfd);
4832
    if (xattr)
4833
        dict_unref(xattr);
4834

4835
    glfs_subvol_done(glfd->fs, subvol);
4836

4837
    __GLFS_EXIT_FS;
4838

4839
invalid_fs:
4840
    return ret;
4841
}
4842

4843
int
4844
glfs_setxattr_common(struct glfs *fs, const char *path, const char *name,
4845
                     const void *value, size_t size, int flags, int follow)
4846
{
4847
    int ret = -1;
4848
    xlator_t *subvol = NULL;
4849
    loc_t loc = {
4850
        0,
4851
    };
4852
    struct iatt iatt = {
4853
        0,
4854
    };
4855
    dict_t *xattr = NULL;
4856
    int reval = 0;
4857
    void *value_cp = NULL;
4858

4859
    DECLARE_OLD_THIS;
4860
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
4861

4862
    if (!name || *name == '\0') {
4863
        ret = -1;
4864
        errno = EINVAL;
4865
        goto out;
4866
    }
4867

4868
    if (strlen(name) > GF_XATTR_NAME_MAX) {
4869
        ret = -1;
4870
        errno = ENAMETOOLONG;
4871
        goto out;
4872
    }
4873

4874
    subvol = glfs_active_subvol(fs);
4875
    if (!subvol) {
4876
        ret = -1;
4877
        errno = EIO;
4878
        goto out;
4879
    }
4880

4881
retry:
4882
    if (follow)
4883
        ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
4884
    else
4885
        ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
4886

4887
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
4888

4889
    if (ret)
4890
        goto out;
4891

4892
    value_cp = gf_memdup(value, size);
4893
    GF_CHECK_ALLOC_AND_LOG(subvol->name, value_cp, ret,
4894
                           "Failed to"
4895
                           " duplicate setxattr value",
4896
                           out);
4897

4898
    xattr = dict_for_key_value(name, value_cp, size, _gf_false);
4899
    if (!xattr) {
4900
        GF_FREE(value_cp);
4901
        ret = -1;
4902
        errno = ENOMEM;
4903
        goto out;
4904
    }
4905

4906
    ret = syncop_setxattr(subvol, &loc, xattr, flags, NULL, NULL);
4907
    DECODE_SYNCOP_ERR(ret);
4908

4909
out:
4910
    loc_wipe(&loc);
4911
    if (xattr)
4912
        dict_unref(xattr);
4913

4914
    glfs_subvol_done(fs, subvol);
4915

4916
    __GLFS_EXIT_FS;
4917

4918
invalid_fs:
4919
    return ret;
4920
}
4921

4922
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setxattr, 3.4.0)
4923
int
4924
pub_glfs_setxattr(struct glfs *fs, const char *path, const char *name,
4925
                  const void *value, size_t size, int flags)
4926
{
4927
    return glfs_setxattr_common(fs, path, name, value, size, flags, 1);
4928
}
4929

4930
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lsetxattr, 3.4.0)
4931
int
4932
pub_glfs_lsetxattr(struct glfs *fs, const char *path, const char *name,
4933
                   const void *value, size_t size, int flags)
4934
{
4935
    return glfs_setxattr_common(fs, path, name, value, size, flags, 0);
4936
}
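/* Illustrative usage sketch, not part of the original source: storing a
 * user-namespace xattr.  The flags argument is handed through to the
 * setxattr fop; 0 means create-or-replace, and the XATTR_CREATE /
 * XATTR_REPLACE values from <sys/xattr.h> are assumed to behave as they do
 * for setxattr(2).  The "user.tier" key is a placeholder; assumes
 * <string.h>.  GFAPI_USAGE_EXAMPLES is an illustrative-only guard.
 */
#ifdef GFAPI_USAGE_EXAMPLES
static int
example_tag_file(struct glfs *fs, const char *path)
{
    const char *value = "gold";

    return glfs_setxattr(fs, path, "user.tier", value, strlen(value), 0);
}
#endif /* GFAPI_USAGE_EXAMPLES */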
4937

4938
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsetxattr, 3.4.0)
4939
int
4940
pub_glfs_fsetxattr(struct glfs_fd *glfd, const char *name, const void *value,
4941
                   size_t size, int flags)
4942
{
4943
    int ret = -1;
4944
    xlator_t *subvol = NULL;
4945
    dict_t *xattr = NULL;
4946
    fd_t *fd = NULL;
4947
    void *value_cp = NULL;
4948

4949
    DECLARE_OLD_THIS;
4950
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
4951

4952
    GF_REF_GET(glfd);
4953

4954
    if (!name || *name == '\0') {
4955
        ret = -1;
4956
        errno = EINVAL;
4957
        goto out;
4958
    }
4959

4960
    if (strlen(name) > GF_XATTR_NAME_MAX) {
4961
        ret = -1;
4962
        errno = ENAMETOOLONG;
4963
        goto out;
4964
    }
4965

4966
    subvol = glfs_active_subvol(glfd->fs);
4967
    if (!subvol) {
4968
        ret = -1;
4969
        errno = EIO;
4970
        goto out;
4971
    }
4972

4973
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
4974
    if (!fd) {
4975
        ret = -1;
4976
        errno = EBADFD;
4977
        goto out;
4978
    }
4979

4980
    value_cp = gf_memdup(value, size);
4981
    GF_CHECK_ALLOC_AND_LOG(subvol->name, value_cp, ret,
4982
                           "Failed to"
4983
                           " duplicate setxattr value",
4984
                           out);
4985

4986
    xattr = dict_for_key_value(name, value_cp, size, _gf_false);
4987
    if (!xattr) {
4988
        GF_FREE(value_cp);
4989
        ret = -1;
4990
        errno = ENOMEM;
4991
        goto out;
4992
    }
4993

4994
    ret = syncop_fsetxattr(subvol, fd, xattr, flags, NULL, NULL);
4995
    DECODE_SYNCOP_ERR(ret);
4996
out:
4997
    if (xattr)
4998
        dict_unref(xattr);
4999

5000
    if (fd)
5001
        fd_unref(fd);
5002
    if (glfd)
5003
        GF_REF_PUT(glfd);
5004

5005
    glfs_subvol_done(glfd->fs, subvol);
5006

5007
    __GLFS_EXIT_FS;
5008

5009
invalid_fs:
5010
    return ret;
5011
}
5012

5013
int
5014
glfs_removexattr_common(struct glfs *fs, const char *path, const char *name,
5015
                        int follow)
5016
{
5017
    int ret = -1;
5018
    xlator_t *subvol = NULL;
5019
    loc_t loc = {
5020
        0,
5021
    };
5022
    struct iatt iatt = {
5023
        0,
5024
    };
5025
    int reval = 0;
5026

5027
    DECLARE_OLD_THIS;
5028
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
5029

5030
    subvol = glfs_active_subvol(fs);
5031
    if (!subvol) {
5032
        ret = -1;
5033
        errno = EIO;
5034
        goto out;
5035
    }
5036
retry:
5037
    if (follow)
5038
        ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
5039
    else
5040
        ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
5041

5042
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
5043

5044
    if (ret)
5045
        goto out;
5046

5047
    ret = syncop_removexattr(subvol, &loc, name, NULL, NULL);
5048
    DECODE_SYNCOP_ERR(ret);
5049

5050
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
5051

5052
out:
5053
    loc_wipe(&loc);
5054

5055
    glfs_subvol_done(fs, subvol);
5056

5057
    __GLFS_EXIT_FS;
5058

5059
invalid_fs:
5060
    return ret;
5061
}
5062

5063
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_removexattr, 3.4.0)
5064
int
5065
pub_glfs_removexattr(struct glfs *fs, const char *path, const char *name)
5066
{
5067
    return glfs_removexattr_common(fs, path, name, 1);
5068
}
5069

5070
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lremovexattr, 3.4.0)
5071
int
5072
pub_glfs_lremovexattr(struct glfs *fs, const char *path, const char *name)
5073
{
5074
    return glfs_removexattr_common(fs, path, name, 0);
5075
}
5076

5077
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fremovexattr, 3.4.0)
5078
int
5079
pub_glfs_fremovexattr(struct glfs_fd *glfd, const char *name)
5080
{
5081
    int ret = -1;
5082
    xlator_t *subvol = NULL;
5083
    fd_t *fd = NULL;
5084

5085
    DECLARE_OLD_THIS;
5086
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5087

5088
    GF_REF_GET(glfd);
5089

5090
    subvol = glfs_active_subvol(glfd->fs);
5091
    if (!subvol) {
5092
        ret = -1;
5093
        errno = EIO;
5094
        goto out;
5095
    }
5096

5097
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
5098
    if (!fd) {
5099
        ret = -1;
5100
        errno = EBADFD;
5101
        goto out;
5102
    }
5103

5104
    ret = syncop_fremovexattr(subvol, fd, name, NULL, NULL);
5105
    DECODE_SYNCOP_ERR(ret);
5106
out:
5107
    if (fd)
5108
        fd_unref(fd);
5109
    if (glfd)
5110
        GF_REF_PUT(glfd);
5111

5112
    glfs_subvol_done(glfd->fs, subvol);
5113

5114
    __GLFS_EXIT_FS;
5115

5116
invalid_fs:
5117
    return ret;
5118
}
5119

5120
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fallocate, 3.5.0)
5121
int
5122
pub_glfs_fallocate(struct glfs_fd *glfd, int keep_size, off_t offset,
5123
                   size_t len)
5124
{
5125
    int ret = -1;
5126
    xlator_t *subvol = NULL;
5127
    fd_t *fd = NULL;
5128
    dict_t *fop_attr = NULL;
5129

5130
    DECLARE_OLD_THIS;
5131
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5132

5133
    GF_REF_GET(glfd);
5134

5135
    subvol = glfs_active_subvol(glfd->fs);
5136
    if (!subvol) {
5137
        ret = -1;
5138
        errno = EIO;
5139
        goto out;
5140
    }
5141

5142
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
5143
    if (!fd) {
5144
        ret = -1;
5145
        errno = EBADFD;
5146
        goto out;
5147
    }
5148

5149
    ret = get_fop_attr_thrd_key(&fop_attr);
5150
    if (ret)
5151
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
5152

5153
    ret = syncop_fallocate(subvol, fd, keep_size, offset, len, fop_attr, NULL);
5154
    DECODE_SYNCOP_ERR(ret);
5155
out:
5156
    if (fd)
5157
        fd_unref(fd);
5158
    if (glfd)
5159
        GF_REF_PUT(glfd);
5160
    if (fop_attr)
5161
        dict_unref(fop_attr);
5162

5163
    glfs_subvol_done(glfd->fs, subvol);
5164

5165
    __GLFS_EXIT_FS;
5166

5167
invalid_fs:
5168
    return ret;
5169
}
5170

5171
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_discard, 3.5.0)
5172
int
5173
pub_glfs_discard(struct glfs_fd *glfd, off_t offset, size_t len)
5174
{
5175
    int ret = -1;
5176
    xlator_t *subvol = NULL;
5177
    fd_t *fd = NULL;
5178
    dict_t *fop_attr = NULL;
5179

5180
    DECLARE_OLD_THIS;
5181
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5182

5183
    GF_REF_GET(glfd);
5184

5185
    subvol = glfs_active_subvol(glfd->fs);
5186
    if (!subvol) {
5187
        ret = -1;
5188
        errno = EIO;
5189
        goto out;
5190
    }
5191

5192
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
5193
    if (!fd) {
5194
        ret = -1;
5195
        errno = EBADFD;
5196
        goto out;
5197
    }
5198

5199
    ret = get_fop_attr_thrd_key(&fop_attr);
5200
    if (ret)
5201
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
5202

5203
    ret = syncop_discard(subvol, fd, offset, len, fop_attr, NULL);
5204
    DECODE_SYNCOP_ERR(ret);
5205
out:
5206
    if (fd)
5207
        fd_unref(fd);
5208
    if (glfd)
5209
        GF_REF_PUT(glfd);
5210
    if (fop_attr)
5211
        dict_unref(fop_attr);
5212

5213
    glfs_subvol_done(glfd->fs, subvol);
5214

5215
    __GLFS_EXIT_FS;
5216

5217
invalid_fs:
5218
    return ret;
5219
}
5220

5221
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_zerofill, 3.5.0)
5222
int
5223
pub_glfs_zerofill(struct glfs_fd *glfd, off_t offset, off_t len)
5224
{
5225
    int ret = -1;
5226
    xlator_t *subvol = NULL;
5227
    fd_t *fd = NULL;
5228
    dict_t *fop_attr = NULL;
5229

5230
    DECLARE_OLD_THIS;
5231
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5232

5233
    GF_REF_GET(glfd);
5234

5235
    subvol = glfs_active_subvol(glfd->fs);
5236
    if (!subvol) {
5237
        errno = EIO;
5238
        goto out;
5239
    }
5240

5241
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
5242
    if (!fd) {
5243
        errno = EBADFD;
5244
        goto out;
5245
    }
5246

5247
    ret = get_fop_attr_thrd_key(&fop_attr);
5248
    if (ret)
5249
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
5250

5251
    ret = syncop_zerofill(subvol, fd, offset, len, fop_attr, NULL);
5252
    DECODE_SYNCOP_ERR(ret);
5253
out:
5254
    if (fd)
5255
        fd_unref(fd);
5256
    if (glfd)
5257
        GF_REF_PUT(glfd);
5258
    if (fop_attr)
5259
        dict_unref(fop_attr);
5260

5261
    glfs_subvol_done(glfd->fs, subvol);
5262

5263
    __GLFS_EXIT_FS;
5264

5265
invalid_fs:
5266
    return ret;
5267
}
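/* Illustrative usage sketch, not part of the original source: typical use
 * of the three space-management calls above on an open fd - reserve a
 * region, deallocate part of it, then zero a range.  Offsets and lengths
 * are arbitrary examples.  GFAPI_USAGE_EXAMPLES is an illustrative-only
 * guard.
 */
#ifdef GFAPI_USAGE_EXAMPLES
static int
example_space_management(struct glfs_fd *glfd)
{
    int ret;

    /* reserve 1 MiB at offset 0; keep_size = 0 lets the file grow */
    ret = glfs_fallocate(glfd, 0, 0, 1048576);
    if (ret)
        return ret;

    /* deallocate 64 KiB starting at 128 KiB */
    ret = glfs_discard(glfd, 131072, 65536);
    if (ret)
        return ret;

    /* write zeroes over 64 KiB starting at 512 KiB */
    return glfs_zerofill(glfd, 524288, 65536);
}
#endif /* GFAPI_USAGE_EXAMPLES */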
5268

5269
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chdir, 3.4.0)
5270
int
5271
pub_glfs_chdir(struct glfs *fs, const char *path)
5272
{
5273
    int ret = -1;
5274
    xlator_t *subvol = NULL;
5275
    loc_t loc = {
5276
        0,
5277
    };
5278
    struct iatt iatt = {
5279
        0,
5280
    };
5281
    int reval = 0;
5282

5283
    DECLARE_OLD_THIS;
5284
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
5285

5286
    subvol = glfs_active_subvol(fs);
5287
    if (!subvol) {
5288
        ret = -1;
5289
        errno = EIO;
5290
        goto out;
5291
    }
5292
retry:
5293
    ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
5294

5295
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
5296

5297
    if (ret)
5298
        goto out;
5299

5300
    if (!IA_ISDIR(iatt.ia_type)) {
5301
        ret = -1;
5302
        errno = ENOTDIR;
5303
        goto out;
5304
    }
5305

5306
    glfs_cwd_set(fs, loc.inode);
5307

5308
out:
5309
    loc_wipe(&loc);
5310

5311
    glfs_subvol_done(fs, subvol);
5312

5313
    __GLFS_EXIT_FS;
5314

5315
invalid_fs:
5316
    return ret;
5317
}
5318

5319
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchdir, 3.4.0)
5320
int
5321
pub_glfs_fchdir(struct glfs_fd *glfd)
5322
{
5323
    int ret = -1;
5324
    inode_t *inode = NULL;
5325
    xlator_t *subvol = NULL;
5326
    fd_t *fd = NULL;
5327

5328
    DECLARE_OLD_THIS;
5329
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5330

5331
    GF_REF_GET(glfd);
5332

5333
    subvol = glfs_active_subvol(glfd->fs);
5334
    if (!subvol) {
5335
        ret = -1;
5336
        errno = EIO;
5337
        goto out;
5338
    }
5339

5340
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
5341
    if (!fd) {
5342
        ret = -1;
5343
        errno = EBADFD;
5344
        goto out;
5345
    }
5346

5347
    inode = fd->inode;
5348

5349
    if (!IA_ISDIR(inode->ia_type)) {
5350
        ret = -1;
5351
        errno = ENOTDIR;
5352
        goto out;
5353
    }
5354

5355
    glfs_cwd_set(glfd->fs, inode);
5356
    ret = 0;
5357
out:
5358
    if (fd)
5359
        fd_unref(fd);
5360
    if (glfd)
5361
        GF_REF_PUT(glfd);
5362

5363
    glfs_subvol_done(glfd->fs, subvol);
5364

5365
    __GLFS_EXIT_FS;
5366

5367
invalid_fs:
5368
    return ret;
5369
}
5370

5371
static gf_boolean_t warn_realpath = _gf_true; /* log once */
5372

5373
static char *
5374
glfs_realpath_common(struct glfs *fs, const char *path, char *resolved_path,
5375
                     gf_boolean_t warn_deprecated)
5376
{
5377
    int ret = -1;
5378
    char *retpath = NULL;
5379
    char *allocpath = NULL;
5380
    xlator_t *subvol = NULL;
5381
    loc_t loc = {
5382
        0,
5383
    };
5384
    struct iatt iatt = {
5385
        0,
5386
    };
5387
    int reval = 0;
5388

5389
    DECLARE_OLD_THIS;
5390
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
5391

5392
    if (resolved_path)
5393
        retpath = resolved_path;
5394
    else if (warn_deprecated) {
5395
        retpath = allocpath = malloc(PATH_MAX + 1);
5396
        if (warn_realpath) {
5397
            warn_realpath = _gf_false;
5398
            gf_log(THIS->name, GF_LOG_WARNING,
5399
                   "this application "
5400
                   "is compiled against an old version of "
5401
                   "libgfapi, it should use glfs_free() to "
5402
                   "release the path returned by "
5403
                   "glfs_realpath()");
5404
        }
5405
    } else {
5406
        retpath = allocpath = GLFS_CALLOC(1, PATH_MAX + 1, NULL,
5407
                                          glfs_mt_realpath_t);
5408
    }
5409

5410
    if (!retpath) {
5411
        ret = -1;
5412
        errno = ENOMEM;
5413
        goto out;
5414
    }
5415

5416
    subvol = glfs_active_subvol(fs);
5417
    if (!subvol) {
5418
        ret = -1;
5419
        errno = EIO;
5420
        goto out;
5421
    }
5422
retry:
5423
    ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
5424

5425
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
5426

5427
    if (ret)
5428
        goto out;
5429

5430
    if (loc.path) {
5431
        snprintf(retpath, PATH_MAX + 1, "%s", loc.path);
5432
    }
5433

5434
out:
5435
    loc_wipe(&loc);
5436

5437
    if (ret == -1) {
5438
        if (warn_deprecated && allocpath)
5439
            free(allocpath);
5440
        else if (allocpath)
5441
            GLFS_FREE(allocpath);
5442
        retpath = NULL;
5443
    }
5444

5445
    glfs_subvol_done(fs, subvol);
5446

5447
    __GLFS_EXIT_FS;
5448

5449
invalid_fs:
5450
    return retpath;
5451
}
5452

5453
GFAPI_SYMVER_PUBLIC(glfs_realpath34, glfs_realpath, 3.4.0)
5454
char *
5455
pub_glfs_realpath34(struct glfs *fs, const char *path, char *resolved_path)
5456
{
5457
    return glfs_realpath_common(fs, path, resolved_path, _gf_true);
5458
}
5459

5460
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_realpath, 3.7.17)
5461
char *
5462
pub_glfs_realpath(struct glfs *fs, const char *path, char *resolved_path)
5463
{
5464
    return glfs_realpath_common(fs, path, resolved_path, _gf_false);
5465
}
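/* Illustrative usage sketch, not part of the original source: with the
 * current glfs_realpath() symbol (3.7.17 and later), a NULL resolved_path
 * asks the library to allocate the result, which must then be released with
 * glfs_free() rather than free(), exactly as the deprecation warning in
 * glfs_realpath_common() describes.  Assumes <stdio.h>.
 * GFAPI_USAGE_EXAMPLES is an illustrative-only guard.
 */
#ifdef GFAPI_USAGE_EXAMPLES
static void
example_print_realpath(struct glfs *fs, const char *path)
{
    char *resolved = glfs_realpath(fs, path, NULL);

    if (resolved) {
        printf("%s -> %s\n", path, resolved);
        glfs_free(resolved);
    }
}
#endif /* GFAPI_USAGE_EXAMPLES */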
5466

5467
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_getcwd, 3.4.0)
5468
char *
5469
pub_glfs_getcwd(struct glfs *fs, char *buf, size_t n)
5470
{
5471
    int ret = -1;
5472
    inode_t *inode = NULL;
5473
    char *path = NULL;
5474

5475
    DECLARE_OLD_THIS;
5476
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
5477

5478
    if (!buf || n < 2) {
5479
        ret = -1;
5480
        errno = EINVAL;
5481
        goto out;
5482
    }
5483

5484
    inode = glfs_cwd_get(fs);
5485

5486
    if (!inode) {
5487
        strncpy(buf, "/", n);
5488
        ret = 0;
5489
        goto out;
5490
    }
5491

5492
    ret = inode_path(inode, 0, &path);
5493
    if (n <= ret) {
5494
        ret = -1;
5495
        errno = ERANGE;
5496
        goto out;
5497
    }
5498

5499
    strncpy(buf, path, n);
5500
    ret = 0;
5501
out:
5502
    GF_FREE(path);
5503

5504
    if (inode)
5505
        inode_unref(inode);
5506

5507
    __GLFS_EXIT_FS;
5508

5509
invalid_fs:
5510
    if (ret < 0)
5511
        return NULL;
5512

5513
    return buf;
5514
}
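/* Illustrative usage sketch, not part of the original source: per-fs
 * working-directory handling with the calls above.  glfs_getcwd() returns
 * "/" before any chdir and fails with ERANGE when the buffer is too small.
 * "/exports" is a placeholder path; assumes <stdio.h> and <limits.h>.
 * GFAPI_USAGE_EXAMPLES is an illustrative-only guard.
 */
#ifdef GFAPI_USAGE_EXAMPLES
static void
example_cwd(struct glfs *fs)
{
    char cwd[PATH_MAX];

    if (glfs_chdir(fs, "/exports") == 0 &&
        glfs_getcwd(fs, cwd, sizeof(cwd)) != NULL)
        printf("cwd is now %s\n", cwd);
}
#endif /* GFAPI_USAGE_EXAMPLES */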
5515

5516
static void
5517
gf_flock_to_flock(struct gf_flock *gf_flock, struct flock *flock)
5518
{
5519
    flock->l_type = gf_flock->l_type;
5520
    flock->l_whence = gf_flock->l_whence;
5521
    flock->l_start = gf_flock->l_start;
5522
    flock->l_len = gf_flock->l_len;
5523
    flock->l_pid = gf_flock->l_pid;
5524
}
5525

5526
static void
5527
gf_flock_from_flock(struct gf_flock *gf_flock, struct flock *flock)
5528
{
5529
    gf_flock->l_type = flock->l_type;
5530
    gf_flock->l_whence = flock->l_whence;
5531
    gf_flock->l_start = flock->l_start;
5532
    gf_flock->l_len = flock->l_len;
5533
    gf_flock->l_pid = flock->l_pid;
5534
}
5535

5536
static int
5537
glfs_lock_common(struct glfs_fd *glfd, int cmd, struct flock *flock,
5538
                 dict_t *xdata)
5539
{
5540
    int ret = -1;
5541
    xlator_t *subvol = NULL;
5542
    struct gf_flock gf_flock = {
5543
        0,
5544
    };
5545
    struct gf_flock saved_flock = {
5546
        0,
5547
    };
5548
    fd_t *fd = NULL;
5549

5550
    DECLARE_OLD_THIS;
5551
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5552

5553
    if (!flock) {
5554
        errno = EINVAL;
5555
        goto out;
5556
    }
5557

5558
    GF_REF_GET(glfd);
5559
    subvol = glfs_active_subvol(glfd->fs);
5560
    if (!subvol) {
5561
        ret = -1;
5562
        errno = EIO;
5563
        goto out;
5564
    }
5565

5566
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
5567
    if (!fd) {
5568
        ret = -1;
5569
        errno = EBADFD;
5570
        goto out;
5571
    }
5572

5573
    /* Generate glusterfs flock structure from client flock
5574
     * structure to be processed by server */
5575
    gf_flock_from_flock(&gf_flock, flock);
5576

5577
    /* Keep another copy of flock for split/merge of locks
5578
     * at client side */
5579
    gf_flock_from_flock(&saved_flock, flock);
5580

5581
    if (glfd->lk_owner.len != 0) {
5582
        ret = syncopctx_setfslkowner(&glfd->lk_owner);
5583

5584
        if (ret)
5585
            goto out;
5586
    }
5587

5588
    ret = get_fop_attr_thrd_key(&xdata);
5589
    if (ret)
5590
        gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
5591

5592
    ret = syncop_lk(subvol, fd, cmd, &gf_flock, xdata, NULL);
5593
    DECODE_SYNCOP_ERR(ret);
5594

5595
    /* Convert back from gf_flock to flock as expected by application */
5596
    gf_flock_to_flock(&gf_flock, flock);
5597

5598
    if (ret == 0 && (cmd == F_SETLK || cmd == F_SETLKW)) {
5599
        ret = fd_lk_insert_and_merge(fd, cmd, &saved_flock);
5600
        if (ret) {
5601
            gf_smsg(THIS->name, GF_LOG_ERROR, 0,
5602
                    API_MSG_LOCK_INSERT_MERGE_FAILED, "gfid=%s",
5603
                    uuid_utoa(fd->inode->gfid), NULL);
5604
            ret = 0;
5605
        }
5606
    }
5607

5608
out:
5609
    if (fd)
5610
        fd_unref(fd);
5611
    if (glfd)
5612
        GF_REF_PUT(glfd);
5613

5614
    glfs_subvol_done(glfd->fs, subvol);
5615

5616
    __GLFS_EXIT_FS;
5617

5618
invalid_fs:
5619
    return ret;
5620
}
5621

5622
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_file_lock, 4.0.0)
5623
int
5624
pub_glfs_file_lock(struct glfs_fd *glfd, int cmd, struct flock *flock,
5625
                   glfs_lock_mode_t lk_mode)
5626
{
5627
    int ret = -1;
5628
    dict_t *xdata_in = NULL;
5629

5630
    if (lk_mode == GLFS_LK_MANDATORY) {
5631
        /* Create a new dictionary */
5632
        xdata_in = dict_new();
5633
        if (xdata_in == NULL) {
5634
            ret = -1;
5635
            errno = ENOMEM;
5636
            goto out;
5637
        }
5638

5639
        /* Set GF_LK_MANDATORY internally within dictionary to map
5640
         * GLFS_LK_MANDATORY */
5641
        ret = dict_set_uint32(xdata_in, GF_LOCK_MODE, GF_LK_MANDATORY);
5642
        if (ret) {
5643
            gf_smsg(THIS->name, GF_LOG_ERROR, 0,
5644
                    API_MSG_SETTING_LOCK_TYPE_FAILED, NULL);
5645
            ret = -1;
5646
            errno = ENOMEM;
5647
            goto out;
5648
        }
5649
    }
5650

5651
    ret = glfs_lock_common(glfd, cmd, flock, xdata_in);
5652
out:
5653
    if (xdata_in)
5654
        dict_unref(xdata_in);
5655

5656
    return ret;
5657
}
5658

5659
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_posix_lock, 3.4.0)
5660
int
5661
pub_glfs_posix_lock(struct glfs_fd *glfd, int cmd, struct flock *flock)
5662
{
5663
    return glfs_lock_common(glfd, cmd, flock, NULL);
5664
}
5665

5666
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fd_set_lkowner, 3.10.7)
5667
int
5668
pub_glfs_fd_set_lkowner(struct glfs_fd *glfd, void *data, int len)
5669
{
5670
    int ret = -1;
5671

5672
    DECLARE_OLD_THIS;
5673
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5674

5675
    if (!GF_REF_GET(glfd)) {
5676
        goto invalid_fs;
5677
    }
5678

5679
    GF_VALIDATE_OR_GOTO(THIS->name, data, out);
5680

5681
    if ((len <= 0) || (len > GFAPI_MAX_LOCK_OWNER_LEN)) {
5682
        errno = EINVAL;
5683
        gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
5684
                "lk_owner len=%d", len, NULL);
5685
        goto out;
5686
    }
5687

5688
    glfd->lk_owner.len = len;
5689

5690
    memcpy(glfd->lk_owner.data, data, len);
5691

5692
    ret = 0;
5693
out:
5694
    if (glfd)
5695
        GF_REF_PUT(glfd);
5696

5697
    __GLFS_EXIT_FS;
5698

5699
invalid_fs:
5700
    return ret;
5701
}
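/* Illustrative usage sketch, not part of the original source: taking and
 * releasing an advisory write lock through glfs_posix_lock() above, after
 * tagging the fd with a caller-chosen lock owner via glfs_fd_set_lkowner()
 * so the server can tell this glfd's locks apart from others.  The owner
 * string and byte range are placeholders; assumes <fcntl.h>.
 * GFAPI_USAGE_EXAMPLES is an illustrative-only guard.
 */
#ifdef GFAPI_USAGE_EXAMPLES
static int
example_lock_range(struct glfs_fd *glfd)
{
    struct flock fl = {
        0,
    };
    char my_owner[] = "example-owner";
    int ret;

    ret = glfs_fd_set_lkowner(glfd, my_owner, sizeof(my_owner));
    if (ret)
        return ret;

    fl.l_type = F_WRLCK;
    fl.l_whence = SEEK_SET;
    fl.l_start = 0;
    fl.l_len = 4096; /* lock the first 4 KiB */

    ret = glfs_posix_lock(glfd, F_SETLKW, &fl); /* blocking acquire */
    if (ret)
        return ret;

    /* ... work on the locked range, then release ... */
    fl.l_type = F_UNLCK;
    return glfs_posix_lock(glfd, F_SETLK, &fl);
}
#endif /* GFAPI_USAGE_EXAMPLES */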
5702

5703
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_dup, 3.4.0)
5704
struct glfs_fd *
5705
pub_glfs_dup(struct glfs_fd *glfd)
5706
{
5707
    xlator_t *subvol = NULL;
5708
    fd_t *fd = NULL;
5709
    struct glfs_fd *dupfd = NULL;
5710
    struct glfs *fs = NULL;
5711

5712
    DECLARE_OLD_THIS;
5713
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5714

5715
    GF_REF_GET(glfd);
5716

5717
    fs = glfd->fs;
5718
    subvol = glfs_active_subvol(fs);
5719
    if (!subvol) {
5720
        errno = EIO;
5721
        goto out;
5722
    }
5723

5724
    fd = glfs_resolve_fd(fs, subvol, glfd);
5725
    if (!fd) {
5726
        errno = EBADFD;
5727
        goto out;
5728
    }
5729

5730
    dupfd = glfs_fd_new(fs);
5731
    if (!dupfd) {
5732
        errno = ENOMEM;
5733
        goto out;
5734
    }
5735

5736
    dupfd->fd = fd_ref(fd);
5737
    dupfd->state = glfd->state;
5738
out:
5739
    if (fd)
5740
        fd_unref(fd);
5741
    if (dupfd)
5742
        glfs_fd_bind(dupfd);
5743
    if (glfd)
5744
        GF_REF_PUT(glfd);
5745

5746
    glfs_subvol_done(fs, subvol);
5747

5748
    __GLFS_EXIT_FS;
5749

5750
invalid_fs:
5751
    return dupfd;
5752
}
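/* Illustrative usage sketch, not part of the original source: glfs_dup()
 * above returns a second glfs_fd referencing the same underlying fd_t, so
 * both handles see the same open file and each one still needs its own
 * glfs_close().  Assumes <fcntl.h> for O_RDONLY.  GFAPI_USAGE_EXAMPLES is
 * an illustrative-only guard.
 */
#ifdef GFAPI_USAGE_EXAMPLES
static void
example_dup(struct glfs *fs, const char *path)
{
    struct glfs_fd *fd1 = glfs_open(fs, path, O_RDONLY);
    struct glfs_fd *fd2 = NULL;

    if (!fd1)
        return;

    fd2 = glfs_dup(fd1);
    if (fd2)
        glfs_close(fd2);

    glfs_close(fd1);
}
#endif /* GFAPI_USAGE_EXAMPLES */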
5753

5754
static void
5755
glfs_enqueue_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
5756
{
5757
    int ret = -1;
5758
    upcall_entry *u_list = NULL;
5759

5760
    if (!fs || !upcall_data)
5761
        goto out;
5762

5763
    u_list = GF_CALLOC(1, sizeof(*u_list), glfs_mt_upcall_entry_t);
5764

5765
    if (!u_list) {
5766
        gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
5767
                NULL);
5768
        goto out;
5769
    }
5770

5771
    INIT_LIST_HEAD(&u_list->upcall_list);
5772

5773
    gf_uuid_copy(u_list->upcall_data.gfid, upcall_data->gfid);
5774
    u_list->upcall_data.event_type = upcall_data->event_type;
5775

5776
    switch (upcall_data->event_type) {
5777
        case GF_UPCALL_CACHE_INVALIDATION:
5778
            ret = glfs_get_upcall_cache_invalidation(&u_list->upcall_data,
5779
                                                     upcall_data);
5780
            break;
5781
        case GF_UPCALL_RECALL_LEASE:
5782
            ret = glfs_get_upcall_lease(&u_list->upcall_data, upcall_data);
5783
            break;
5784
        default:
5785
            break;
5786
    }
5787

5788
    if (ret) {
5789
        gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY, NULL);
5790
        goto out;
5791
    }
5792

5793
    pthread_mutex_lock(&fs->upcall_list_mutex);
5794
    {
5795
        list_add_tail(&u_list->upcall_list, &fs->upcall_list);
5796
    }
5797
    pthread_mutex_unlock(&fs->upcall_list_mutex);
5798

5799
    ret = 0;
5800

5801
out:
5802
    if (ret && u_list) {
5803
        GF_FREE(u_list->upcall_data.data);
5804
        GF_FREE(u_list);
5805
    }
5806
}
5807

5808
static void
5809
glfs_free_upcall_lease(void *to_free)
5810
{
5811
    struct glfs_upcall_lease *arg = to_free;
5812

5813
    if (!arg)
5814
        return;
5815

5816
    if (arg->object)
5817
        glfs_h_close(arg->object);
5818

5819
    GF_FREE(arg);
5820
}
5821

5822
int
5823
glfs_recall_lease_fd(struct glfs *fs, struct gf_upcall *up_data)
5824
{
5825
    struct gf_upcall_recall_lease *recall_lease = NULL;
5826
    xlator_t *subvol = NULL;
5827
    int ret = 0;
5828
    inode_t *inode = NULL;
5829
    struct glfs_fd *glfd = NULL;
5830
    struct glfs_fd *tmp = NULL;
5831
    struct list_head glfd_list;
5832
    fd_t *fd = NULL;
5833
    struct glfs_lease lease = {
5834
        0,
5835
    };
5836

5837
    GF_VALIDATE_OR_GOTO("gfapi", up_data, out);
5838
    GF_VALIDATE_OR_GOTO("gfapi", fs, out);
5839

5840
    recall_lease = up_data->data;
5841
    GF_VALIDATE_OR_GOTO("gfapi", recall_lease, out);
5842

5843
    INIT_LIST_HEAD(&glfd_list);
5844

5845
    subvol = glfs_active_subvol(fs);
5846
    if (!subvol) {
5847
        ret = -1;
5848
        errno = EIO;
5849
        goto out;
5850
    }
5851

5852
    gf_msg_debug(THIS->name, 0, "Recall lease received for gfid:%s",
5853
                 uuid_utoa(up_data->gfid));
5854

5855
    inode = inode_find(subvol->itable, up_data->gfid);
5856
    if (!inode) {
5857
        ret = -1;
5858
        gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INODE_FIND_FAILED,
5859
                "gfid=%s", uuid_utoa(up_data->gfid), "graph_id=%d",
5860
                subvol->graph->id, NULL);
5861
        goto out;
5862
    }
5863

5864
    LOCK(&inode->lock);
5865
    {
5866
        list_for_each_entry(fd, &inode->fd_list, inode_list)
5867
        {
5868
            glfd = fd_ctx_get_ptr(fd, subvol);
5869
            if (glfd) {
5870
                gf_msg_trace(THIS->name, 0, "glfd (%p) has held lease", glfd);
5871
                GF_REF_GET(glfd);
5872
                list_add_tail(&glfd->list, &glfd_list);
5873
            }
5874
        }
5875
    }
5876
    UNLOCK(&inode->lock);
5877

5878
    if (!list_empty(&glfd_list)) {
5879
        list_for_each_entry_safe(glfd, tmp, &glfd_list, list)
5880
        {
5881
            LOCK(&glfd->lock);
5882
            {
5883
                if (glfd->state != GLFD_CLOSE) {
5884
                    gf_msg_trace(THIS->name, 0,
5885
                                 "glfd (%p) has held lease, "
5886
                                 "calling recall cbk",
5887
                                 glfd);
5888
                    glfd->cbk(lease, glfd->cookie);
5889
                }
5890
            }
5891
            UNLOCK(&glfd->lock);
5892

5893
            list_del_init(&glfd->list);
5894
            GF_REF_PUT(glfd);
5895
        }
5896
    }
5897

5898
out:
5899
    return ret;
5900
}
5901

5902
static int
5903
glfs_recall_lease_upcall(struct glfs *fs, struct glfs_upcall *up_arg,
5904
                         struct gf_upcall *up_data)
5905
{
5906
    struct gf_upcall_recall_lease *recall_lease = NULL;
5907
    struct glfs_object *object = NULL;
5908
    xlator_t *subvol = NULL;
5909
    int ret = -1;
5910
    struct glfs_upcall_lease *up_lease_arg = NULL;
5911

5912
    GF_VALIDATE_OR_GOTO("gfapi", up_data, out);
5913
    GF_VALIDATE_OR_GOTO("gfapi", fs, out);
5914

5915
    recall_lease = up_data->data;
5916
    GF_VALIDATE_OR_GOTO("gfapi", recall_lease, out);
5917

5918
    subvol = glfs_active_subvol(fs);
5919
    if (!subvol) {
5920
        errno = EIO;
5921
        goto out;
5922
    }
5923

5924
    gf_msg_debug(THIS->name, 0, "Recall lease received for gfid:%s",
5925
                 uuid_utoa(up_data->gfid));
5926

5927
    object = glfs_h_find_handle(fs, up_data->gfid, GFAPI_HANDLE_LENGTH);
5928
    if (!object) {
5929
        /* Handle creation fails only when the inode cannot be found in
         * the gfapi inode table.
         *
         * Since the application would have taken an inode ref otherwise,
         * the only way this can happen is if it has already closed the
         * handle and is therefore no longer interested in upcalls for
         * this particular gfid.
         */
        gf_smsg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_CREATE_HANDLE_FAILED,
5938
                "gfid=%s", uuid_utoa(up_data->gfid), NULL);
5939
        errno = ESTALE;
5940
        goto out;
5941
    }
5942

5943
    up_lease_arg = GF_MALLOC(sizeof(struct glfs_upcall_lease),
5944
                             glfs_mt_upcall_inode_t);
5945
    if (!up_lease_arg) {
5946
        errno = ENOMEM;
5947
        goto out;
5948
    }
5949
    up_lease_arg->object = object;
5950
    up_lease_arg->lease_type = recall_lease->lease_type;
5951

5952
    up_arg->reason = GLFS_UPCALL_RECALL_LEASE;
5953
    up_arg->event = up_lease_arg;
5954
    up_arg->free_event = glfs_free_upcall_lease;
5955

5956
    ret = 0;
5957

5958
out:
5959
    if (ret) {
5960
        /* Release the object handle if one was obtained. */
5961
        if (object)
5962
            glfs_h_close(object);
5963

5964
        /* Set reason to prevent applications from using ->event */
5965
        up_arg->reason = GF_UPCALL_EVENT_NULL;
5966
    }
5967
    return ret;
5968
}
5969

5970
static int
5971
upcall_syncop_args_free(struct upcall_syncop_args *args)
5972
{
5973
    dict_t *dict = NULL;
5974
    struct gf_upcall *upcall_data = NULL;
5975

5976
    if (args) {
5977
        upcall_data = &args->upcall_data;
5978
        switch (upcall_data->event_type) {
5979
            case GF_UPCALL_CACHE_INVALIDATION:
5980
                dict = ((struct gf_upcall_cache_invalidation *)(upcall_data
5981
                                                                    ->data))
5982
                           ->dict;
5983
                break;
5984
            case GF_UPCALL_RECALL_LEASE:
5985
                dict = ((struct gf_upcall_recall_lease *)(upcall_data->data))
5986
                           ->dict;
5987
                break;
5988
        }
5989
        if (dict)
5990
            dict_unref(dict);
5991

5992
        GF_FREE(upcall_data->client_uid);
5993
        GF_FREE(upcall_data->data);
5994
    }
5995
    GF_FREE(args);
5996
    return 0;
5997
}
5998

5999
static int
6000
glfs_upcall_syncop_cbk(int ret, call_frame_t *frame, void *opaque)
6001
{
6002
    struct upcall_syncop_args *args = opaque;
6003

6004
    (void)upcall_syncop_args_free(args);
6005

6006
    return 0;
6007
}
6008

6009
static int
6010
glfs_cbk_upcall_syncop(void *opaque)
6011
{
6012
    struct upcall_syncop_args *args = opaque;
6013
    struct gf_upcall *upcall_data = NULL;
6014
    struct glfs_upcall *up_arg = NULL;
6015
    struct glfs *fs;
6016
    int ret = -1;
6017

6018
    fs = args->fs;
6019
    upcall_data = &args->upcall_data;
6020

6021
    if (!upcall_data) {
6022
        goto out;
6023
    }
6024

6025
    up_arg = GLFS_CALLOC(1, sizeof(struct gf_upcall), glfs_release_upcall,
6026
                         glfs_mt_upcall_entry_t);
6027
    if (!up_arg) {
6028
        gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
6029
                NULL);
6030
        goto out;
6031
    }
6032

6033
    switch (upcall_data->event_type) {
6034
        case GF_UPCALL_CACHE_INVALIDATION:
6035
            ret = glfs_h_poll_cache_invalidation(fs, up_arg, upcall_data);
6036
            break;
6037
        case GF_UPCALL_RECALL_LEASE:
6038
            ret = glfs_recall_lease_upcall(fs, up_arg, upcall_data);
6039
            break;
6040
        default:
6041
            errno = EINVAL;
6042
    }
6043

6044
    /* The file that triggered the upcall notification may have been
     * deleted by this same client in the meantime. In that case
     * up_arg->reason is set to GLFS_UPCALL_EVENT_NULL and there is no
     * need to deliver the upcall.
     */
    if (up_arg->reason == GLFS_UPCALL_EVENT_NULL) {
6051
        gf_smsg(THIS->name, GF_LOG_DEBUG, errno,
6052
                API_MSG_UPCALL_EVENT_NULL_RECEIVED, NULL);
6053
        ret = 0;
6054
        GLFS_FREE(up_arg);
6055
        goto out;
6056
    } else if (ret) {
6057
        gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY, NULL);
6058
        GLFS_FREE(up_arg);
6059
        goto out;
6060
    }
6061

6062
    if (fs->up_cbk && up_arg)
6063
        (fs->up_cbk)(up_arg, fs->up_data);
6064

6065
    /* the application is responsible for calling glfs_free on up_arg
     * once it has finished processing it */

6068
out:
6069
    return ret;
6070
}
6071

6072
static struct gf_upcall_cache_invalidation *
6073
gf_copy_cache_invalidation(struct gf_upcall_cache_invalidation *src)
6074
{
6075
    struct gf_upcall_cache_invalidation *dst = NULL;
6076

6077
    if (!src)
6078
        goto out;
6079

6080
    dst = GF_MALLOC(sizeof(struct gf_upcall_cache_invalidation),
6081
                    glfs_mt_upcall_entry_t);
6082

6083
    if (!dst) {
6084
        gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
6085
                NULL);
6086
        goto out;
6087
    }
6088

6089
    dst->flags = src->flags;
6090
    dst->expire_time_attr = src->expire_time_attr;
6091
    dst->stat = src->stat;
6092
    dst->p_stat = src->p_stat;
6093
    dst->oldp_stat = src->oldp_stat;
6094
    dst->dict = NULL;
6095
    if (src->dict)
6096
        dst->dict = dict_copy_with_ref(src->dict, NULL);
6097

6098
    return dst;
6099
out:
6100
    return NULL;
6101
}
6102

6103
static struct gf_upcall_recall_lease *
6104
gf_copy_recall_lease(struct gf_upcall_recall_lease *src)
6105
{
6106
    struct gf_upcall_recall_lease *dst = NULL;
6107

6108
    if (!src)
6109
        goto out;
6110

6111
    dst = GF_MALLOC(sizeof(struct gf_upcall_recall_lease),
6112
                    glfs_mt_upcall_entry_t);
6113

6114
    if (!dst) {
6115
        gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
6116
                NULL);
6117
        goto out;
6118
    }
6119

6120
    dst->lease_type = src->lease_type;
6121
    memcpy(dst->tid, src->tid, 16);
6122
    dst->dict = NULL;
6123
    if (src->dict)
6124
        dst->dict = dict_copy_with_ref(src->dict, NULL);
6125

6126
    return dst;
6127
out:
6128
    return NULL;
6129
}
6130

6131
static struct upcall_syncop_args *
6132
upcall_syncop_args_init(struct glfs *fs, struct gf_upcall *upcall_data)
6133
{
6134
    struct upcall_syncop_args *args = NULL;
6135
    int ret = -1;
6136
    struct gf_upcall *t_data = NULL;
6137

6138
    if (!fs || !upcall_data)
6139
        goto out;
6140

6141
    args = GF_CALLOC(1, sizeof(struct upcall_syncop_args),
6142
                     glfs_mt_upcall_entry_t);
6143
    if (!args) {
6144
        gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
6145
                "syncop args", NULL);
6146
        goto out;
6147
    }
6148

6149
    /* Note: we do not take a ref on fs here.
     * Applications are expected to unregister for upcall events, or stop
     * polling for them, before calling glfs_fini. As for outstanding
     * synctasks, we wait for all syncenv threads to finish their tasks
     * before cleaning up fs->ctx. Hence it is safe to process these
     * callback notifications without taking any lock/ref.
     */
    args->fs = fs;
6158
    t_data = &(args->upcall_data);
6159
    t_data->client_uid = gf_strdup(upcall_data->client_uid);
6160

6161
    gf_uuid_copy(t_data->gfid, upcall_data->gfid);
6162
    t_data->event_type = upcall_data->event_type;
6163

6164
    switch (t_data->event_type) {
6165
        case GF_UPCALL_CACHE_INVALIDATION:
6166
            t_data->data = gf_copy_cache_invalidation(
6167
                (struct gf_upcall_cache_invalidation *)upcall_data->data);
6168
            break;
6169
        case GF_UPCALL_RECALL_LEASE:
6170
            t_data->data = gf_copy_recall_lease(
6171
                (struct gf_upcall_recall_lease *)upcall_data->data);
6172
            break;
6173
    }
6174

6175
    if (!t_data->data)
6176
        goto out;
6177

6178
    return args;
6179
out:
6180
    if (ret) {
6181
        if (args) {
6182
            GF_FREE(args->upcall_data.client_uid);
6183
            GF_FREE(args);
6184
        }
6185
    }
6186

6187
    return NULL;
6188
}
6189

6190
static void
6191
glfs_cbk_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
6192
{
6193
    struct upcall_syncop_args *args = NULL;
6194
    int ret = -1;
6195

6196
    if (!fs || !upcall_data)
6197
        goto out;
6198

6199
    if (!(fs->upcall_events & upcall_data->event_type)) {
6200
        /* ignore events which the application hasn't registered for */
6201
        goto out;
6202
    }
6203

6204
    args = upcall_syncop_args_init(fs, upcall_data);
6205

6206
    if (!args)
6207
        goto out;
6208

6209
    ret = synctask_new(THIS->ctx->env, glfs_cbk_upcall_syncop,
6210
                       glfs_upcall_syncop_cbk, NULL, args);
6211
    /* should we retry in case of failure? */
6212
    if (ret) {
6213
        gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_UPCALL_SYNCOP_FAILED,
6214
                "event_type=%d", upcall_data->event_type, "gfid=%s",
6215
                (char *)(upcall_data->gfid), NULL);
6216
        upcall_syncop_args_free(args);
6217
    }
6218

6219
out:
6220
    return;
6221
}
6222

6223
/*
 * This routine is called for any upcall notification received from the
 * server.
 *
 * If the application has registered a cbk function, it is invoked here for
 * each event received. The cbk fn is responsible for notifying the
 * application in whatever way it chooses for each queued event (e.g. by
 * raising a signal or broadcasting a condition variable).
 *
 * Otherwise, all upcall events are queued up in a list to be read/polled
 * by the application.
 */
GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_process_upcall_event, 3.7.0)
6238
void
6239
priv_glfs_process_upcall_event(struct glfs *fs, void *data)
6240
{
6241
    glusterfs_ctx_t *ctx = NULL;
6242
    struct gf_upcall *upcall_data = NULL;
6243

6244
    DECLARE_OLD_THIS;
6245

6246
    gf_msg_debug(THIS->name, 0, "Upcall gfapi callback is called");
6247

6248
    __GLFS_ENTRY_VALIDATE_FS(fs, err);
6249

6250
    if (!data)
6251
        goto out;
6252

6253
    /* Unlike in I/O path, "glfs_fini" would not have freed
6254
     * 'fs' by the time we take lock as it waits for all epoll
6255
     * threads to exit, including this one
6256
     */
6257
    pthread_mutex_lock(&fs->mutex);
6258
    {
6259
        ctx = fs->ctx;
6260

6261
        /* if we're not interested in upcalls (anymore), skip them */
6262
        if (ctx->cleanup_started || !fs->cache_upcalls) {
6263
            pthread_mutex_unlock(&fs->mutex);
6264
            goto out;
6265
        }
6266

6267
        fs->pin_refcnt++;
6268
    }
6269
    pthread_mutex_unlock(&fs->mutex);
6270

6271
    upcall_data = (struct gf_upcall *)data;
6272

6273
    gf_msg_trace(THIS->name, 0, "Upcall gfapi gfid = %s",
6274
                 (char *)(upcall_data->gfid));
6275

6276
    /*
     * TODO: RECALL LEASE for each glfd
     *
     * In case of RECALL_LEASE, we could associate a separate
     * cbk function with each glfd either by
     * - extending pub_glfs_lease to accept new args (recall_cbk_fn, cookie)
     * - or by defining a new API "glfs_register_recall_cbk_fn (glfd,
     *   recall_cbk_fn, cookie)". In such cases, flag it and, instead of
     *   calling the upcall functions below, define a new one that walks
     *   the glfd list and invokes each glfd's recall_cbk_fn.
     */
6287

6288
    if (fs->up_cbk) { /* upcall cbk registered */
6289
        (void)glfs_cbk_upcall_data(fs, upcall_data);
6290
    } else {
6291
        (void)glfs_enqueue_upcall_data(fs, upcall_data);
6292
    }
6293

6294
    pthread_mutex_lock(&fs->mutex);
6295
    {
6296
        fs->pin_refcnt--;
6297
    }
6298
    pthread_mutex_unlock(&fs->mutex);
6299

6300
out:
6301
    __GLFS_EXIT_FS;
6302
err:
6303
    return;
6304
}
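
/*
 * Illustrative sketch (kept under "#if 0", not built here): how an
 * application might consume the two delivery models described above.
 * The names below -- glfs_upcall_register(), GLFS_EVENT_ANY,
 * glfs_h_poll_upcall(), glfs_upcall_get_reason() and glfs_free() -- are
 * assumed from this tree's public glfs.h/glfs-handles.h headers, not
 * defined in this file.
 */
#if 0
#include <glusterfs/api/glfs.h>
#include <glusterfs/api/glfs-handles.h>
#include <stdio.h>

/* Model 1: a registered callback, delivered via glfs_cbk_upcall_data()
 * above. The application owns 'up_arg' and releases it with glfs_free(). */
static void
app_upcall_cbk(struct glfs_upcall *up_arg, void *data)
{
    fprintf(stderr, "upcall reason=%d\n", (int)glfs_upcall_get_reason(up_arg));
    glfs_free(up_arg);
}

static void
consume_upcalls(struct glfs *fs)
{
    struct glfs_upcall *cb = NULL;

    /* An application would normally pick only one of the two models. */

    /* Model 1: register a callback for all upcall events. */
    glfs_upcall_register(fs, GLFS_EVENT_ANY, app_upcall_cbk, NULL);

    /* Model 2: drain the queue filled by glfs_enqueue_upcall_data(). */
    while (glfs_h_poll_upcall(fs, &cb) == 0 && cb) {
        fprintf(stderr, "polled reason=%d\n", (int)glfs_upcall_get_reason(cb));
        glfs_free(cb);
        cb = NULL;
    }
}
#endif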
6305

6306
ssize_t
6307
glfs_anonymous_pwritev(struct glfs *fs, struct glfs_object *object,
6308
                       const struct iovec *iovec, int iovcnt, off_t offset,
6309
                       int flags)
6310
{
6311
    xlator_t *subvol = NULL;
6312
    struct iobref *iobref = NULL;
6313
    struct iobuf *iobuf = NULL;
6314
    struct iovec iov = {
6315
        0,
6316
    };
6317
    inode_t *inode = NULL;
6318
    fd_t *fd = NULL;
6319
    int ret = -1;
6320
    size_t size = -1;
6321

6322
    DECLARE_OLD_THIS;
6323
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
6324

6325
    subvol = glfs_active_subvol(fs);
6326
    if (!subvol) {
6327
        ret = -1;
6328
        errno = EIO;
6329
        goto out;
6330
    }
6331

6332
    /* get/refresh the in-arg object's inode in correlation to the xlator */
6333
    inode = glfs_resolve_inode(fs, subvol, object);
6334
    if (!inode) {
6335
        ret = -1;
6336
        errno = ESTALE;
6337
        goto out;
6338
    }
6339

6340
    fd = fd_anonymous(inode);
6341
    if (!fd) {
6342
        ret = -1;
6343
        gf_smsg("gfapi", GF_LOG_ERROR, ENOMEM, API_MSG_FDCREATE_FAILED, NULL);
6344
        errno = ENOMEM;
6345
        goto out;
6346
    }
6347

6348
    size = iov_length(iovec, iovcnt);
6349

6350
    iobuf = iobuf_get2(subvol->ctx->iobuf_pool, size);
6351
    if (!iobuf) {
6352
        ret = -1;
6353
        errno = ENOMEM;
6354
        goto out;
6355
    }
6356

6357
    iobref = iobref_new();
6358
    if (!iobref) {
6359
        iobuf_unref(iobuf);
6360
        errno = ENOMEM;
6361
        ret = -1;
6362
        goto out;
6363
    }
6364

6365
    ret = iobref_add(iobref, iobuf);
6366
    if (ret) {
6367
        iobuf_unref(iobuf);
6368
        iobref_unref(iobref);
6369
        errno = ENOMEM;
6370
        ret = -1;
6371
        goto out;
6372
    }
6373

6374
    iov_unload(iobuf_ptr(iobuf), iovec, iovcnt);
6375

6376
    iov.iov_base = iobuf_ptr(iobuf);
6377
    iov.iov_len = size;
6378

6379
    /* TODO : set leaseid */
6380
    ret = syncop_writev(subvol, fd, &iov, 1, offset, iobref, flags, NULL, NULL,
6381
                        NULL, NULL);
6382
    DECODE_SYNCOP_ERR(ret);
6383

6384
    iobuf_unref(iobuf);
6385
    iobref_unref(iobref);
6386

6387
    if (ret <= 0)
6388
        goto out;
6389

6390
out:
6391

6392
    if (fd)
6393
        fd_unref(fd);
6394

6395
    if (inode)
6396
        inode_unref(inode);
6397

6398
    glfs_subvol_done(fs, subvol);
6399

6400
    __GLFS_EXIT_FS;
6401

6402
invalid_fs:
6403
    return ret;
6404
}
6405

6406
ssize_t
6407
glfs_anonymous_preadv(struct glfs *fs, struct glfs_object *object,
6408
                      const struct iovec *iovec, int iovcnt, off_t offset,
6409
                      int flags)
6410
{
6411
    xlator_t *subvol = NULL;
6412
    struct iovec *iov = NULL;
6413
    struct iobref *iobref = NULL;
6414
    inode_t *inode = NULL;
6415
    fd_t *fd = NULL;
6416
    int cnt = 0;
6417
    ssize_t ret = -1;
6418
    ssize_t size = -1;
6419

6420
    DECLARE_OLD_THIS;
6421
    __GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
6422

6423
    subvol = glfs_active_subvol(fs);
6424
    if (!subvol) {
6425
        ret = -1;
6426
        errno = EIO;
6427
        goto out;
6428
    }
6429

6430
    /* get/refresh the in-arg object's inode in correlation to the xlator */
6431
    inode = glfs_resolve_inode(fs, subvol, object);
6432
    if (!inode) {
6433
        ret = -1;
6434
        errno = ESTALE;
6435
        goto out;
6436
    }
6437

6438
    fd = fd_anonymous(inode);
6439
    if (!fd) {
6440
        ret = -1;
6441
        gf_smsg("gfapi", GF_LOG_ERROR, ENOMEM, API_MSG_FDCREATE_FAILED, NULL);
6442
        errno = ENOMEM;
6443
        goto out;
6444
    }
6445

6446
    size = iov_length(iovec, iovcnt);
6447

6448
    /* TODO : set leaseid */
6449
    ret = syncop_readv(subvol, fd, size, offset, flags, &iov, &cnt, &iobref,
6450
                       NULL, NULL, NULL);
6451
    DECODE_SYNCOP_ERR(ret);
6452
    if (ret <= 0)
6453
        goto out;
6454

6455
    size = iov_copy(iovec, iovcnt, iov, cnt);
6456

6457
    ret = size;
6458
out:
6459
    if (iov)
6460
        GF_FREE(iov);
6461
    if (iobref)
6462
        iobref_unref(iobref);
6463
    if (fd)
6464
        fd_unref(fd);
6465

6466
    if (inode)
6467
        inode_unref(inode);
6468

6469
    glfs_subvol_done(fs, subvol);
6470

6471
    __GLFS_EXIT_FS;
6472

6473
invalid_fs:
6474
    return ret;
6475
}
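
/*
 * Illustrative sketch (kept under "#if 0", not built here): the two helpers
 * above back the handle-based anonymous I/O path, i.e. reads and writes on a
 * glfs_object without an explicit open. The sketch assumes the public
 * glfs_h_lookupat(), glfs_h_anonymous_write(), glfs_h_anonymous_read() and
 * glfs_h_close() declarations from glfs-handles.h.
 */
#if 0
#include <glusterfs/api/glfs.h>
#include <glusterfs/api/glfs-handles.h>
#include <string.h>
#include <sys/stat.h>

/* Write a small buffer to an object and read it back, without glfs_open();
 * internally both calls end up on an anonymous fd (fd_anonymous() above). */
static int
anon_io_example(struct glfs *fs, const char *path)
{
    struct stat st;
    struct glfs_object *obj = glfs_h_lookupat(fs, NULL, path, &st, 1);
    char out[] = "hello";
    char in[sizeof(out)] = {0};

    if (!obj)
        return -1;

    if (glfs_h_anonymous_write(fs, obj, out, sizeof(out), 0) < 0)
        goto err;
    if (glfs_h_anonymous_read(fs, obj, in, sizeof(in), 0) < 0)
        goto err;

    glfs_h_close(obj);
    return strncmp(in, out, sizeof(out)) ? -1 : 0;
err:
    glfs_h_close(obj);
    return -1;
}
#endif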
6476

6477
static void
6478
glfs_release_xreaddirp_stat(void *ptr)
6479
{
6480
    struct glfs_xreaddirp_stat *to_free = ptr;
6481

6482
    if (to_free->object)
6483
        glfs_h_close(to_free->object);
6484
}
6485

6486
/*
6487
 * Given glfd of a directory, this function does readdirp and returns
6488
 * xstat along with dirents.
6489
 */
6490
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_r, 3.11.0)
6491
int
6492
pub_glfs_xreaddirplus_r(struct glfs_fd *glfd, uint32_t flags,
6493
                        struct glfs_xreaddirp_stat **xstat_p,
6494
                        struct dirent *ext, struct dirent **res)
6495
{
6496
    int ret = -1;
6497
    gf_dirent_t *entry = NULL;
6498
    struct dirent *buf = NULL;
6499
    struct glfs_xreaddirp_stat *xstat = NULL;
6500

6501
    DECLARE_OLD_THIS;
6502
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
6503

6504
    GF_REF_GET(glfd);
6505

6506
    GF_VALIDATE_OR_GOTO(THIS->name, xstat_p, out);
6507
    GF_VALIDATE_OR_GOTO(THIS->name, res, out);
6508

6509
    errno = 0;
6510

6511
    if (ext)
6512
        buf = ext;
6513
    else
6514
        buf = glfs_readdirbuf_get(glfd);
6515

6516
    if (!buf)
6517
        goto out;
6518

6519
    xstat = GLFS_CALLOC(1, sizeof(struct glfs_xreaddirp_stat),
6520
                        glfs_release_xreaddirp_stat, glfs_mt_xreaddirp_stat_t);
6521

6522
    if (!xstat)
6523
        goto out;
6524

6525
    /* this is a readdirplus operation */
6526
    entry = glfd_entry_next(glfd, 1);
6527

6528
    /* XXX: Ideally when we reach EOD, errno should have been
6529
     * set to ENOENT. But that doesn't seem to be the case.
6530
     *
6531
     * The only way to confirm EOD at this point is that
6532
     * errno == 0 and entry == NULL
6533
     */
6534
    if (errno)
6535
        goto out;
6536

6537
    if (!entry) {
6538
        /* reached EOD, ret = 0  */
6539
        ret = 0;
6540
        *res = NULL;
6541
        *xstat_p = NULL;
6542

6543
        /* free xstat as applications shall not be using it */
6544
        GLFS_FREE(xstat);
6545

6546
        goto out;
6547
    }
6548

6549
    *res = buf;
6550
    gf_dirent_to_dirent(entry, buf);
6551

6552
    if (flags & GFAPI_XREADDIRP_STAT) {
6553
        glfs_iatt_to_stat(glfd->fs, &entry->d_stat, &xstat->st);
6554
        xstat->flags_handled |= GFAPI_XREADDIRP_STAT;
6555
    }
6556

6557
    if ((flags & GFAPI_XREADDIRP_HANDLE) &&
6558
        /* skip . and .. */
6559
        strcmp(buf->d_name, ".") && strcmp(buf->d_name, "..")) {
6560
        /* Now create object.
6561
         * We could use "glfs_h_find_handle" as well, since the inodes
         * would have already been linked as part of
         * 'gf_link_inodes_from_dirent' */
6563
        xstat->object = glfs_h_create_from_handle(
6564
            glfd->fs, entry->d_stat.ia_gfid, GFAPI_HANDLE_LENGTH, NULL);
6565

6566
        if (xstat->object) { /* success */
6567
            /* note: xstat->object->inode->ref is taken
6568
             * This shall be unref'ed when application does
6569
             * glfs_free(xstat) */
6570
            xstat->flags_handled |= GFAPI_XREADDIRP_HANDLE;
6571
        }
6572
    }
6573

6574
    ret = xstat->flags_handled;
6575
    *xstat_p = xstat;
6576

6577
    gf_msg_debug(THIS->name, 0,
6578
                 "xreaddirp- requested_flags (%x) , processed_flags (%x)",
6579
                 flags, xstat->flags_handled);
6580

6581
out:
6582
    GF_REF_PUT(glfd);
6583

6584
    if (ret < 0) {
6585
        gf_smsg(THIS->name, GF_LOG_WARNING, errno, API_MSG_XREADDIRP_R_FAILED,
6586
                "reason=%s", strerror(errno), NULL);
6587

6588
        if (xstat)
6589
            GLFS_FREE(xstat);
6590
    }
6591

6592
    __GLFS_EXIT_FS;
6593

6594
    return ret;
6595

6596
invalid_fs:
6597
    return -1;
6598
}
6599

6600
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_get_stat, 3.11.0)
6601
struct stat *
6602
pub_glfs_xreaddirplus_get_stat(struct glfs_xreaddirp_stat *xstat)
6603
{
6604
    GF_VALIDATE_OR_GOTO("glfs_xreaddirplus_get_stat", xstat, out);
6605

6606
    if (!(xstat->flags_handled & GFAPI_XREADDIRP_STAT))
6607
        gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_FLAGS_HANDLE,
6608
                "GFAPI_XREADDIRP_STAT"
6609
                "xstat=%p",
6610
                xstat, "handles=%x", xstat->flags_handled, NULL);
6611
    return &xstat->st;
6612

6613
out:
6614
    return NULL;
6615
}
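
/*
 * Illustrative sketch (kept under "#if 0", not built here): the intended
 * calling pattern for glfs_xreaddirplus_r() -- loop until it returns 0 with
 * *res == NULL, consume the stat/handle according to the returned flag mask,
 * and release each xstat with glfs_free(). glfs_opendir(), glfs_closedir()
 * and glfs_xreaddirplus_get_object() are assumed from the public headers.
 */
#if 0
#include <glusterfs/api/glfs.h>
#include <glusterfs/api/glfs-handles.h>
#include <dirent.h>
#include <stdio.h>
#include <sys/stat.h>

static void
list_dir_with_stats(struct glfs *fs, const char *dirpath)
{
    struct glfs_fd *fd = glfs_opendir(fs, dirpath);
    struct glfs_xreaddirp_stat *xstat = NULL;
    struct dirent *de = NULL;
    int ret;

    if (!fd)
        return;

    while (1) {
        ret = glfs_xreaddirplus_r(fd,
                                  GFAPI_XREADDIRP_STAT | GFAPI_XREADDIRP_HANDLE,
                                  &xstat, NULL, &de);
        if (ret < 0) /* error, errno is set */
            break;
        if (!de) /* ret == 0 and *res == NULL: end of directory */
            break;

        if (ret & GFAPI_XREADDIRP_STAT) {
            struct stat *st = glfs_xreaddirplus_get_stat(xstat);
            printf("%s (%lld bytes)\n", de->d_name,
                   st ? (long long)st->st_size : -1LL);
        }

        if (ret & GFAPI_XREADDIRP_HANDLE) {
            /* valid until glfs_free(xstat) runs the release callback above */
            struct glfs_object *obj = glfs_xreaddirplus_get_object(xstat);
            (void)obj;
        }

        glfs_free(xstat);
        xstat = NULL;
    }

    glfs_closedir(fd);
}
#endif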
6616

6617
void
6618
gf_lease_to_glfs_lease(struct gf_lease *gf_lease, struct glfs_lease *lease)
6619
{
6620
    u_int lease_type = gf_lease->lease_type;
6621
    lease->cmd = gf_lease->cmd;
6622
    lease->lease_type = lease_type;
6623
    memcpy(lease->lease_id, gf_lease->lease_id, LEASE_ID_SIZE);
6624
}
6625

6626
void
6627
glfs_lease_to_gf_lease(struct glfs_lease *lease, struct gf_lease *gf_lease)
6628
{
6629
    u_int lease_type = lease->lease_type;
6630
    gf_lease->cmd = lease->cmd;
6631
    gf_lease->lease_type = lease_type;
6632
    memcpy(gf_lease->lease_id, lease->lease_id, LEASE_ID_SIZE);
6633
}
6634

6635
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lease, 4.0.0)
6636
int
6637
pub_glfs_lease(struct glfs_fd *glfd, struct glfs_lease *lease,
6638
               glfs_recall_cbk fn, void *data)
6639
{
6640
    int ret = -1;
6641
    loc_t loc = {
6642
        0,
6643
    };
6644
    xlator_t *subvol = NULL;
6645
    fd_t *fd = NULL;
6646
    struct gf_lease gf_lease = {
6647
        0,
6648
    };
6649

6650
    DECLARE_OLD_THIS;
6651
    __GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
6652

6653
    GF_REF_GET(glfd);
6654

6655
    if (!is_valid_lease_id(lease->lease_id)) {
6656
        ret = -1;
6657
        errno = EINVAL;
6658
        goto out;
6659
    }
6660

6661
    subvol = glfs_active_subvol(glfd->fs);
6662
    if (!subvol) {
6663
        ret = -1;
6664
        errno = EIO;
6665
        goto out;
6666
    }
6667

6668
    fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
6669
    if (!fd) {
6670
        ret = -1;
6671
        errno = EBADFD;
6672
        goto out;
6673
    }
6674

6675
    switch (lease->lease_type) {
6676
        case GLFS_RD_LEASE:
6677
            if ((fd->flags != O_RDONLY) && !(fd->flags & O_RDWR)) {
6678
                ret = -1;
6679
                errno = EINVAL;
6680
                goto out;
6681
            }
6682
            break;
6683
        case GLFS_RW_LEASE:
6684
            if (!((fd->flags & O_WRONLY) || (fd->flags & O_RDWR))) {
6685
                ret = -1;
6686
                errno = EINVAL;
6687
                goto out;
6688
            }
6689
            break;
6690
        default:
6691
            if (lease->cmd != GLFS_GET_LEASE) {
6692
                ret = -1;
6693
                errno = EINVAL;
6694
                goto out;
6695
            }
6696
            break;
6697
    }
6698

6699
    /* populate loc */
6700
    GLFS_LOC_FILL_INODE(fd->inode, loc, out);
6701

6702
    glfs_lease_to_gf_lease(lease, &gf_lease);
6703

6704
    ret = syncop_lease(subvol, &loc, &gf_lease, NULL, NULL);
6705
    DECODE_SYNCOP_ERR(ret);
6706

6707
    gf_lease_to_glfs_lease(&gf_lease, lease);
6708

6709
    /* TODO: Add leases for client replay
6710
    if (ret == 0 && (cmd == F_SETLK || cmd == F_SETLKW))
6711
            fd_lk_insert_and_merge (fd, cmd, &saved_flock);
6712
    */
6713
    if (ret == 0) {
6714
        ret = fd_ctx_set(glfd->fd, subvol, (uint64_t)(long)glfd);
6715
        if (ret) {
6716
            gf_smsg(subvol->name, GF_LOG_ERROR, ENOMEM,
6717
                    API_MSG_FDCTX_SET_FAILED, "fd=%p", glfd->fd, NULL);
6718
            goto out;
6719
        }
6720
        glfd->cbk = fn;
6721
        glfd->cookie = data;
6722
    }
6723

6724
out:
6725

6726
    if (glfd)
6727
        GF_REF_PUT(glfd);
6728

6729
    if (subvol)
6730
        glfs_subvol_done(glfd->fs, subvol);
6731

6732
    __GLFS_EXIT_FS;
6733

6734
invalid_fs:
6735
    return ret;
6736
}
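
/*
 * Illustrative sketch (kept under "#if 0", not built here): taking and
 * releasing a read lease on an fd opened O_RDONLY. struct glfs_lease,
 * GLFS_RD_LEASE and GLFS_GET_LEASE appear in this file; GLFS_SET_LEASE,
 * GLFS_UNLK_LEASE, the 16-byte lease_id and the glfs_recall_cbk signature
 * are assumed from the public glfs.h.
 */
#if 0
#include <glusterfs/api/glfs.h>
#include <string.h>

/* Recall callback: the server wants the lease back; flush any cached
 * state and then unset the lease. (Signature assumed from glfs.h.) */
static void
lease_recalled(struct glfs_lease lease, void *data)
{
    /* ... */
}

static int
take_rd_lease(struct glfs_fd *fd)
{
    struct glfs_lease lease;

    memset(&lease, 0, sizeof(lease));
    /* any non-zero, application-chosen 16-byte identifier */
    memcpy(lease.lease_id, "example-client-1", 16);

    lease.lease_type = GLFS_RD_LEASE;
    lease.cmd = GLFS_SET_LEASE; /* assumed cmd name */
    if (glfs_lease(fd, &lease, lease_recalled, NULL) < 0)
        return -1;

    /* ... serve reads from a local cache while the lease is held ... */

    lease.cmd = GLFS_UNLK_LEASE; /* assumed cmd name */
    return glfs_lease(fd, &lease, NULL, NULL);
}
#endif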
6737

6738
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mkdirat, 11.0)
6739
int
6740
pub_glfs_mkdirat(struct glfs_fd *pglfd, const char *path, mode_t mode)
6741
{
6742
    int ret = -1;
6743
    int reval = 0;
6744
    xlator_t *subvol = NULL;
6745
    loc_t loc = {
6746
        0,
6747
    };
6748
    struct iatt iatt = {
6749
        0,
6750
    };
6751
    uuid_t gfid;
6752
    dict_t *xattr_req = NULL;
6753

6754
    DECLARE_OLD_THIS;
6755
    __GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
6756

6757
retry:
6758
    /* Retry case */
6759
    if (subvol) {
6760
        cleanup_fopat_args(pglfd, subvol, ret, &loc);
6761
    }
6762

6763
    subvol = setup_fopat_args(pglfd, path, 0, &loc, &iatt, reval);
6764
    if (!subvol) {
6765
        ret = -1;
6766
    }
6767

6768
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
6769

6770
    if (!subvol) {
6771
        ret = -1;
6772
        goto out;
6773
    }
6774

6775
    ret = setup_entry_fopat_args(gfid, &xattr_req, &loc);
6776
    if (ret) {
6777
        goto out;
6778
    }
6779

6780
    ret = syncop_mkdir(subvol, &loc, mode, &iatt, xattr_req, NULL);
6781
    DECODE_SYNCOP_ERR(ret);
6782

6783
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
6784

6785
    if (ret == 0)
6786
        ret = glfs_loc_link(&loc, &iatt);
6787
out:
6788
    if (xattr_req)
6789
        dict_unref(xattr_req);
6790

6791
    cleanup_fopat_args(pglfd, subvol, ret, &loc);
6792

6793
    __GLFS_EXIT_FS;
6794

6795
invalid_fs:
6796
    return ret;
6797
}
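
/*
 * Illustrative sketch (kept under "#if 0", not built here): the *at() calls
 * below resolve their path relative to a directory glfs_fd. The public names
 * follow the pub_-prefixed definitions in this file; obtaining the directory
 * handle through glfs_opendir() is an assumption.
 */
#if 0
#include <glusterfs/api/glfs.h>
#include <fcntl.h>  /* AT_SYMLINK_NOFOLLOW */
#include <unistd.h> /* R_OK, X_OK */

static int
make_subdir(struct glfs *fs)
{
    struct glfs_fd *dirfd = glfs_opendir(fs, "/exports");
    int ret;

    if (!dirfd)
        return -1;

    /* create /exports/newdir relative to dirfd */
    ret = glfs_mkdirat(dirfd, "newdir", 0755);
    if (ret == 0)
        /* check access to it without following symlinks */
        ret = glfs_faccessat(dirfd, "newdir", R_OK | X_OK,
                             AT_SYMLINK_NOFOLLOW);

    glfs_closedir(dirfd);
    return ret;
}
#endif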
6798

6799
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_faccessat, 11.0)
6800
int
6801
pub_glfs_faccessat(struct glfs_fd *pglfd, const char *path, int mode, int flags)
6802
{
6803
    int ret = -1;
6804
    int reval = 0;
6805
    xlator_t *subvol = NULL;
6806
    loc_t loc = {
6807
        0,
6808
    };
6809
    struct iatt iatt = {
6810
        0,
6811
    };
6812
    int no_follow = 0;
6813

6814
    DECLARE_OLD_THIS;
6815
    __GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
6816

6817
    no_follow = (flags & AT_SYMLINK_NOFOLLOW) == AT_SYMLINK_NOFOLLOW;
6818

6819
retry:
6820
    /* Retry case */
6821
    if (subvol) {
6822
        cleanup_fopat_args(pglfd, subvol, ret, &loc);
6823
    }
6824

6825
    subvol = setup_fopat_args(pglfd, path, !no_follow, &loc, &iatt, reval);
6826
    if (!subvol) {
6827
        ret = -1;
6828
    }
6829

6830
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
6831

6832
    if (!subvol) {
6833
        ret = -1;
6834
        goto out;
6835
    }
6836

6837
    if (!loc.inode) {
6838
        ret = -1;
6839
        errno = ENOENT;
6840
        goto out;
6841
    }
6842

6843
    ret = syncop_access(subvol, &loc, mode, NULL, NULL);
6844
    DECODE_SYNCOP_ERR(ret);
6845

6846
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
6847
out:
6848
    cleanup_fopat_args(pglfd, subvol, ret, &loc);
6849
    __GLFS_EXIT_FS;
6850

6851
invalid_fs:
6852
    return ret;
6853
}
6854

6855
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchmodat, 11.0)
6856
int
6857
pub_glfs_fchmodat(struct glfs_fd *pglfd, const char *path, mode_t mode,
6858
                  int flags)
6859
{
6860
    int ret = -1;
6861
    xlator_t *subvol = NULL;
6862
    loc_t loc = {
6863
        0,
6864
    };
6865
    struct iatt iatt = {
6866
        0,
6867
    };
6868
    int glvalid;
6869
    int no_follow = 0;
6870

6871
    struct glfs_stat stat = {
6872
        0,
6873
    };
6874

6875
    stat.glfs_st_mode = mode;
6876
    stat.glfs_st_mask = GLFS_STAT_MODE;
6877

6878
    DECLARE_OLD_THIS;
6879
    __GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
6880

6881
    no_follow = (flags & AT_SYMLINK_NOFOLLOW) == AT_SYMLINK_NOFOLLOW;
6882
    subvol = setup_fopat_args(pglfd, path, !no_follow, &loc, &iatt, 0);
6883
    if (!subvol) {
6884
        ret = -1;
6885
        errno = EIO;
6886
        goto out;
6887
    }
6888

6889
    if (!loc.inode) {
6890
        ret = -1;
6891
        errno = ENOENT;
6892
        goto out;
6893
    }
6894

6895
    glfs_iatt_from_statx(&iatt, &stat);
6896
    glfsflags_from_gfapiflags(&stat, &glvalid);
6897

6898
    ret = syncop_setattr(subvol, &loc, &iatt, glvalid, 0, 0, NULL, NULL);
6899
    DECODE_SYNCOP_ERR(ret);
6900

6901
out:
6902
    cleanup_fopat_args(pglfd, subvol, ret, &loc);
6903
    __GLFS_EXIT_FS;
6904

6905
invalid_fs:
6906
    return ret;
6907
}
6908

6909
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchownat, 11.0)
6910
int
6911
pub_glfs_fchownat(struct glfs_fd *pglfd, const char *path, uid_t uid, gid_t gid,
6912
                  int flags)
6913
{
6914
    int ret = 0;
6915
    struct glfs_stat stat = {
6916
        0,
6917
    };
6918

6919
    if (uid != (uid_t)-1) {
6920
        stat.glfs_st_uid = uid;
6921
        stat.glfs_st_mask = GLFS_STAT_UID;
6922
    }
6923

6924
    if (gid != (gid_t)-1) {
6925
        stat.glfs_st_gid = gid;
6926
        stat.glfs_st_mask = stat.glfs_st_mask | GLFS_STAT_GID;
6927
    }
6928

6929
    xlator_t *subvol = NULL;
6930
    loc_t loc = {
6931
        0,
6932
    };
6933
    struct iatt iatt = {
6934
        0,
6935
    };
6936
    int glvalid;
6937
    int no_follow = 0;
6938
    int is_path_empty = 0;
6939

6940
    DECLARE_OLD_THIS;
6941
    __GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
6942

6943
    no_follow = (flags & AT_SYMLINK_NOFOLLOW) == AT_SYMLINK_NOFOLLOW;
6944
    is_path_empty = (flags & AT_EMPTY_PATH) == AT_EMPTY_PATH;
6945

6946
    if (is_path_empty && path[0] == '\0') {
6947
        GF_REF_GET(pglfd);
6948

6949
        subvol = glfs_active_subvol(pglfd->fs);
6950
        if (!subvol) {
6951
            ret = -1;
6952
            errno = EIO;
6953
            goto out;
6954
        }
6955

6956
        fd_to_loc(pglfd, &loc);
6957
    } else {
6958
        subvol = setup_fopat_args(pglfd, path, !no_follow, &loc, &iatt, 0);
6959
        if (!subvol) {
6960
            ret = -1;
6961
            errno = EIO;
6962
            goto out;
6963
        }
6964
    }
6965

6966
    if (!loc.inode) {
6967
        ret = -1;
6968
        errno = ENOENT;
6969
        goto out;
6970
    }
6971

6972
    glfs_iatt_from_statx(&iatt, &stat);
6973
    glfsflags_from_gfapiflags(&stat, &glvalid);
6974

6975
    if (stat.glfs_st_mask) {
6976
        ret = syncop_setattr(subvol, &loc, &iatt, glvalid, 0, 0, NULL, NULL);
6977
        DECODE_SYNCOP_ERR(ret);
6978
    }
6979

6980
out:
6981
    cleanup_fopat_args(pglfd, subvol, ret, &loc);
6982

6983
    __GLFS_EXIT_FS;
6984

6985
invalid_fs:
6986
    return ret;
6987
}
6988

6989
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_linkat, 11.0)
6990
int
6991
pub_glfs_linkat(struct glfs_fd *oldpglfd, const char *oldpath,
6992
                struct glfs_fd *newpglfd, const char *newpath, int flags)
6993
{
6994
    int ret = -1;
6995
    int reval = 0;
6996
    xlator_t *oldsubvol = NULL;
6997
    xlator_t *newsubvol = NULL;
6998
    loc_t oldloc = {
6999
        0,
7000
    };
7001
    loc_t newloc = {
7002
        0,
7003
    };
7004
    struct iatt oldiatt = {
7005
        0,
7006
    };
7007
    struct iatt newiatt = {
7008
        0,
7009
    };
7010
    int follow = 0;
7011
    int is_path_empty = 0;
7012

7013
    DECLARE_OLD_THIS;
7014
    __GLFS_ENTRY_VALIDATE_FD(oldpglfd, invalid_fs);
7015
    __GLFS_ENTRY_VALIDATE_FD(newpglfd, invalid_fs);
7016

7017
    /* By default oldpath is not dereferenced if it is a symbolic link.
       If the 'AT_SYMLINK_FOLLOW' flag is set, oldpath is dereferenced
       and the new link is created against the file it resolves to.
    */
7024
    follow = (flags & AT_SYMLINK_FOLLOW) == AT_SYMLINK_FOLLOW;
7025
    is_path_empty = (flags & AT_EMPTY_PATH) == AT_EMPTY_PATH;
7026

7027
retry:
7028
    /* Retry case */
7029
    if (oldsubvol) {
7030
        cleanup_fopat_args(oldpglfd, oldsubvol, ret, &oldloc);
7031
    }
7032

7033
    if (is_path_empty && oldpath[0] == '\0') {
7034
        GF_REF_GET(oldpglfd);
7035

7036
        oldsubvol = glfs_active_subvol(oldpglfd->fs);
7037
        if (!oldsubvol) {
7038
            ret = -1;
7039
            errno = EIO;
7040
            goto out;
7041
        }
7042

7043
        fd_to_loc(oldpglfd, &oldloc);
7044

7045
        if (oldloc.inode->ia_type == IA_IFDIR) {
7046
            ret = -1;
7047
            errno = EISDIR;
7048
            goto out;
7049
        }
7050
    } else {
7051
        oldsubvol = setup_fopat_args(oldpglfd, oldpath, follow, &oldloc,
7052
                                     &oldiatt, reval);
7053
    }
7054

7055
    if (!oldsubvol) {
7056
        ret = -1;
7057
    }
7058

7059
    ESTALE_RETRY(ret, errno, reval, &oldloc, retry);
7060

7061
    if (!oldsubvol) {
7062
        goto out;
7063
    }
7064

7065
    if (oldsubvol && !oldloc.inode) {
7066
        ret = -1;
7067
        errno = ENOENT;
7068
        goto out;
7069
    }
7070

7071
retrynew:
7072
    /* Retry case */
7073
    if (newsubvol) {
7074
        cleanup_fopat_args(newpglfd, newsubvol, ret, &newloc);
7075
    }
7076
    /* The 'AT_SYMLINK_FOLLOW' flag applies only to oldpath.
7077
     */
7078
    newsubvol = setup_fopat_args(newpglfd, newpath, 0, &newloc, &newiatt,
7079
                                 reval);
7080
    if (!newsubvol) {
7081
        ret = -1;
7082
    }
7083

7084
    ESTALE_RETRY(ret, errno, reval, &newloc, retrynew);
7085

7086
    if (newsubvol && newloc.inode) {
7087
        ret = -1;
7088
        errno = EEXIST;
7089
        goto out;
7090
    }
7091

7092
    if (oldiatt.ia_type == IA_IFDIR) {
7093
        ret = -1;
7094
        errno = EISDIR;
7095
        goto out;
7096
    }
7097

7098
    /* Fill the inode of the hard link to be the same as that of the
7099
       original file
7100
    */
7101
    if (newloc.inode) {
7102
        inode_unref(newloc.inode);
7103
        newloc.inode = NULL;
7104
    }
7105
    newloc.inode = inode_ref(oldloc.inode);
7106

7107
    ret = syncop_link(newsubvol, &oldloc, &newloc, &newiatt, NULL, NULL);
7108
    DECODE_SYNCOP_ERR(ret);
7109

7110
    if (ret == 0)
7111
        ret = glfs_loc_link(&newloc, &newiatt);
7112
out:
7113
    cleanup_fopat_args(oldpglfd, oldsubvol, ret, &oldloc);
7114
    cleanup_fopat_args(newpglfd, newsubvol, ret, &newloc);
7115

7116
    __GLFS_EXIT_FS;
7117

7118
invalid_fs:
7119
    return ret;
7120
}
7121

7122
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mknodat, 11.0)
7123
int
7124
pub_glfs_mknodat(struct glfs_fd *pglfd, const char *path, mode_t mode,
7125
                 dev_t dev)
7126
{
7127
    int ret = -1;
7128
    int reval = 0;
7129
    xlator_t *subvol = NULL;
7130
    loc_t loc = {
7131
        0,
7132
    };
7133
    struct iatt iatt = {
7134
        0,
7135
    };
7136
    uuid_t gfid;
7137
    dict_t *xattr_req = NULL;
7138

7139
    DECLARE_OLD_THIS;
7140
    __GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
7141

7142
retry:
7143
    /* Retry case */
7144
    if (subvol) {
7145
        cleanup_fopat_args(pglfd, subvol, ret, &loc);
7146
    }
7147

7148
    subvol = setup_fopat_args(pglfd, path, 0, &loc, &iatt, reval);
7149
    if (!subvol) {
7150
        ret = -1;
7151
    }
7152

7153
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
7154

7155
    if (!subvol) {
7156
        ret = -1;
7157
        goto out;
7158
    }
7159

7160
    ret = setup_entry_fopat_args(gfid, &xattr_req, &loc);
7161
    if (ret) {
7162
        goto out;
7163
    }
7164

7165
    ret = syncop_mknod(subvol, &loc, mode, dev, &iatt, xattr_req, NULL);
7166
    DECODE_SYNCOP_ERR(ret);
7167

7168
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
7169

7170
    if (ret == 0)
7171
        ret = glfs_loc_link(&loc, &iatt);
7172

7173
out:
7174
    if (xattr_req)
7175
        dict_unref(xattr_req);
7176

7177
    cleanup_fopat_args(pglfd, subvol, ret, &loc);
7178
    __GLFS_EXIT_FS;
7179

7180
invalid_fs:
7181
    return ret;
7182
}
7183

7184
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readlinkat, 11.0)
7185
int
7186
pub_glfs_readlinkat(struct glfs_fd *pglfd, const char *path, char *buf,
7187
                    size_t bufsiz)
7188
{
7189
    int ret = -1;
7190
    int reval = 0;
7191
    xlator_t *subvol = NULL;
7192
    loc_t loc = {
7193
        0,
7194
    };
7195
    struct iatt iatt = {
7196
        0,
7197
    };
7198
    char *linkval = NULL;
7199

7200
    DECLARE_OLD_THIS;
7201
    __GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
7202

7203
retry:
7204
    /* retry case */
7205
    if (subvol) {
7206
        cleanup_fopat_args(pglfd, subvol, ret, &loc);
7207
    }
7208

7209
    subvol = setup_fopat_args(pglfd, path, 0, &loc, &iatt, reval);
7210
    if (!subvol) {
7211
        ret = -1;
7212
    }
7213

7214
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
7215

7216
    if (!subvol)
7217
        goto out;
7218

7219
    if (!loc.inode) {
7220
        ret = -1;
7221
        errno = ENOENT;
7222
        goto out;
7223
    }
7224

7225
    if (iatt.ia_type != IA_IFLNK) {
7226
        ret = -1;
7227
        errno = EINVAL;
7228
        goto out;
7229
    }
7230

7231
    ret = syncop_readlink(subvol, &loc, &linkval, bufsiz, NULL, NULL);
7232
    DECODE_SYNCOP_ERR(ret);
7233
    if (ret > 0) {
7234
        memcpy(buf, linkval, ret);
7235
        GF_FREE(linkval);
7236
    }
7237

7238
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
7239

7240
out:
7241
    cleanup_fopat_args(pglfd, subvol, ret, &loc);
7242

7243
    __GLFS_EXIT_FS;
7244

7245
invalid_fs:
7246
    return ret;
7247
}
7248

7249
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_renameat, 11.0)
7250
int
7251
pub_glfs_renameat(struct glfs_fd *oldpglfd, const char *oldpath,
7252
                  struct glfs_fd *newpglfd, const char *newpath)
7253
{
7254
    int ret = -1;
7255
    int reval = 0;
7256
    xlator_t *oldsubvol = NULL;
7257
    xlator_t *newsubvol = NULL;
7258
    loc_t oldloc = {
7259
        0,
7260
    };
7261
    loc_t newloc = {
7262
        0,
7263
    };
7264
    struct iatt oldiatt = {
7265
        0,
7266
    };
7267
    struct iatt newiatt = {
7268
        0,
7269
    };
7270

7271
    DECLARE_OLD_THIS;
7272
    __GLFS_ENTRY_VALIDATE_FD(oldpglfd, invalid_fs);
7273
    __GLFS_ENTRY_VALIDATE_FD(newpglfd, invalid_fs);
7274

7275
retry:
7276
    /* Retry case */
7277
    if (oldsubvol) {
7278
        cleanup_fopat_args(oldpglfd, oldsubvol, ret, &oldloc);
7279
    }
7280

7281
    oldsubvol = setup_fopat_args(oldpglfd, oldpath, 0, &oldloc, &oldiatt,
7282
                                 reval);
7283
    if (!oldsubvol) {
7284
        ret = -1;
7285
    }
7286

7287
    ESTALE_RETRY(ret, errno, reval, &oldloc, retry);
7288

7289
    if (!oldsubvol) {
7290
        goto out;
7291
    }
7292

7293
    /* subvol is not NULL */
7294
    if (!oldloc.inode) {
7295
        ret = -1;
7296
        errno = ENOENT;
7297
        goto out;
7298
    }
7299

7300
retrynew:
7301
    /* Retry case */
7302
    if (newsubvol) {
7303
        cleanup_fopat_args(newpglfd, newsubvol, ret, &newloc);
7304
    }
7305

7306
    newsubvol = setup_fopat_args(newpglfd, newpath, 0, &newloc, &newiatt,
7307
                                 reval);
7308
    if (!newsubvol) {
7309
        ret = -1;
7310
    }
7311

7312
    ESTALE_RETRY(ret, errno, reval, &newloc, retrynew);
7313

7314
    if (!newsubvol) {
7315
        goto out;
7316
    }
7317

7318
    if (newsubvol && newloc.inode) {
7319
        ret = -1;
7320
        errno = EEXIST;
7321
        goto out;
7322
    }
7323

7324
    if (errno != ENOENT && newloc.parent)
7325
        goto out;
7326

7327
    if (newiatt.ia_type != IA_INVAL) {
7328
        if ((oldiatt.ia_type == IA_IFDIR) != (newiatt.ia_type == IA_IFDIR)) {
7329
            /* Either both old and new must be dirs,
7330
             * or both must be non-dirs. Else, fail.
7331
             */
7332
            ret = -1;
7333
            errno = EISDIR;
7334
            goto out;
7335
        }
7336
    }
7337

7338
    /* TODO: - check if new or old is a prefix of the other, and fail EINVAL
7339
     *       - Add leaseid */
7340

7341
    ret = syncop_rename(newsubvol, &oldloc, &newloc, NULL, NULL);
7342
    DECODE_SYNCOP_ERR(ret);
7343

7344
    if (ret == 0) {
7345
        inode_rename(oldloc.parent->table, oldloc.parent, oldloc.name,
7346
                     newloc.parent, newloc.name, oldloc.inode, &oldiatt);
7347

7348
        if (newloc.inode && !inode_has_dentry(newloc.inode))
7349
            inode_forget(newloc.inode, 0);
7350
    }
7351
out:
7352
    cleanup_fopat_args(oldpglfd, oldsubvol, ret, &oldloc);
7353
    cleanup_fopat_args(newpglfd, newsubvol, ret, &newloc);
7354

7355
    __GLFS_EXIT_FS;
7356

7357
invalid_fs:
7358
    return ret;
7359
}
7360

7361
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_renameat2, 11.0)
7362
int
7363
pub_glfs_renameat2(struct glfs_fd *oldpglfd, const char *oldpath,
7364
                   struct glfs_fd *newpglfd, const char *newpath, int flags)
7365
{
7366
    int ret = -1;
7367
    int reval = 0;
7368
    xlator_t *oldsubvol = NULL;
7369
    xlator_t *newsubvol = NULL;
7370
    loc_t oldloc = {
7371
        0,
7372
    };
7373
    loc_t newloc = {
7374
        0,
7375
    };
7376
    struct iatt oldiatt = {
7377
        0,
7378
    };
7379
    struct iatt newiatt = {
7380
        0,
7381
    };
7382

7383
    DECLARE_OLD_THIS;
7384
    __GLFS_ENTRY_VALIDATE_FD(oldpglfd, invalid_fs);
7385
    __GLFS_ENTRY_VALIDATE_FD(newpglfd, invalid_fs);
7386

7387
retry:
7388
    /* Retry case */
7389
    if (oldsubvol) {
7390
        cleanup_fopat_args(oldpglfd, oldsubvol, ret, &oldloc);
7391
    }
7392

7393
    oldsubvol = setup_fopat_args(oldpglfd, oldpath, 0, &oldloc, &oldiatt,
7394
                                 reval);
7395
    if (!oldsubvol) {
7396
        ret = -1;
7397
    }
7398

7399
    ESTALE_RETRY(ret, errno, reval, &oldloc, retry);
7400

7401
    if (!oldsubvol) {
7402
        goto out;
7403
    }
7404

7405
    /* subvol is not NULL */
7406
    if (!oldloc.inode) {
7407
        ret = -1;
7408
        errno = ENOENT;
7409
        goto out;
7410
    }
7411

7412
retrynew:
7413
    /* Retry case */
7414
    if (newsubvol) {
7415
        cleanup_fopat_args(newpglfd, newsubvol, ret, &newloc);
7416
    }
7417

7418
    newsubvol = setup_fopat_args(newpglfd, newpath, 0, &newloc, &newiatt,
7419
                                 reval);
7420
    if (!newsubvol) {
7421
        ret = -1;
7422
    }
7423

7424
    ESTALE_RETRY(ret, errno, reval, &newloc, retrynew);
7425

7426
    if (!newsubvol) {
7427
        goto out;
7428
    }
7429

7430
    if (newloc.inode) {
7431
        ret = -1;
7432
        errno = EEXIST;
7433
        goto out;
7434
    }
7435

7436
    if (errno != ENOENT && newloc.parent)
7437
        goto out;
7438

7439
    if (newiatt.ia_type != IA_INVAL) {
7440
        if ((oldiatt.ia_type == IA_IFDIR) != (newiatt.ia_type == IA_IFDIR)) {
7441
            /* Either both old and new must be dirs,
7442
             * or both must be non-dirs. Else, fail.
7443
             */
7444
            ret = -1;
7445
            errno = EISDIR;
7446
            goto out;
7447
        }
7448
    }
7449

7450
    /* TODO: - check if new or old is a prefix of the other, and fail EINVAL
7451
     *       - Add leaseid */
7452

7453
    ret = syncop_rename(newsubvol, &oldloc, &newloc, NULL, NULL);
7454
    DECODE_SYNCOP_ERR(ret);
7455

7456
    if (ret == -1 && errno == ESTALE) {
7457
        if (reval < DEFAULT_REVAL_COUNT) {
7458
            reval++;
7459
            loc_wipe(&oldloc);
7460
            loc_wipe(&newloc);
7461
            goto retry;
7462
        }
7463
    }
7464

7465
    if (ret == 0) {
7466
        inode_rename(oldloc.parent->table, oldloc.parent, oldloc.name,
7467
                     newloc.parent, newloc.name, oldloc.inode, &oldiatt);
7468

7469
        if (newloc.inode && !inode_has_dentry(newloc.inode))
7470
            inode_forget(newloc.inode, 0);
7471
    }
7472
out:
7473
    cleanup_fopat_args(oldpglfd, oldsubvol, ret, &oldloc);
7474
    cleanup_fopat_args(newpglfd, newsubvol, ret, &newloc);
7475

7476
    __GLFS_EXIT_FS;
7477

7478
invalid_fs:
7479
    return ret;
7480
}
7481

7482
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_symlinkat, 11.0)
7483
int
7484
pub_glfs_symlinkat(const char *data, struct glfs_fd *pglfd, const char *path)
7485
{
7486
    int ret = -1;
7487
    int reval = 0;
7488
    xlator_t *subvol = NULL;
7489
    loc_t loc = {
7490
        0,
7491
    };
7492
    struct iatt iatt = {
7493
        0,
7494
    };
7495
    uuid_t gfid;
7496
    dict_t *xattr_req = NULL;
7497

7498
    DECLARE_OLD_THIS;
7499
    __GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
7500

7501
retry:
7502
    /* Retry case */
7503
    if (subvol) {
7504
        cleanup_fopat_args(pglfd, subvol, ret, &loc);
7505
    }
7506

7507
    subvol = setup_fopat_args(pglfd, path, 0, &loc, &iatt, reval);
7508
    if (!subvol) {
7509
        ret = -1;
7510
    }
7511

7512
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
7513

7514
    if (!subvol) {
7515
        ret = -1;
7516
        goto out;
7517
    }
7518

7519
    /* loc == newloc */
7520
    if (subvol && loc.inode) {
7521
        ret = -1;
7522
        errno = EEXIST;
7523
        goto out;
7524
    }
7525

7526
    ret = setup_entry_fopat_args(gfid, &xattr_req, &loc);
7527
    if (ret) {
7528
        goto out;
7529
    }
7530

7531
    ret = syncop_symlink(subvol, &loc, data, &iatt, xattr_req, NULL);
7532
    DECODE_SYNCOP_ERR(ret);
7533

7534
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
7535

7536
    if (ret == 0)
7537
        ret = glfs_loc_link(&loc, &iatt);
7538
out:
7539
    if (xattr_req)
7540
        dict_unref(xattr_req);
7541

7542
    cleanup_fopat_args(pglfd, subvol, ret, &loc);
7543

7544
    __GLFS_EXIT_FS;
7545

7546
invalid_fs:
7547
    return ret;
7548
}
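
/*
 * Illustrative sketch (kept under "#if 0", not built here): pairing
 * glfs_symlinkat() with glfs_readlinkat(). Like readlink(2), the readlink
 * call returns the number of bytes copied and does not NUL-terminate the
 * buffer. The directory handle from glfs_opendir() is an assumption.
 */
#if 0
#include <glusterfs/api/glfs.h>
#include <limits.h> /* PATH_MAX */
#include <stdio.h>

static void
symlink_roundtrip(struct glfs *fs)
{
    struct glfs_fd *dirfd = glfs_opendir(fs, "/exports");
    char target[PATH_MAX];
    int len;

    if (!dirfd)
        return;

    /* /exports/link -> "data/file" */
    if (glfs_symlinkat("data/file", dirfd, "link") == 0) {
        len = glfs_readlinkat(dirfd, "link", target, sizeof(target) - 1);
        if (len > 0) {
            target[len] = '\0'; /* not NUL-terminated by the call */
            printf("link -> %s\n", target);
        }
    }

    glfs_closedir(dirfd);
}
#endif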
7549

7550
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_unlinkat, 11.0)
7551
int
7552
pub_glfs_unlinkat(struct glfs_fd *pglfd, const char *path, int flags)
7553
{
7554
    int ret = -1;
7555
    int reval = 0;
7556
    int is_rmdir = 0;
7557
    xlator_t *subvol = NULL;
7558
    loc_t loc = {
7559
        0,
7560
    };
7561
    struct iatt iatt = {
7562
        0,
7563
    };
7564

7565
    DECLARE_OLD_THIS;
7566
    __GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
7567

7568
    is_rmdir = (flags & AT_REMOVEDIR) == AT_REMOVEDIR;
7569

7570
retry:
7571
    /* Retry case */
7572
    if (subvol) {
7573
        cleanup_fopat_args(pglfd, subvol, ret, &loc);
7574
    }
7575

7576
    subvol = setup_fopat_args(pglfd, path, 0, &loc, &iatt, reval);
7577
    if (!subvol) {
7578
        ret = -1;
7579
    }
7580

7581
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
7582

7583
    if (!subvol) {
7584
        ret = -1;
7585
        goto out;
7586
    }
7587

7588
    /* If a directory is to be unlinked then 'AT_REMOVEDIR'
7589
       must be specified.
7590
    */
7591
    if (iatt.ia_type == IA_IFDIR && !is_rmdir) {
7592
        ret = -1;
7593
        errno = EISDIR;
7594
        goto out;
7595
    } else if (iatt.ia_type != IA_IFDIR && is_rmdir) {
7596
        ret = -1;
7597
        errno = ENOTDIR;
7598
        goto out;
7599
    }
7600

7601
    /* TODO: Add leaseid */
7602
    /* Unlink or rmdir based on 'AT_REMOVEDIR' flag */
7603
    if (!is_rmdir)
7604
        ret = syncop_unlink(subvol, &loc, NULL, NULL);
7605
    else
7606
        ret = syncop_rmdir(subvol, &loc, 0, NULL, NULL);
7607

7608
    DECODE_SYNCOP_ERR(ret);
7609

7610
    ESTALE_RETRY(ret, errno, reval, &loc, retry);
7611

7612
    if (ret == 0)
7613
        ret = glfs_loc_unlink(&loc);
7614
out:
7615
    cleanup_fopat_args(pglfd, subvol, ret, &loc);
7616

7617
    __GLFS_EXIT_FS;
7618

7619
invalid_fs:
7620
    return ret;
7621
}
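
/*
 * Illustrative sketch (kept under "#if 0", not built here): combining
 * glfs_renameat2() and glfs_unlinkat(). As implemented above, the rename
 * fails with EEXIST if the new name already exists, and AT_REMOVEDIR
 * selects rmdir semantics instead of unlink. The directory handle from
 * glfs_opendir() is an assumption.
 */
#if 0
#include <glusterfs/api/glfs.h>
#include <fcntl.h> /* AT_REMOVEDIR */

static int
rotate_and_prune(struct glfs *fs)
{
    struct glfs_fd *dirfd = glfs_opendir(fs, "/exports");
    int ret = -1;

    if (!dirfd)
        return -1;

    /* move current -> previous; "previous" must not exist yet */
    ret = glfs_renameat2(dirfd, "current", dirfd, "previous", 0);
    if (ret)
        goto done;

    /* remove a stale file, then an empty directory */
    ret = glfs_unlinkat(dirfd, "stale.tmp", 0);
    if (ret == 0)
        ret = glfs_unlinkat(dirfd, "old-dir", AT_REMOVEDIR);

done:
    glfs_closedir(dirfd);
    return ret;
}
#endif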
7622
