2
Copyright (c) 2012-2018 Red Hat, Inc. <http://www.redhat.com>
3
This file is part of GlusterFS.
5
This file is licensed to you under your choice of the GNU Lesser
6
General Public License, version 3 or any later version (LGPLv3 or
7
later), or the GNU General Public License, version 2 (GPLv2), in all
8
cases as published by the Free Software Foundation.
11
/* for SEEK_HOLE and SEEK_DATA */
18
#include "glfs-internal.h"
19
#include "glfs-mem-types.h"
21
#include "gfapi-messages.h"
22
#include <glusterfs/compat-errno.h>
23
#include <glusterfs/common-utils.h>
25
#include "glusterfs3.h"
28
#define GF_NAME_MAX NAME_MAX
30
#define GF_NAME_MAX 255
33
struct upcall_syncop_args {
35
struct gf_upcall upcall_data;
38
#define READDIRBUF_SIZE (sizeof(struct dirent) + GF_NAME_MAX + 1)
40
typedef void (*glfs_io_cbk34)(glfs_fd_t *fd, ssize_t ret, void *data);
43
* This function will mark glfd for deletion and decrement its refcount.
46
glfs_mark_glfd_for_deletion(struct glfs_fd *glfd)
50
glfd->state = GLFD_CLOSE;
59
/* This function is useful for all async fops. There is chance that glfd is
60
* closed before async fop is completed. When glfd is closed we change the
61
* state to GLFD_CLOSE.
63
* This function will return _gf_true if the glfd is still valid else return
67
glfs_is_glfd_still_valid(struct glfs_fd *glfd)
69
gf_boolean_t ret = _gf_false;
73
if (glfd->state != GLFD_CLOSE)
82
glfd_set_state_bind(struct glfs_fd *glfd)
86
glfd->state = GLFD_OPEN;
97
* This routine is called when an upcall event of type
98
* 'GF_UPCALL_CACHE_INVALIDATION' is received.
99
* It makes a copy of the contents of the upcall cache-invalidation
100
* data received into an entry which is stored in the upcall list
101
* maintained by gfapi.
104
glfs_get_upcall_cache_invalidation(struct gf_upcall *to_up_data,
105
struct gf_upcall *from_up_data)
107
struct gf_upcall_cache_invalidation *ca_data = NULL;
108
struct gf_upcall_cache_invalidation *f_ca_data = NULL;
111
GF_VALIDATE_OR_GOTO(THIS->name, to_up_data, out);
112
GF_VALIDATE_OR_GOTO(THIS->name, from_up_data, out);
114
f_ca_data = from_up_data->data;
115
GF_VALIDATE_OR_GOTO(THIS->name, f_ca_data, out);
117
ca_data = GF_CALLOC(1, sizeof(*ca_data), glfs_mt_upcall_entry_t);
120
gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_ALLOC_FAILED, "entry",
125
to_up_data->data = ca_data;
127
ca_data->flags = f_ca_data->flags;
128
ca_data->expire_time_attr = f_ca_data->expire_time_attr;
129
ca_data->stat = f_ca_data->stat;
130
ca_data->p_stat = f_ca_data->p_stat;
131
ca_data->oldp_stat = f_ca_data->oldp_stat;
139
glfs_get_upcall_lease(struct gf_upcall *to_up_data,
140
struct gf_upcall *from_up_data)
142
struct gf_upcall_recall_lease *ca_data = NULL;
143
struct gf_upcall_recall_lease *f_ca_data = NULL;
146
GF_VALIDATE_OR_GOTO(THIS->name, to_up_data, out);
147
GF_VALIDATE_OR_GOTO(THIS->name, from_up_data, out);
149
f_ca_data = from_up_data->data;
150
GF_VALIDATE_OR_GOTO(THIS->name, f_ca_data, out);
152
ca_data = GF_CALLOC(1, sizeof(*ca_data), glfs_mt_upcall_entry_t);
155
gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_ALLOC_FAILED, "entry",
160
to_up_data->data = ca_data;
162
ca_data->lease_type = f_ca_data->lease_type;
163
gf_uuid_copy(ca_data->tid, f_ca_data->tid);
164
ca_data->dict = f_ca_data->dict;
171
glfs_loc_link(loc_t *loc, struct iatt *iatt)
174
inode_t *old_inode = NULL;
175
uint64_t ctx_value = LOOKUP_NOT_NEEDED;
182
old_inode = loc->inode;
184
/* If the inode already exists in the cache, the inode
185
* returned here points to the existing one. We need
186
* to update loc.inode accordingly.
188
loc->inode = inode_link(loc->inode, loc->parent, loc->name, iatt);
190
inode_ctx_set(loc->inode, THIS, &ctx_value);
191
inode_lookup(loc->inode);
192
inode_unref(old_inode);
202
glfs_iatt_to_stat(struct glfs *fs, struct iatt *iatt, struct stat *stat)
204
iatt_to_stat(iatt, stat);
205
stat->st_dev = fs->dev_id;
209
glfs_iatt_to_statx(struct glfs *fs, const struct iatt *iatt,
210
struct glfs_stat *statx)
212
statx->glfs_st_mask = 0;
214
statx->glfs_st_mode = 0;
215
if (IATT_TYPE_VALID(iatt->ia_flags)) {
216
statx->glfs_st_mode |= st_mode_type_from_ia(iatt->ia_type);
217
statx->glfs_st_mask |= GLFS_STAT_TYPE;
220
if (IATT_MODE_VALID(iatt->ia_flags)) {
221
statx->glfs_st_mode |= st_mode_prot_from_ia(iatt->ia_prot);
222
statx->glfs_st_mask |= GLFS_STAT_MODE;
225
if (IATT_NLINK_VALID(iatt->ia_flags)) {
226
statx->glfs_st_nlink = iatt->ia_nlink;
227
statx->glfs_st_mask |= GLFS_STAT_NLINK;
230
if (IATT_UID_VALID(iatt->ia_flags)) {
231
statx->glfs_st_uid = iatt->ia_uid;
232
statx->glfs_st_mask |= GLFS_STAT_UID;
235
if (IATT_GID_VALID(iatt->ia_flags)) {
236
statx->glfs_st_gid = iatt->ia_gid;
237
statx->glfs_st_mask |= GLFS_STAT_GID;
240
if (IATT_ATIME_VALID(iatt->ia_flags)) {
241
statx->glfs_st_atime.tv_sec = iatt->ia_atime;
242
statx->glfs_st_atime.tv_nsec = iatt->ia_atime_nsec;
243
statx->glfs_st_mask |= GLFS_STAT_ATIME;
246
if (IATT_MTIME_VALID(iatt->ia_flags)) {
247
statx->glfs_st_mtime.tv_sec = iatt->ia_mtime;
248
statx->glfs_st_mtime.tv_nsec = iatt->ia_mtime_nsec;
249
statx->glfs_st_mask |= GLFS_STAT_MTIME;
252
if (IATT_CTIME_VALID(iatt->ia_flags)) {
253
statx->glfs_st_ctime.tv_sec = iatt->ia_ctime;
254
statx->glfs_st_ctime.tv_nsec = iatt->ia_ctime_nsec;
255
statx->glfs_st_mask |= GLFS_STAT_CTIME;
258
if (IATT_BTIME_VALID(iatt->ia_flags)) {
259
statx->glfs_st_btime.tv_sec = iatt->ia_btime;
260
statx->glfs_st_btime.tv_nsec = iatt->ia_btime_nsec;
261
statx->glfs_st_mask |= GLFS_STAT_BTIME;
264
if (IATT_INO_VALID(iatt->ia_flags)) {
265
statx->glfs_st_ino = iatt->ia_ino;
266
statx->glfs_st_mask |= GLFS_STAT_INO;
269
if (IATT_SIZE_VALID(iatt->ia_flags)) {
270
statx->glfs_st_size = iatt->ia_size;
271
statx->glfs_st_mask |= GLFS_STAT_SIZE;
274
if (IATT_BLOCKS_VALID(iatt->ia_flags)) {
275
statx->glfs_st_blocks = iatt->ia_blocks;
276
statx->glfs_st_mask |= GLFS_STAT_BLOCKS;
279
/* unconditionally present, encode as is */
280
statx->glfs_st_blksize = iatt->ia_blksize;
281
statx->glfs_st_rdev_major = ia_major(iatt->ia_rdev);
282
statx->glfs_st_rdev_minor = ia_minor(iatt->ia_rdev);
283
statx->glfs_st_dev_major = ia_major(fs->dev_id);
284
statx->glfs_st_dev_minor = ia_minor(fs->dev_id);
286
/* At present we do not read any localFS attributes and pass them along,
287
* so setting this to 0. As we start supporting file attributes we can
288
* populate the same here as well */
289
statx->glfs_st_attributes = 0;
290
statx->glfs_st_attributes_mask = 0;
293
GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_iatt_from_statx, 6.0)
295
priv_glfs_iatt_from_statx(struct iatt *iatt, const struct glfs_stat *statx)
297
/* Most code in xlators are not checking validity flags before accessing
298
the items. Hence zero everything before setting valid items */
299
memset(iatt, 0, sizeof(struct iatt));
301
if (GLFS_STAT_TYPE_VALID(statx->glfs_st_mask)) {
302
iatt->ia_type = ia_type_from_st_mode(statx->glfs_st_mode);
303
iatt->ia_flags |= IATT_TYPE;
306
if (GLFS_STAT_MODE_VALID(statx->glfs_st_mask)) {
307
iatt->ia_prot = ia_prot_from_st_mode(statx->glfs_st_mode);
308
iatt->ia_flags |= IATT_MODE;
311
if (GLFS_STAT_NLINK_VALID(statx->glfs_st_mask)) {
312
iatt->ia_nlink = statx->glfs_st_nlink;
313
iatt->ia_flags |= IATT_NLINK;
316
if (GLFS_STAT_UID_VALID(statx->glfs_st_mask)) {
317
iatt->ia_uid = statx->glfs_st_uid;
318
iatt->ia_flags |= IATT_UID;
321
if (GLFS_STAT_GID_VALID(statx->glfs_st_mask)) {
322
iatt->ia_gid = statx->glfs_st_gid;
323
iatt->ia_flags |= IATT_GID;
326
if (GLFS_STAT_ATIME_VALID(statx->glfs_st_mask)) {
327
iatt->ia_atime = statx->glfs_st_atime.tv_sec;
328
iatt->ia_atime_nsec = statx->glfs_st_atime.tv_nsec;
329
iatt->ia_flags |= IATT_ATIME;
332
if (GLFS_STAT_MTIME_VALID(statx->glfs_st_mask)) {
333
iatt->ia_mtime = statx->glfs_st_mtime.tv_sec;
334
iatt->ia_mtime_nsec = statx->glfs_st_mtime.tv_nsec;
335
iatt->ia_flags |= IATT_MTIME;
338
if (GLFS_STAT_CTIME_VALID(statx->glfs_st_mask)) {
339
iatt->ia_ctime = statx->glfs_st_ctime.tv_sec;
340
iatt->ia_ctime_nsec = statx->glfs_st_ctime.tv_nsec;
341
iatt->ia_flags |= IATT_CTIME;
344
if (GLFS_STAT_BTIME_VALID(statx->glfs_st_mask)) {
345
iatt->ia_btime = statx->glfs_st_btime.tv_sec;
346
iatt->ia_btime_nsec = statx->glfs_st_btime.tv_nsec;
347
iatt->ia_flags |= IATT_BTIME;
350
if (GLFS_STAT_INO_VALID(statx->glfs_st_mask)) {
351
iatt->ia_ino = statx->glfs_st_ino;
352
iatt->ia_flags |= IATT_INO;
355
if (GLFS_STAT_SIZE_VALID(statx->glfs_st_mask)) {
356
iatt->ia_size = statx->glfs_st_size;
357
iatt->ia_flags |= IATT_SIZE;
360
if (GLFS_STAT_BLOCKS_VALID(statx->glfs_st_mask)) {
361
iatt->ia_blocks = statx->glfs_st_blocks;
362
iatt->ia_flags |= IATT_BLOCKS;
365
/* unconditionally present, encode as is */
366
iatt->ia_blksize = statx->glfs_st_blksize;
367
iatt->ia_rdev = makedev(statx->glfs_st_rdev_major,
368
statx->glfs_st_rdev_minor);
369
iatt->ia_dev = makedev(statx->glfs_st_dev_major, statx->glfs_st_dev_minor);
370
iatt->ia_attributes = statx->glfs_st_attributes;
371
iatt->ia_attributes_mask = statx->glfs_st_attributes_mask;
375
glfsflags_from_gfapiflags(struct glfs_stat *stat, int *glvalid)
378
if (stat->glfs_st_mask & GLFS_STAT_MODE) {
379
*glvalid |= GF_SET_ATTR_MODE;
382
if (stat->glfs_st_mask & GLFS_STAT_SIZE) {
383
*glvalid |= GF_SET_ATTR_SIZE;
386
if (stat->glfs_st_mask & GLFS_STAT_UID) {
387
*glvalid |= GF_SET_ATTR_UID;
390
if (stat->glfs_st_mask & GLFS_STAT_GID) {
391
*glvalid |= GF_SET_ATTR_GID;
394
if (stat->glfs_st_mask & GLFS_STAT_ATIME) {
395
*glvalid |= GF_SET_ATTR_ATIME;
398
if (stat->glfs_st_mask & GLFS_STAT_MTIME) {
399
*glvalid |= GF_SET_ATTR_MTIME;
404
glfs_loc_unlink(loc_t *loc)
406
inode_unlink(loc->inode, loc->parent, loc->name);
408
/* since glfs_h_* objects hold a reference to inode
409
* it is safe to keep lookup count to '0' */
410
if (!inode_has_dentry(loc->inode))
411
inode_forget(loc->inode, 0);
416
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_open, 3.4.0)
418
pub_glfs_open(struct glfs *fs, const char *path, int flags)
421
struct glfs_fd *glfd = NULL;
422
xlator_t *subvol = NULL;
430
dict_t *fop_attr = NULL;
433
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
435
subvol = glfs_active_subvol(fs);
442
glfd = glfs_fd_new(fs);
447
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
449
ESTALE_RETRY(ret, errno, reval, &loc, retry);
454
ret = validate_open_flags(flags, iatt.ia_type);
459
/* Retry. Safe to touch glfd->fd as we
460
still have not glfs_fd_bind() yet.
466
glfd->fd = fd_create(loc.inode, getpid());
472
glfd->fd->flags = flags;
474
ret = get_fop_attr_thrd_key(&fop_attr);
476
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
478
if (IA_ISDIR(iatt.ia_type))
479
ret = syncop_opendir(subvol, &loc, glfd->fd, NULL, NULL);
481
ret = syncop_open(subvol, &loc, flags, glfd->fd, fop_attr, NULL);
483
DECODE_SYNCOP_ERR(ret);
485
ESTALE_RETRY(ret, errno, reval, &loc, retry);
490
dict_unref(fop_attr);
496
glfd_set_state_bind(glfd);
499
glfs_subvol_done(fs, subvol);
508
cleanup_fopat_args(struct glfs_fd *pglfd, xlator_t *subvol, int ret, loc_t *loc)
514
glfs_subvol_done(pglfd->fs, subvol);
520
setup_fopat_args(struct glfs_fd *pglfd, const char *path, gf_boolean_t follow,
521
loc_t *loc, struct iatt *iatt, int reval)
524
xlator_t *subvol = NULL;
528
subvol = glfs_active_subvol(pglfd->fs);
535
glfs_lock(pglfd->fs, _gf_true);
537
ret = glfs_resolve_at(pglfd->fs, subvol, pglfd->fd->inode, path, loc,
538
iatt, follow, reval);
540
glfs_unlock(pglfd->fs);
548
if (ret < 0 && errno != ENOENT) {
549
cleanup_fopat_args(pglfd, subvol, ret, loc);
557
setup_entry_fopat_args(uuid_t gfid, dict_t **xattr_req, loc_t *loc)
567
/* errno from setup_fopat_args */
569
/* Any other type of error is fatal */
572
/* errno == ENOENT */
574
/* The parent directory or an ancestor even
575
higher does not exist
579
loc->inode = inode_new(loc->parent->table);
586
*xattr_req = dict_new();
593
gf_uuid_generate(gfid);
594
ret = dict_set_gfuuid(*xattr_req, "gfid-req", gfid, true);
606
dict_unref(*xattr_req);
612
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_openat, 11.0)
614
pub_glfs_openat(struct glfs_fd *pglfd, const char *path, int flags, mode_t mode)
617
struct glfs_fd *glfd = NULL;
618
xlator_t *subvol = NULL;
622
dict_t *fop_attr = NULL;
623
struct iatt iatt = {0};
625
gf_boolean_t is_create = 0;
628
__GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
630
is_create = !!(flags & O_CREAT);
631
subvol = setup_fopat_args(pglfd, path, !(flags & O_NOFOLLOW), &loc, &iatt,
637
if (is_create && !loc.inode) {
638
ret = setup_entry_fopat_args(gfid, &fop_attr, &loc);
644
/* Error is ENOENT but O_CREAT flag is not set */
651
glfd = glfs_fd_new(pglfd->fs);
657
glfd->fd = fd_create(loc.inode, getpid());
663
glfd->fd->flags = flags;
665
ret = get_fop_attr_thrd_key(&fop_attr);
667
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
670
if (IA_ISDIR(iatt.ia_type))
671
ret = syncop_opendir(subvol, &loc, glfd->fd, NULL, NULL);
673
ret = syncop_open(subvol, &loc, flags, glfd->fd, fop_attr, NULL);
675
ret = syncop_create(subvol, &loc, flags, mode, glfd->fd, &iatt,
678
DECODE_SYNCOP_ERR(ret);
680
if (is_create && ret == 0)
681
ret = glfs_loc_link(&loc, &iatt);
683
/* Because it is openat(), no ESTALE expected */
689
glfd_set_state_bind(glfd);
693
dict_unref(fop_attr);
695
cleanup_fopat_args(pglfd, subvol, ret, &loc);
703
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_close, 3.4.0)
705
pub_glfs_close(struct glfs_fd *glfd)
707
xlator_t *subvol = NULL;
710
struct glfs *fs = NULL;
711
dict_t *fop_attr = NULL;
714
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
716
gf_dirent_free(list_entry(&glfd->entries, gf_dirent_t, list));
717
subvol = glfs_active_subvol(glfd->fs);
724
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
731
if (glfd->lk_owner.len != 0) {
732
ret = syncopctx_setfslkowner(&glfd->lk_owner);
736
ret = get_fop_attr_thrd_key(&fop_attr);
738
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
740
ret = syncop_flush(subvol, fd, fop_attr, NULL);
741
DECODE_SYNCOP_ERR(ret);
748
dict_unref(fop_attr);
750
glfs_mark_glfd_for_deletion(glfd);
751
glfs_subvol_done(fs, subvol);
759
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lstat, 3.4.0)
761
pub_glfs_lstat(struct glfs *fs, const char *path, struct stat *stat)
764
xlator_t *subvol = NULL;
774
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
776
subvol = glfs_active_subvol(fs);
783
ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
785
ESTALE_RETRY(ret, errno, reval, &loc, retry);
787
if (ret == 0 && stat)
788
glfs_iatt_to_stat(fs, &iatt, stat);
792
glfs_subvol_done(fs, subvol);
800
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_stat, 3.4.0)
802
pub_glfs_stat(struct glfs *fs, const char *path, struct stat *stat)
805
xlator_t *subvol = NULL;
815
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
817
subvol = glfs_active_subvol(fs);
824
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
826
ESTALE_RETRY(ret, errno, reval, &loc, retry);
828
if (ret == 0 && stat)
829
glfs_iatt_to_stat(fs, &iatt, stat);
833
glfs_subvol_done(fs, subvol);
841
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fstatat, 11.0)
843
pub_glfs_fstatat(struct glfs_fd *pglfd, const char *path, struct stat *stat,
847
xlator_t *subvol = NULL;
855
int is_path_empty = 0;
858
__GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
861
is_path_empty = (flags & AT_EMPTY_PATH) == AT_EMPTY_PATH;
866
cleanup_fopat_args(pglfd, subvol, ret, &loc);
869
if (is_path_empty && path[0] == '\0') {
872
subvol = glfs_active_subvol(pglfd->fs);
879
fd = glfs_resolve_fd(pglfd->fs, subvol, pglfd);
886
ret = syncop_fstat(subvol, fd, &iatt, NULL, NULL);
887
DECODE_SYNCOP_ERR(ret);
889
subvol = setup_fopat_args(pglfd, path, !(flags & AT_SYMLINK_NOFOLLOW),
897
ESTALE_RETRY(ret, errno, reval, &loc, retry);
899
if (!subvol || !stat) {
904
if (!loc.inode && !is_path_empty) {
910
glfs_iatt_to_stat(pglfd->fs, &iatt, stat);
914
cleanup_fopat_args(pglfd, subvol, ret, &loc);
921
GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_statx, 6.0)
923
priv_glfs_statx(struct glfs *fs, const char *path, const unsigned int mask,
924
struct glfs_stat *statxbuf)
927
xlator_t *subvol = NULL;
937
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
945
if (mask & ~GLFS_STAT_ALL) {
951
subvol = glfs_active_subvol(fs);
959
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
960
ESTALE_RETRY(ret, errno, reval, &loc, retry);
962
if (ret == 0 && statxbuf)
963
glfs_iatt_to_statx(fs, &iatt, statxbuf);
967
glfs_subvol_done(fs, subvol);
975
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fstat, 3.4.0)
977
pub_glfs_fstat(struct glfs_fd *glfd, struct stat *stat)
980
xlator_t *subvol = NULL;
987
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
991
subvol = glfs_active_subvol(glfd->fs);
998
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
1006
ret = syncop_fstat(subvol, fd, &iatt, NULL, NULL);
1008
glfs_iatt_to_stat(glfd->fs, &iatt, stat);
1010
ret = syncop_fstat(subvol, fd, NULL, NULL, NULL);
1011
DECODE_SYNCOP_ERR(ret);
1019
glfs_subvol_done(glfd->fs, subvol);
1027
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_creat, 3.4.0)
1029
pub_glfs_creat(struct glfs *fs, const char *path, int flags, mode_t mode)
1032
struct glfs_fd *glfd = NULL;
1033
xlator_t *subvol = NULL;
1037
struct iatt iatt = {
1041
dict_t *xattr_req = NULL;
1045
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
1047
subvol = glfs_active_subvol(fs);
1054
xattr_req = dict_new();
1061
gf_uuid_generate(gfid);
1062
ret = dict_set_gfuuid(xattr_req, "gfid-req", gfid, true);
1069
glfd = glfs_fd_new(fs);
1073
/* This must be glfs_resolve() and NOT glfs_lresolve().
1074
That is because open("name", O_CREAT) where "name"
1075
is a danging symlink must create the dangling
1079
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
1081
ESTALE_RETRY(ret, errno, reval, &loc, retry);
1083
if (ret == -1 && errno != ENOENT)
1084
/* Any other type of error is fatal */
1087
if (ret == -1 && errno == ENOENT && !loc.parent)
1088
/* The parent directory or an ancestor even
1089
higher does not exist
1094
if (flags & O_EXCL) {
1100
if (IA_ISDIR(iatt.ia_type)) {
1106
if (!IA_ISREG(iatt.ia_type)) {
1113
if (ret == -1 && errno == ENOENT) {
1114
loc.inode = inode_new(loc.parent->table);
1123
/* Retry. Safe to touch glfd->fd as we
1124
still have not glfs_fd_bind() yet.
1130
glfd->fd = fd_create(loc.inode, getpid());
1136
glfd->fd->flags = flags;
1138
if (get_fop_attr_thrd_key(&xattr_req))
1139
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
1141
ret = syncop_open(subvol, &loc, flags, glfd->fd, xattr_req, NULL);
1142
DECODE_SYNCOP_ERR(ret);
1144
ret = syncop_create(subvol, &loc, flags, mode, glfd->fd, &iatt,
1146
DECODE_SYNCOP_ERR(ret);
1149
ESTALE_RETRY(ret, errno, reval, &loc, retry);
1152
ret = glfs_loc_link(&loc, &iatt);
1157
dict_unref(xattr_req);
1163
glfd_set_state_bind(glfd);
1166
glfs_subvol_done(fs, subvol);
1174
#ifdef HAVE_SEEK_HOLE
1176
glfs_seek(struct glfs_fd *glfd, off_t offset, int whence)
1179
xlator_t *subvol = NULL;
1181
gf_seek_what_t what = 0;
1186
what = GF_SEEK_DATA;
1189
what = GF_SEEK_HOLE;
1192
/* other SEEK_* do not make sense, all operations get an offset
1193
* and the position in the fd is not tracked */
1198
subvol = glfs_active_subvol(glfd->fs);
1204
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
1210
ret = syncop_seek(subvol, fd, offset, what, NULL, &off);
1211
DECODE_SYNCOP_ERR(ret);
1220
glfs_subvol_done(glfd->fs, subvol);
1227
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lseek, 3.4.0)
1229
pub_glfs_lseek(struct glfs_fd *glfd, off_t offset, int whence)
1238
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
1244
glfd->offset = offset;
1248
glfd->offset += offset;
1252
ret = pub_glfs_fstat(glfd, &sb);
1254
/* seek cannot fail :O */
1257
glfd->offset = sb.st_size + offset;
1259
#ifdef HAVE_SEEK_HOLE
1262
ret = glfs_seek(glfd, offset, whence);
1284
glfs_preadv_common(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
1285
off_t offset, int flags, struct glfs_stat *poststat)
1287
xlator_t *subvol = NULL;
1290
struct iovec *iov = NULL;
1292
struct iobref *iobref = NULL;
1294
struct iatt iatt = {
1297
dict_t *fop_attr = NULL;
1300
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
1304
subvol = glfs_active_subvol(glfd->fs);
1311
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
1318
size = iov_length(iovec, iovcnt);
1320
ret = get_fop_attr_thrd_key(&fop_attr);
1322
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
1325
ret = syncop_readv(subvol, fd, size, offset, 0, &iov, &cnt, &iobref,
1326
&iatt, fop_attr, NULL);
1328
glfs_iatt_to_statx(glfd->fs, &iatt, poststat);
1330
ret = syncop_readv(subvol, fd, size, offset, 0, &iov, &cnt, &iobref,
1331
NULL, fop_attr, NULL);
1333
DECODE_SYNCOP_ERR(ret);
1338
size = iov_copy(iovec, iovcnt, iov, cnt); /* FIXME!!! */
1340
glfd->offset = (offset + size);
1347
iobref_unref(iobref);
1354
dict_unref(fop_attr);
1356
glfs_subvol_done(glfd->fs, subvol);
1364
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_preadv, 3.4.0)
1366
pub_glfs_preadv(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
1367
off_t offset, int flags)
1369
return glfs_preadv_common(glfd, iovec, iovcnt, offset, flags, NULL);
1372
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_read, 3.4.0)
1374
pub_glfs_read(struct glfs_fd *glfd, void *buf, size_t count, int flags)
1376
struct iovec iov = {
1387
iov.iov_len = count;
1389
ret = pub_glfs_preadv(glfd, &iov, 1, glfd->offset, flags);
1394
GFAPI_SYMVER_PUBLIC(glfs_pread34, glfs_pread, 3.4.0)
1396
pub_glfs_pread34(struct glfs_fd *glfd, void *buf, size_t count, off_t offset,
1399
struct iovec iov = {
1405
iov.iov_len = count;
1407
ret = pub_glfs_preadv(glfd, &iov, 1, offset, flags);
1412
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pread, 6.0)
1414
pub_glfs_pread(struct glfs_fd *glfd, void *buf, size_t count, off_t offset,
1415
int flags, struct glfs_stat *poststat)
1417
struct iovec iov = {
1423
iov.iov_len = count;
1425
ret = glfs_preadv_common(glfd, &iov, 1, offset, flags, poststat);
1430
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readv, 3.4.0)
1432
pub_glfs_readv(struct glfs_fd *glfd, const struct iovec *iov, int count,
1442
ret = pub_glfs_preadv(glfd, iov, count, glfd->offset, flags);
1448
struct glfs_fd *glfd;
1463
glfs_io_async_cbk(int op_ret, int op_errno, call_frame_t *frame, void *cookie,
1464
struct iovec *iovec, int count, struct iatt *prebuf,
1465
struct iatt *postbuf)
1467
struct glfs_io *gio = NULL;
1468
xlator_t *subvol = NULL;
1469
struct glfs *fs = NULL;
1470
struct glfs_fd *glfd = NULL;
1472
struct glfs_stat prestat = {}, *prestatp = NULL;
1473
struct glfs_stat poststat = {}, *poststatp = NULL;
1475
GF_VALIDATE_OR_GOTO("gfapi", frame, inval);
1476
GF_VALIDATE_OR_GOTO("gfapi", cookie, inval);
1479
frame->local = NULL;
1484
if (!glfs_is_glfd_still_valid(glfd))
1489
} else if (gio->op == GF_FOP_READ) {
1496
op_ret = iov_copy(gio->iov, gio->count, iovec, count);
1497
glfd->offset = gio->offset + op_ret;
1498
} else if (gio->op == GF_FOP_WRITE) {
1499
glfd->offset = gio->offset + gio->iov->iov_len;
1505
gio->fn34(gio->glfd, op_ret, gio->data);
1508
prestatp = &prestat;
1509
glfs_iatt_to_statx(fs, prebuf, prestatp);
1513
poststatp = &poststat;
1514
glfs_iatt_to_statx(fs, postbuf, poststatp);
1517
gio->fn(gio->glfd, op_ret, prestatp, poststatp, gio->data);
1521
/* Since the async operation is complete
1522
* release the ref taken during the start
1523
* of async operation
1528
STACK_DESTROY(frame->root);
1529
glfs_subvol_done(fs, subvol);
1537
glfs_preadv_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
1538
int op_ret, int op_errno, struct iovec *iovec, int count,
1539
struct iatt *stbuf, struct iobref *iobref, dict_t *xdata)
1541
glfs_io_async_cbk(op_ret, op_errno, frame, cookie, iovec, count, NULL,
1548
glfs_preadv_async_common(struct glfs_fd *glfd, const struct iovec *iovec,
1549
int count, off_t offset, int flags, gf_boolean_t oldcb,
1550
glfs_io_cbk fn, void *data)
1552
struct glfs_io *gio = NULL;
1554
call_frame_t *frame = NULL;
1555
xlator_t *subvol = NULL;
1556
struct glfs *fs = NULL;
1558
dict_t *fop_attr = NULL;
1561
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
1565
subvol = glfs_active_subvol(glfd->fs);
1572
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
1581
frame = syncop_create_frame(THIS);
1588
gio = GF_MALLOC(sizeof(*gio) + (count * sizeof(struct iovec)),
1596
gio->op = GF_FOP_READ;
1597
gio->offset = offset;
1603
memcpy(gio->iov, iovec, sizeof(struct iovec) * count);
1607
ret = get_fop_attr_thrd_key(&fop_attr);
1609
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
1611
STACK_WIND_COOKIE(frame, glfs_preadv_async_cbk, subvol, subvol,
1612
subvol->fops->readv, fd, iov_length(iovec, count), offset,
1625
STACK_DESTROY(frame->root);
1627
glfs_subvol_done(fs, subvol);
1630
dict_unref(fop_attr);
1640
GFAPI_SYMVER_PUBLIC(glfs_preadv_async34, glfs_preadv_async, 3.4.0)
1642
pub_glfs_preadv_async34(struct glfs_fd *glfd, const struct iovec *iovec,
1643
int count, off_t offset, int flags, glfs_io_cbk34 fn,
1646
return glfs_preadv_async_common(glfd, iovec, count, offset, flags, _gf_true,
1650
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_preadv_async, 6.0)
1652
pub_glfs_preadv_async(struct glfs_fd *glfd, const struct iovec *iovec,
1653
int count, off_t offset, int flags, glfs_io_cbk fn,
1656
return glfs_preadv_async_common(glfd, iovec, count, offset, flags,
1657
_gf_false, fn, data);
1660
GFAPI_SYMVER_PUBLIC(glfs_read_async34, glfs_read_async, 3.4.0)
1662
pub_glfs_read_async34(struct glfs_fd *glfd, void *buf, size_t count, int flags,
1663
glfs_io_cbk34 fn, void *data)
1665
struct iovec iov = {
1676
iov.iov_len = count;
1678
ret = glfs_preadv_async_common(glfd, &iov, 1, glfd->offset, flags, _gf_true,
1684
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_read_async, 6.0)
1686
pub_glfs_read_async(struct glfs_fd *glfd, void *buf, size_t count, int flags,
1687
glfs_io_cbk fn, void *data)
1689
struct iovec iov = {
1700
iov.iov_len = count;
1702
ret = glfs_preadv_async_common(glfd, &iov, 1, glfd->offset, flags,
1703
_gf_false, fn, data);
1708
GFAPI_SYMVER_PUBLIC(glfs_pread_async34, glfs_pread_async, 3.4.0)
1710
pub_glfs_pread_async34(struct glfs_fd *glfd, void *buf, size_t count,
1711
off_t offset, int flags, glfs_io_cbk34 fn, void *data)
1713
struct iovec iov = {
1719
iov.iov_len = count;
1721
ret = glfs_preadv_async_common(glfd, &iov, 1, offset, flags, _gf_true,
1727
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pread_async, 6.0)
1729
pub_glfs_pread_async(struct glfs_fd *glfd, void *buf, size_t count,
1730
off_t offset, int flags, glfs_io_cbk fn, void *data)
1732
struct iovec iov = {
1738
iov.iov_len = count;
1740
ret = glfs_preadv_async_common(glfd, &iov, 1, offset, flags, _gf_false, fn,
1746
GFAPI_SYMVER_PUBLIC(glfs_readv_async34, glfs_readv_async, 3.4.0)
1748
pub_glfs_readv_async34(struct glfs_fd *glfd, const struct iovec *iov, int count,
1749
int flags, glfs_io_cbk34 fn, void *data)
1758
ret = glfs_preadv_async_common(glfd, iov, count, glfd->offset, flags,
1759
_gf_true, (void *)fn, data);
1763
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readv_async, 6.0)
1765
pub_glfs_readv_async(struct glfs_fd *glfd, const struct iovec *iov, int count,
1766
int flags, glfs_io_cbk fn, void *data)
1775
ret = glfs_preadv_async_common(glfd, iov, count, glfd->offset, flags,
1776
_gf_false, fn, data);
1781
glfs_pwritev_common(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
1782
off_t offset, int flags, struct glfs_stat *prestat,
1783
struct glfs_stat *poststat)
1785
xlator_t *subvol = NULL;
1787
struct iobref *iobref = NULL;
1788
struct iobuf *iobuf = NULL;
1789
struct iovec iov = {
1793
struct iatt preiatt =
1800
dict_t *fop_attr = NULL;
1803
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
1807
if (iovec->iov_len >= GF_UNIT_GB) {
1810
gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
1811
"Data size too large", "size = %llu", GF_UNIT_GB, NULL);
1815
subvol = glfs_active_subvol(glfd->fs);
1822
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
1829
ret = iobuf_copy(subvol->ctx->iobuf_pool, iovec, iovcnt, &iobref, &iobuf,
1834
ret = get_fop_attr_thrd_key(&fop_attr);
1836
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
1838
ret = syncop_writev(subvol, fd, &iov, 1, offset, iobref, flags, &preiatt,
1839
&postiatt, fop_attr, NULL);
1840
DECODE_SYNCOP_ERR(ret);
1844
glfs_iatt_to_statx(glfd->fs, &preiatt, prestat);
1846
glfs_iatt_to_statx(glfd->fs, &postiatt, poststat);
1852
glfd->offset = (offset + iov.iov_len);
1857
iobref_unref(iobref);
1863
dict_unref(fop_attr);
1865
glfs_subvol_done(glfd->fs, subvol);
1873
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_copy_file_range, 6.0)
1875
pub_glfs_copy_file_range(struct glfs_fd *glfd_in, off64_t *off_in,
1876
struct glfs_fd *glfd_out, off64_t *off_out, size_t len,
1877
unsigned int flags, struct glfs_stat *statbuf,
1878
struct glfs_stat *prestat, struct glfs_stat *poststat)
1880
xlator_t *subvol = NULL;
1883
fd_t *fd_out = NULL;
1884
struct iatt preiatt =
1895
dict_t *fop_attr = NULL;
1900
__GLFS_ENTRY_VALIDATE_FD(glfd_in, invalid_fs);
1901
__GLFS_ENTRY_VALIDATE_FD(glfd_out, invalid_fs);
1903
GF_REF_GET(glfd_in);
1904
GF_REF_GET(glfd_out);
1906
if (glfd_in->fs != glfd_out->fs) {
1912
subvol = glfs_active_subvol(glfd_in->fs);
1919
fd_in = glfs_resolve_fd(glfd_in->fs, subvol, glfd_in);
1926
fd_out = glfs_resolve_fd(glfd_out->fs, subvol, glfd_out);
1934
* This is based on how the vfs layer in the kernel handles
1935
* copy_file_range call. Upon receiving it follows the
1936
* below method to consider the offset.
1937
* if (off_in != NULL)
1938
* use the value off_in to perform the op
1939
* else if off_in == NULL
1940
* use the current file offset position to perform the op
1942
* For gfapi, glfd->offset is used. For a freshly opened
1943
* fd, the offset is set to 0.
1948
pos_in = glfd_in->offset;
1953
pos_out = glfd_out->offset;
1955
ret = get_fop_attr_thrd_key(&fop_attr);
1957
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
1959
ret = syncop_copy_file_range(subvol, fd_in, pos_in, fd_out, pos_out, len,
1960
flags, &iattbuf, &preiatt, &postiatt, fop_attr,
1962
DECODE_SYNCOP_ERR(ret);
1974
glfs_iatt_to_statx(glfd_in->fs, &iattbuf, statbuf);
1976
glfs_iatt_to_statx(glfd_in->fs, &preiatt, prestat);
1978
glfs_iatt_to_statx(glfd_in->fs, &postiatt, poststat);
1985
* If *off_in is NULL, then there is no offset info that can
1986
* obtained from the input argument. Hence follow below method.
1987
* If *off_in is NULL, then
1988
* glfd->offset = offset + ret;
1992
* According to the man page of copy_file_range, if off_in is
1993
* NULL, then the offset of the source file is advanced by
1994
* the return value of the fop. The same applies to off_out as
1995
* well. Otherwise, if *off_in is not NULL, then the offset
1996
* is not advanced by the filesystem. The entity which sends
1997
* the copy_file_range call is supposed to advance the offset
1998
* value in its buffer (pointed to by *off_in or *off_out)
1999
* by the return value of copy_file_range.
2002
glfd_in->offset += ret;
2005
glfd_out->offset += ret;
2013
GF_REF_PUT(glfd_in);
2015
GF_REF_PUT(glfd_out);
2017
dict_unref(fop_attr);
2019
glfs_subvol_done(glfd_in->fs, subvol);
2027
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwritev, 3.4.0)
2029
pub_glfs_pwritev(struct glfs_fd *glfd, const struct iovec *iovec, int iovcnt,
2030
off_t offset, int flags)
2032
return glfs_pwritev_common(glfd, iovec, iovcnt, offset, flags, NULL, NULL);
2035
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_write, 3.4.0)
2037
pub_glfs_write(struct glfs_fd *glfd, const void *buf, size_t count, int flags)
2039
struct iovec iov = {
2049
iov.iov_base = (void *)buf;
2050
iov.iov_len = count;
2052
ret = pub_glfs_pwritev(glfd, &iov, 1, glfd->offset, flags);
2057
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_writev, 3.4.0)
2059
pub_glfs_writev(struct glfs_fd *glfd, const struct iovec *iov, int count,
2069
ret = pub_glfs_pwritev(glfd, iov, count, glfd->offset, flags);
2074
GFAPI_SYMVER_PUBLIC(glfs_pwrite34, glfs_pwrite, 3.4.0)
2076
pub_glfs_pwrite34(struct glfs_fd *glfd, const void *buf, size_t count,
2077
off_t offset, int flags)
2079
struct iovec iov = {
2084
iov.iov_base = (void *)buf;
2085
iov.iov_len = count;
2087
ret = pub_glfs_pwritev(glfd, &iov, 1, offset, flags);
2092
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwrite, 6.0)
2094
pub_glfs_pwrite(struct glfs_fd *glfd, const void *buf, size_t count,
2095
off_t offset, int flags, struct glfs_stat *prestat,
2096
struct glfs_stat *poststat)
2098
struct iovec iov = {
2103
iov.iov_base = (void *)buf;
2104
iov.iov_len = count;
2106
ret = glfs_pwritev_common(glfd, &iov, 1, offset, flags, prestat, poststat);
2112
pub_glfs_from_glfd(glfs_fd_t *);
2115
glfs_pwritev_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2116
int op_ret, int op_errno, struct iatt *prebuf,
2117
struct iatt *postbuf, dict_t *xdata)
2119
glfs_io_async_cbk(op_ret, op_errno, frame, cookie, NULL, 0, prebuf,
2126
glfs_pwritev_async_common(struct glfs_fd *glfd, const struct iovec *iovec,
2127
int count, off_t offset, int flags,
2128
gf_boolean_t oldcb, glfs_io_cbk fn, void *data)
2130
struct glfs_io *gio = NULL;
2132
call_frame_t *frame = NULL;
2133
xlator_t *subvol = NULL;
2135
struct iobref *iobref = NULL;
2136
struct iobuf *iobuf = NULL;
2137
dict_t *fop_attr = NULL;
2140
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2142
/* Need to take explicit ref so that the fd
2143
* is not destroyed before the fop is complete
2147
subvol = glfs_active_subvol(glfd->fs);
2153
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
2159
gio = GF_MALLOC(sizeof(*gio) + (1 * (sizeof(struct iovec))),
2161
if (caa_unlikely(!gio)) {
2167
gio->op = GF_FOP_WRITE;
2168
gio->offset = offset;
2175
ret = iobuf_copy(subvol->ctx->iobuf_pool, iovec, count, &iobref, &iobuf,
2180
frame = syncop_create_frame(THIS);
2189
ret = get_fop_attr_thrd_key(&fop_attr);
2191
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
2193
STACK_WIND_COOKIE(frame, glfs_pwritev_async_cbk, subvol, subvol,
2194
subvol->fops->writev, fd, gio->iov, gio->count, offset,
2195
flags, iobref, fop_attr);
2206
* If there is any error condition check after the frame
2207
* creation, we have to destroy the frame root.
2209
glfs_subvol_done(glfd->fs, subvol);
2212
dict_unref(fop_attr);
2217
iobref_unref(iobref);
2225
GFAPI_SYMVER_PUBLIC(glfs_pwritev_async34, glfs_pwritev_async, 3.4.0)
2227
pub_glfs_pwritev_async34(struct glfs_fd *glfd, const struct iovec *iovec,
2228
int count, off_t offset, int flags, glfs_io_cbk34 fn,
2231
return glfs_pwritev_async_common(glfd, iovec, count, offset, flags,
2232
_gf_true, (void *)fn, data);
2235
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwritev_async, 6.0)
2237
pub_glfs_pwritev_async(struct glfs_fd *glfd, const struct iovec *iovec,
2238
int count, off_t offset, int flags, glfs_io_cbk fn,
2241
return glfs_pwritev_async_common(glfd, iovec, count, offset, flags,
2242
_gf_false, fn, data);
2245
GFAPI_SYMVER_PUBLIC(glfs_write_async34, glfs_write_async, 3.4.0)
2247
pub_glfs_write_async34(struct glfs_fd *glfd, const void *buf, size_t count,
2248
int flags, glfs_io_cbk34 fn, void *data)
2250
struct iovec iov = {
2260
iov.iov_base = (void *)buf;
2261
iov.iov_len = count;
2263
ret = glfs_pwritev_async_common(glfd, &iov, 1, glfd->offset, flags,
2264
_gf_true, (void *)fn, data);
2269
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_write_async, 6.0)
2271
pub_glfs_write_async(struct glfs_fd *glfd, const void *buf, size_t count,
2272
int flags, glfs_io_cbk fn, void *data)
2274
struct iovec iov = {
2284
iov.iov_base = (void *)buf;
2285
iov.iov_len = count;
2287
ret = glfs_pwritev_async_common(glfd, &iov, 1, glfd->offset, flags,
2288
_gf_false, fn, data);
2293
GFAPI_SYMVER_PUBLIC(glfs_pwrite_async34, glfs_pwrite_async, 3.4.0)
2295
pub_glfs_pwrite_async34(struct glfs_fd *glfd, const void *buf, int count,
2296
off_t offset, int flags, glfs_io_cbk34 fn, void *data)
2298
struct iovec iov = {
2303
iov.iov_base = (void *)buf;
2304
iov.iov_len = count;
2306
ret = glfs_pwritev_async_common(glfd, &iov, 1, offset, flags, _gf_true,
2312
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_pwrite_async, 6.0)
2314
pub_glfs_pwrite_async(struct glfs_fd *glfd, const void *buf, int count,
2315
off_t offset, int flags, glfs_io_cbk fn, void *data)
2317
struct iovec iov = {
2322
iov.iov_base = (void *)buf;
2323
iov.iov_len = count;
2325
ret = glfs_pwritev_async_common(glfd, &iov, 1, offset, flags, _gf_false, fn,
2331
GFAPI_SYMVER_PUBLIC(glfs_writev_async34, glfs_writev_async, 3.4.0)
2333
pub_glfs_writev_async34(struct glfs_fd *glfd, const struct iovec *iov,
2334
int count, int flags, glfs_io_cbk34 fn, void *data)
2343
ret = glfs_pwritev_async_common(glfd, iov, count, glfd->offset, flags,
2344
_gf_true, (void *)fn, data);
2348
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_writev_async, 6.0)
2350
pub_glfs_writev_async(struct glfs_fd *glfd, const struct iovec *iov, int count,
2351
int flags, glfs_io_cbk fn, void *data)
2360
ret = glfs_pwritev_async_common(glfd, iov, count, glfd->offset, flags,
2361
_gf_false, fn, data);
2366
glfs_fsync_common(struct glfs_fd *glfd, struct glfs_stat *prestat,
2367
struct glfs_stat *poststat)
2370
xlator_t *subvol = NULL;
2372
struct iatt preiatt =
2379
dict_t *fop_attr = NULL;
2382
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2386
subvol = glfs_active_subvol(glfd->fs);
2393
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
2400
ret = get_fop_attr_thrd_key(&fop_attr);
2402
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
2404
ret = syncop_fsync(subvol, fd, 0, &preiatt, &postiatt, fop_attr, NULL);
2405
DECODE_SYNCOP_ERR(ret);
2409
glfs_iatt_to_statx(glfd->fs, &preiatt, prestat);
2411
glfs_iatt_to_statx(glfd->fs, &postiatt, poststat);
2419
dict_unref(fop_attr);
2421
glfs_subvol_done(glfd->fs, subvol);
2429
GFAPI_SYMVER_PUBLIC(glfs_fsync34, glfs_fsync, 3.4.0)
2431
pub_glfs_fsync34(struct glfs_fd *glfd)
2433
return glfs_fsync_common(glfd, NULL, NULL);
2436
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsync, 6.0)
2438
pub_glfs_fsync(struct glfs_fd *glfd, struct glfs_stat *prestat,
2439
struct glfs_stat *poststat)
2441
return glfs_fsync_common(glfd, prestat, poststat);
2445
glfs_fsync_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2446
int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
2447
struct iatt *postbuf, dict_t *xdata)
2449
glfs_io_async_cbk(op_ret, op_errno, frame, cookie, NULL, 0, prebuf,
2456
glfs_fsync_async_common(struct glfs_fd *glfd, gf_boolean_t oldcb,
2457
glfs_io_cbk fn, void *data, int dataonly)
2459
struct glfs_io *gio = NULL;
2461
call_frame_t *frame = NULL;
2462
xlator_t *subvol = NULL;
2465
/* Need to take explicit ref so that the fd
2466
* is not destroyed before the fop is complete
2470
subvol = glfs_active_subvol(glfd->fs);
2477
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
2484
frame = syncop_create_frame(THIS);
2491
gio = GF_CALLOC(1, sizeof(*gio), glfs_mt_glfs_io_t);
2498
gio->op = GF_FOP_FSYNC;
2500
gio->flags = dataonly;
2507
STACK_WIND_COOKIE(frame, glfs_fsync_async_cbk, subvol, subvol,
2508
subvol->fops->fsync, fd, dataonly, NULL);
2517
STACK_DESTROY(frame->root);
2518
glfs_subvol_done(glfd->fs, subvol);
2524
GFAPI_SYMVER_PUBLIC(glfs_fsync_async34, glfs_fsync_async, 3.4.0)
2526
pub_glfs_fsync_async34(struct glfs_fd *glfd, glfs_io_cbk34 fn, void *data)
2531
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2533
ret = glfs_fsync_async_common(glfd, _gf_true, (void *)fn, data, 0);
2541
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsync_async, 6.0)
2543
pub_glfs_fsync_async(struct glfs_fd *glfd, glfs_io_cbk fn, void *data)
2548
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2550
ret = glfs_fsync_async_common(glfd, _gf_false, fn, data, 0);
2559
glfs_fdatasync_common(struct glfs_fd *glfd, struct glfs_stat *prestat,
2560
struct glfs_stat *poststat)
2563
xlator_t *subvol = NULL;
2565
struct iatt preiatt =
2572
dict_t *fop_attr = NULL;
2575
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2579
subvol = glfs_active_subvol(glfd->fs);
2586
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
2593
ret = get_fop_attr_thrd_key(&fop_attr);
2595
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
2597
ret = syncop_fsync(subvol, fd, 1, &preiatt, &postiatt, fop_attr, NULL);
2598
DECODE_SYNCOP_ERR(ret);
2602
glfs_iatt_to_statx(glfd->fs, &preiatt, prestat);
2604
glfs_iatt_to_statx(glfd->fs, &postiatt, poststat);
2612
dict_unref(fop_attr);
2614
glfs_subvol_done(glfd->fs, subvol);
2622
GFAPI_SYMVER_PUBLIC(glfs_fdatasync34, glfs_fdatasync, 3.4.0)
2624
pub_glfs_fdatasync34(struct glfs_fd *glfd)
2626
return glfs_fdatasync_common(glfd, NULL, NULL);
2629
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fdatasync, 6.0)
2631
pub_glfs_fdatasync(struct glfs_fd *glfd, struct glfs_stat *prestat,
2632
struct glfs_stat *poststat)
2634
return glfs_fdatasync_common(glfd, prestat, poststat);
2637
GFAPI_SYMVER_PUBLIC(glfs_fdatasync_async34, glfs_fdatasync_async, 3.4.0)
2639
pub_glfs_fdatasync_async34(struct glfs_fd *glfd, glfs_io_cbk34 fn, void *data)
2644
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2646
ret = glfs_fsync_async_common(glfd, _gf_true, (void *)fn, data, 1);
2654
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fdatasync_async, 6.0)
2656
pub_glfs_fdatasync_async(struct glfs_fd *glfd, glfs_io_cbk fn, void *data)
2661
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2663
ret = glfs_fsync_async_common(glfd, _gf_false, fn, data, 1);
2672
glfs_ftruncate_common(struct glfs_fd *glfd, off_t offset,
2673
struct glfs_stat *prestat, struct glfs_stat *poststat)
2676
xlator_t *subvol = NULL;
2678
struct iatt preiatt =
2685
dict_t *fop_attr = NULL;
2688
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2692
subvol = glfs_active_subvol(glfd->fs);
2699
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
2706
ret = get_fop_attr_thrd_key(&fop_attr);
2708
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
2710
ret = syncop_ftruncate(subvol, fd, offset, &preiatt, &postiatt, fop_attr,
2712
DECODE_SYNCOP_ERR(ret);
2716
glfs_iatt_to_statx(glfd->fs, &preiatt, prestat);
2718
glfs_iatt_to_statx(glfd->fs, &postiatt, poststat);
2726
dict_unref(fop_attr);
2728
glfs_subvol_done(glfd->fs, subvol);
2736
GFAPI_SYMVER_PUBLIC(glfs_ftruncate34, glfs_ftruncate, 3.4.0)
2738
pub_glfs_ftruncate34(struct glfs_fd *glfd, off_t offset)
2740
return glfs_ftruncate_common(glfd, offset, NULL, NULL);
2743
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_ftruncate, 6.0)
2745
pub_glfs_ftruncate(struct glfs_fd *glfd, off_t offset,
2746
struct glfs_stat *prestat, struct glfs_stat *poststat)
2748
return glfs_ftruncate_common(glfd, offset, prestat, poststat);
2751
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_truncate, 3.7.15)
2753
pub_glfs_truncate(struct glfs *fs, const char *path, off_t length)
2756
xlator_t *subvol = NULL;
2760
struct iatt iatt = {
2766
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
2768
subvol = glfs_active_subvol(fs);
2775
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
2777
ESTALE_RETRY(ret, errno, reval, &loc, retry);
2782
ret = syncop_truncate(subvol, &loc, length, NULL, NULL);
2783
DECODE_SYNCOP_ERR(ret);
2785
ESTALE_RETRY(ret, errno, reval, &loc, retry);
2789
glfs_subvol_done(fs, subvol);
2798
glfs_ftruncate_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
2799
int32_t op_ret, int32_t op_errno, struct iatt *prebuf,
2800
struct iatt *postbuf, dict_t *xdata)
2802
glfs_io_async_cbk(op_ret, op_errno, frame, cookie, NULL, 0, prebuf,
2809
glfs_ftruncate_async_common(struct glfs_fd *glfd, off_t offset,
2810
gf_boolean_t oldcb, glfs_io_cbk fn, void *data)
2812
struct glfs_io *gio = NULL;
2814
call_frame_t *frame = NULL;
2815
xlator_t *subvol = NULL;
2817
dict_t *fop_attr = NULL;
2820
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
2822
/* Need to take explicit ref so that the fd
2823
* is not destroyed before the fop is complete
2827
subvol = glfs_active_subvol(glfd->fs);
2833
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
2839
frame = syncop_create_frame(THIS);
2845
gio = GF_CALLOC(1, sizeof(*gio), glfs_mt_glfs_io_t);
2851
gio->op = GF_FOP_FTRUNCATE;
2853
gio->offset = offset;
2860
ret = get_fop_attr_thrd_key(&fop_attr);
2862
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
2864
STACK_WIND_COOKIE(frame, glfs_ftruncate_async_cbk, subvol, subvol,
2865
subvol->fops->ftruncate, fd, offset, fop_attr);
2877
STACK_DESTROY(frame->root);
2878
glfs_subvol_done(glfd->fs, subvol);
2881
dict_unref(fop_attr);
2889
GFAPI_SYMVER_PUBLIC(glfs_ftruncate_async34, glfs_ftruncate_async, 3.4.0)
2891
pub_glfs_ftruncate_async34(struct glfs_fd *glfd, off_t offset, glfs_io_cbk34 fn,
2894
return glfs_ftruncate_async_common(glfd, offset, _gf_true, (void *)fn,
2898
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_ftruncate_async, 6.0)
2900
pub_glfs_ftruncate_async(struct glfs_fd *glfd, off_t offset, glfs_io_cbk fn,
2903
return glfs_ftruncate_async_common(glfd, offset, _gf_false, fn, data);
2906
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_access, 3.4.0)
2908
pub_glfs_access(struct glfs *fs, const char *path, int mode)
2911
xlator_t *subvol = NULL;
2915
struct iatt iatt = {
2921
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
2923
subvol = glfs_active_subvol(fs);
2930
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
2932
ESTALE_RETRY(ret, errno, reval, &loc, retry);
2937
ret = syncop_access(subvol, &loc, mode, NULL, NULL);
2938
DECODE_SYNCOP_ERR(ret);
2940
ESTALE_RETRY(ret, errno, reval, &loc, retry);
2944
glfs_subvol_done(fs, subvol);
2952
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_symlink, 3.4.0)
2954
pub_glfs_symlink(struct glfs *fs, const char *data, const char *path)
2957
xlator_t *subvol = NULL;
2961
struct iatt iatt = {
2965
dict_t *xattr_req = NULL;
2969
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
2971
subvol = glfs_active_subvol(fs);
2978
xattr_req = dict_new();
2985
gf_uuid_generate(gfid);
2986
ret = dict_set_gfuuid(xattr_req, "gfid-req", gfid, true);
2993
ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
2995
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3003
if (ret == -1 && errno != ENOENT)
3004
/* Any other type of error is fatal */
3007
if (ret == -1 && errno == ENOENT && !loc.parent)
3008
/* The parent directory or an ancestor even
3009
higher does not exist
3013
/* ret == -1 && errno == ENOENT */
3014
loc.inode = inode_new(loc.parent->table);
3021
ret = syncop_symlink(subvol, &loc, data, &iatt, xattr_req, NULL);
3022
DECODE_SYNCOP_ERR(ret);
3024
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3027
ret = glfs_loc_link(&loc, &iatt);
3032
dict_unref(xattr_req);
3034
glfs_subvol_done(fs, subvol);
3042
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readlink, 3.4.0)
3044
pub_glfs_readlink(struct glfs *fs, const char *path, char *buf, size_t bufsiz)
3047
xlator_t *subvol = NULL;
3051
struct iatt iatt = {
3055
char *linkval = NULL;
3058
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3060
subvol = glfs_active_subvol(fs);
3067
ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
3069
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3074
if (iatt.ia_type != IA_IFLNK) {
3080
ret = syncop_readlink(subvol, &loc, &linkval, bufsiz, NULL, NULL);
3081
DECODE_SYNCOP_ERR(ret);
3083
memcpy(buf, linkval, ret);
3087
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3091
glfs_subvol_done(fs, subvol);
3099
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mknod, 3.4.0)
3101
pub_glfs_mknod(struct glfs *fs, const char *path, mode_t mode, dev_t dev)
3104
xlator_t *subvol = NULL;
3108
struct iatt iatt = {
3112
dict_t *xattr_req = NULL;
3116
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3118
subvol = glfs_active_subvol(fs);
3125
xattr_req = dict_new();
3132
gf_uuid_generate(gfid);
3133
ret = dict_set_gfuuid(xattr_req, "gfid-req", gfid, true);
3140
ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
3142
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3150
if (ret == -1 && errno != ENOENT)
3151
/* Any other type of error is fatal */
3154
if (ret == -1 && errno == ENOENT && !loc.parent)
3155
/* The parent directory or an ancestor even
3156
higher does not exist
3160
/* ret == -1 && errno == ENOENT */
3161
loc.inode = inode_new(loc.parent->table);
3168
ret = syncop_mknod(subvol, &loc, mode, dev, &iatt, xattr_req, NULL);
3169
DECODE_SYNCOP_ERR(ret);
3171
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3174
ret = glfs_loc_link(&loc, &iatt);
3179
dict_unref(xattr_req);
3181
glfs_subvol_done(fs, subvol);
3189
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mkdir, 3.4.0)
3191
pub_glfs_mkdir(struct glfs *fs, const char *path, mode_t mode)
3194
xlator_t *subvol = NULL;
3198
struct iatt iatt = {
3202
dict_t *xattr_req = NULL;
3206
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3208
subvol = glfs_active_subvol(fs);
3215
xattr_req = dict_new();
3222
gf_uuid_generate(gfid);
3223
ret = dict_set_gfuuid(xattr_req, "gfid-req", gfid, true);
3230
ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
3232
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3240
if (ret == -1 && errno != ENOENT)
3241
/* Any other type of error is fatal */
3244
if (ret == -1 && errno == ENOENT && !loc.parent)
3245
/* The parent directory or an ancestor even
3246
higher does not exist
3250
/* ret == -1 && errno == ENOENT */
3251
loc.inode = inode_new(loc.parent->table);
3258
ret = syncop_mkdir(subvol, &loc, mode, &iatt, xattr_req, NULL);
3259
DECODE_SYNCOP_ERR(ret);
3261
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3264
ret = glfs_loc_link(&loc, &iatt);
3269
dict_unref(xattr_req);
3271
glfs_subvol_done(fs, subvol);
3279
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_unlink, 3.4.0)
3281
pub_glfs_unlink(struct glfs *fs, const char *path)
3284
xlator_t *subvol = NULL;
3288
struct iatt iatt = {
3294
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3296
subvol = glfs_active_subvol(fs);
3303
ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
3305
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3310
if (iatt.ia_type == IA_IFDIR) {
3316
/* TODO: Add leaseid */
3317
ret = syncop_unlink(subvol, &loc, NULL, NULL);
3318
DECODE_SYNCOP_ERR(ret);
3320
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3323
ret = glfs_loc_unlink(&loc);
3327
glfs_subvol_done(fs, subvol);
3335
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_rmdir, 3.4.0)
3337
pub_glfs_rmdir(struct glfs *fs, const char *path)
3340
xlator_t *subvol = NULL;
3344
struct iatt iatt = {
3350
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3352
subvol = glfs_active_subvol(fs);
3359
ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
3361
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3366
if (iatt.ia_type != IA_IFDIR) {
3372
ret = syncop_rmdir(subvol, &loc, 0, NULL, NULL);
3373
DECODE_SYNCOP_ERR(ret);
3375
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3378
ret = glfs_loc_unlink(&loc);
3382
glfs_subvol_done(fs, subvol);
3390
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_rename, 3.4.0)
3392
pub_glfs_rename(struct glfs *fs, const char *oldpath, const char *newpath)
3395
xlator_t *subvol = NULL;
3402
struct iatt oldiatt = {
3405
struct iatt newiatt = {
3411
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3413
subvol = glfs_active_subvol(fs);
3420
ret = glfs_lresolve(fs, subvol, oldpath, &oldloc, &oldiatt, reval);
3422
ESTALE_RETRY(ret, errno, reval, &oldloc, retry);
3427
ret = glfs_lresolve(fs, subvol, newpath, &newloc, &newiatt, reval);
3429
ESTALE_RETRY(ret, errno, reval, &newloc, retrynew);
3431
if (ret && errno != ENOENT && newloc.parent)
3434
if (newiatt.ia_type != IA_INVAL) {
3435
if ((oldiatt.ia_type == IA_IFDIR) != (newiatt.ia_type == IA_IFDIR)) {
3436
/* Either both old and new must be dirs,
3437
* or both must be non-dirs. Else, fail.
3445
/* TODO: - check if new or old is a prefix of the other, and fail EINVAL
3448
ret = syncop_rename(subvol, &oldloc, &newloc, NULL, NULL);
3449
DECODE_SYNCOP_ERR(ret);
3451
if (ret == -1 && errno == ESTALE) {
3452
if (reval < DEFAULT_REVAL_COUNT) {
3461
inode_rename(oldloc.parent->table, oldloc.parent, oldloc.name,
3462
newloc.parent, newloc.name, oldloc.inode, &oldiatt);
3464
if (newloc.inode && !inode_has_dentry(newloc.inode))
3465
inode_forget(newloc.inode, 0);
3471
glfs_subvol_done(fs, subvol);
3479
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_link, 3.4.0)
3481
pub_glfs_link(struct glfs *fs, const char *oldpath, const char *newpath)
3484
xlator_t *subvol = NULL;
3491
struct iatt oldiatt = {
3494
struct iatt newiatt = {
3500
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3502
subvol = glfs_active_subvol(fs);
3509
ret = glfs_lresolve(fs, subvol, oldpath, &oldloc, &oldiatt, reval);
3511
ESTALE_RETRY(ret, errno, reval, &oldloc, retry);
3516
ret = glfs_lresolve(fs, subvol, newpath, &newloc, &newiatt, reval);
3518
ESTALE_RETRY(ret, errno, reval, &newloc, retrynew);
3526
if (oldiatt.ia_type == IA_IFDIR) {
3532
/* Filling the inode of the hard link to be same as that of the
3536
inode_unref(newloc.inode);
3537
newloc.inode = NULL;
3539
newloc.inode = inode_ref(oldloc.inode);
3541
ret = syncop_link(subvol, &oldloc, &newloc, &newiatt, NULL, NULL);
3542
DECODE_SYNCOP_ERR(ret);
3544
if (ret == -1 && errno == ESTALE) {
3552
ret = glfs_loc_link(&newloc, &newiatt);
3557
glfs_subvol_done(fs, subvol);
3565
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_opendir, 3.4.0)
3567
pub_glfs_opendir(struct glfs *fs, const char *path)
3570
struct glfs_fd *glfd = NULL;
3571
xlator_t *subvol = NULL;
3575
struct iatt iatt = {
3581
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
3583
subvol = glfs_active_subvol(fs);
3590
glfd = glfs_fd_new(fs);
3595
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
3597
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3602
if (!IA_ISDIR(iatt.ia_type)) {
3609
/* Retry. Safe to touch glfd->fd as we
3610
still have not glfs_fd_bind() yet.
3616
glfd->fd = fd_create(loc.inode, getpid());
3623
ret = syncop_opendir(subvol, &loc, glfd->fd, NULL, NULL);
3624
DECODE_SYNCOP_ERR(ret);
3626
ESTALE_RETRY(ret, errno, reval, &loc, retry);
3634
glfd_set_state_bind(glfd);
3637
glfs_subvol_done(fs, subvol);
3645
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_closedir, 3.4.0)
3647
pub_glfs_closedir(struct glfs_fd *glfd)
3652
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
3654
gf_dirent_free(list_entry(&glfd->entries, gf_dirent_t, list));
3656
glfs_mark_glfd_for_deletion(glfd);
3666
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_telldir, 3.4.0)
3668
pub_glfs_telldir(struct glfs_fd *fd)
3678
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_seekdir, 3.4.0)
3680
pub_glfs_seekdir(struct glfs_fd *fd, long offset)
3682
gf_dirent_t *entry = NULL;
3683
gf_dirent_t *tmp = NULL;
3690
if (fd->offset == offset)
3693
fd->offset = offset;
3696
list_for_each_entry_safe(entry, tmp, &fd->entries, list)
3698
if (entry->d_off != offset)
3701
if (&tmp->list != &fd->entries) {
3707
/* could not find entry at requested offset in the cache.
3708
next readdir_r() will result in glfd_entry_refresh()
3713
glfs_discard_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3714
int32_t op_ret, int32_t op_errno,
3715
struct iatt *preop_stbuf, struct iatt *postop_stbuf,
3718
glfs_io_async_cbk(op_ret, op_errno, frame, cookie, NULL, 0, preop_stbuf,
3725
glfs_discard_async_common(struct glfs_fd *glfd, off_t offset, size_t len,
3726
gf_boolean_t oldcb, glfs_io_cbk fn, void *data)
3728
struct glfs_io *gio = NULL;
3730
call_frame_t *frame = NULL;
3731
xlator_t *subvol = NULL;
3733
dict_t *fop_attr = NULL;
3736
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
3738
/* Need to take explicit ref so that the fd
3739
* is not destroyed before the fop is complete
3743
subvol = glfs_active_subvol(glfd->fs);
3749
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
3755
frame = syncop_create_frame(THIS);
3761
gio = GF_CALLOC(1, sizeof(*gio), glfs_mt_glfs_io_t);
3767
gio->op = GF_FOP_DISCARD;
3769
gio->offset = offset;
3776
ret = get_fop_attr_thrd_key(&fop_attr);
3778
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
3780
STACK_WIND_COOKIE(frame, glfs_discard_async_cbk, subvol, subvol,
3781
subvol->fops->discard, fd, offset, len, fop_attr);
3786
dict_unref(fop_attr);
3795
STACK_DESTROY(frame->root);
3796
glfs_subvol_done(glfd->fs, subvol);
3805
GFAPI_SYMVER_PUBLIC(glfs_discard_async35, glfs_discard_async, 3.5.0)
3807
pub_glfs_discard_async35(struct glfs_fd *glfd, off_t offset, size_t len,
3808
glfs_io_cbk34 fn, void *data)
3810
return glfs_discard_async_common(glfd, offset, len, _gf_true, (void *)fn,
3814
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_discard_async, 6.0)
3816
pub_glfs_discard_async(struct glfs_fd *glfd, off_t offset, size_t len,
3817
glfs_io_cbk fn, void *data)
3819
return glfs_discard_async_common(glfd, offset, len, _gf_false, fn, data);
3823
glfs_zerofill_async_cbk(call_frame_t *frame, void *cookie, xlator_t *this,
3824
int32_t op_ret, int32_t op_errno,
3825
struct iatt *preop_stbuf, struct iatt *postop_stbuf,
3828
glfs_io_async_cbk(op_ret, op_errno, frame, cookie, NULL, 0, preop_stbuf,
3835
glfs_zerofill_async_common(struct glfs_fd *glfd, off_t offset, off_t len,
3836
gf_boolean_t oldcb, glfs_io_cbk fn, void *data)
3838
struct glfs_io *gio = NULL;
3840
call_frame_t *frame = NULL;
3841
xlator_t *subvol = NULL;
3843
dict_t *fop_attr = NULL;
3846
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
3848
/* Need to take explicit ref so that the fd
3849
* is not destroyed before the fop is complete
3853
subvol = glfs_active_subvol(glfd->fs);
3859
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
3865
frame = syncop_create_frame(THIS);
3871
gio = GF_CALLOC(1, sizeof(*gio), glfs_mt_glfs_io_t);
3877
gio->op = GF_FOP_ZEROFILL;
3879
gio->offset = offset;
3887
ret = get_fop_attr_thrd_key(&fop_attr);
3889
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
3891
STACK_WIND_COOKIE(frame, glfs_zerofill_async_cbk, subvol, subvol,
3892
subvol->fops->zerofill, fd, offset, len, fop_attr);
3902
STACK_DESTROY(frame->root);
3903
glfs_subvol_done(glfd->fs, subvol);
3906
dict_unref(fop_attr);
3914
GFAPI_SYMVER_PUBLIC(glfs_zerofill_async35, glfs_zerofill_async, 3.5.0)
3916
pub_glfs_zerofill_async35(struct glfs_fd *glfd, off_t offset, off_t len,
3917
glfs_io_cbk34 fn, void *data)
3919
return glfs_zerofill_async_common(glfd, offset, len, _gf_true, (void *)fn,
3923
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_zerofill_async, 6.0)
3925
pub_glfs_zerofill_async(struct glfs_fd *glfd, off_t offset, off_t len,
3926
glfs_io_cbk fn, void *data)
3928
return glfs_zerofill_async_common(glfd, offset, len, _gf_false, fn, data);
3932
gf_dirent_to_dirent(gf_dirent_t *gf_dirent, struct dirent *dirent)
3934
dirent->d_ino = gf_dirent->d_ino;
3936
#ifdef _DIRENT_HAVE_D_OFF
3937
dirent->d_off = gf_dirent->d_off;
3940
#ifdef _DIRENT_HAVE_D_TYPE
3941
dirent->d_type = gf_dirent->d_type;
3944
#ifdef _DIRENT_HAVE_D_NAMLEN
3945
dirent->d_namlen = strlen(gf_dirent->d_name);
3948
snprintf(dirent->d_name, NAME_MAX + 1, "%s", gf_dirent->d_name);
3952
glfd_entry_refresh(struct glfs_fd *glfd, int plus)
3954
xlator_t *subvol = NULL;
3955
gf_dirent_t entries;
3957
gf_dirent_t *entry = NULL;
3961
subvol = glfs_active_subvol(glfd->fs);
3968
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
3975
if (fd->inode->ia_type != IA_IFDIR) {
3981
INIT_LIST_HEAD(&entries.list);
3982
INIT_LIST_HEAD(&old.list);
3985
ret = syncop_readdirp(subvol, fd, 131072, glfd->offset, &entries, NULL,
3988
ret = syncop_readdir(subvol, fd, 131072, glfd->offset, &entries, NULL,
3990
DECODE_SYNCOP_ERR(ret);
3993
list_for_each_entry(entry, &entries.list, list)
3995
if ((!entry->inode && (!IA_ISDIR(entry->d_stat.ia_type))) ||
3996
((entry->d_stat.ia_ctime == 0) &&
3997
!inode_dir_or_parentdir(entry))) {
3998
/* entry->inode for directories will be
3999
* always set to null to force a lookup
4000
* on the dentry. Hence to not degrade
4001
* readdir performance, we skip lookups
4002
* for directory entries. Also we will have
4003
* proper stat if directory present on
4006
* In addition, if the stat is invalid, force
4007
* lookup to fetch proper stat.
4009
gf_fill_iatt_for_dirent(entry, fd->inode, subvol);
4013
gf_link_inodes_from_dirent(fd->inode, &entries);
4016
list_splice_init(&glfd->entries, &old.list);
4017
list_splice_init(&entries.list, &glfd->entries);
4019
/* spurious errno is dangerous for glfd_entry_next() */
4023
if ((ret > 0) && !list_empty(&glfd->entries)) {
4024
glfd->next = list_entry(glfd->entries.next, gf_dirent_t, list);
4027
gf_dirent_free(&old);
4032
glfs_subvol_done(glfd->fs, subvol);
4038
glfd_entry_next(struct glfs_fd *glfd, int plus)
4040
gf_dirent_t *entry = NULL;
4043
if (!glfd->offset || !glfd->next) {
4044
ret = glfd_entry_refresh(glfd, plus);
4053
if (&entry->next->list == &glfd->entries)
4056
glfd->next = entry->next;
4058
glfd->offset = entry->d_off;
4064
glfs_readdirbuf_get(struct glfs_fd *glfd)
4066
struct dirent *buf = NULL;
4068
LOCK(&glfd->fd->lock);
4070
buf = glfd->readdirbuf;
4072
memset(buf, 0, READDIRBUF_SIZE);
4076
buf = GF_CALLOC(1, READDIRBUF_SIZE, glfs_mt_readdirbuf_t);
4082
glfd->readdirbuf = buf;
4085
UNLOCK(&glfd->fd->lock);
4090
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdirplus_r, 3.4.0)
4092
pub_glfs_readdirplus_r(struct glfs_fd *glfd, struct stat *stat,
4093
struct dirent *ext, struct dirent **res)
4096
gf_dirent_t *entry = NULL;
4097
struct dirent *buf = NULL;
4100
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
4109
buf = glfs_readdirbuf_get(glfd);
4117
entry = glfd_entry_next(glfd, !!stat);
4129
gf_dirent_to_dirent(entry, buf);
4131
glfs_iatt_to_stat(glfd->fs, &entry->d_stat, stat);
4146
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdir_r, 3.4.0)
4148
pub_glfs_readdir_r(struct glfs_fd *glfd, struct dirent *buf,
4149
struct dirent **res)
4151
return pub_glfs_readdirplus_r(glfd, 0, buf, res);
4154
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdirplus, 3.5.0)
4156
pub_glfs_readdirplus(struct glfs_fd *glfd, struct stat *stat)
4158
struct dirent *res = NULL;
4161
ret = pub_glfs_readdirplus_r(glfd, stat, NULL, &res);
4168
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readdir, 3.5.0)
4170
pub_glfs_readdir(struct glfs_fd *glfd)
4172
return pub_glfs_readdirplus(glfd, NULL);
4175
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_statvfs, 3.4.0)
4177
pub_glfs_statvfs(struct glfs *fs, const char *path, struct statvfs *buf)
4180
xlator_t *subvol = NULL;
4184
struct iatt iatt = {
4190
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
4192
subvol = glfs_active_subvol(fs);
4199
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
4201
ESTALE_RETRY(ret, errno, reval, &loc, retry);
4206
ret = syncop_statfs(subvol, &loc, buf, NULL, NULL);
4207
DECODE_SYNCOP_ERR(ret);
4209
ESTALE_RETRY(ret, errno, reval, &loc, retry);
4213
glfs_subvol_done(fs, subvol);
4221
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setattr, 6.0)
4223
pub_glfs_setattr(struct glfs *fs, const char *path, struct glfs_stat *stat,
4228
xlator_t *subvol = NULL;
4232
struct iatt riatt = {
4235
struct iatt iatt = {
4241
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
4243
GF_VALIDATE_OR_GOTO("glfs_setattr", stat, out);
4245
subvol = glfs_active_subvol(fs);
4253
ret = glfs_resolve(fs, subvol, path, &loc, &riatt, reval);
4255
ret = glfs_lresolve(fs, subvol, path, &loc, &riatt, reval);
4257
ESTALE_RETRY(ret, errno, reval, &loc, retry);
4262
glfs_iatt_from_statx(&iatt, stat);
4263
glfsflags_from_gfapiflags(stat, &glvalid);
4265
/* TODO : Add leaseid */
4266
ret = syncop_setattr(subvol, &loc, &iatt, glvalid, 0, 0, NULL, NULL);
4267
DECODE_SYNCOP_ERR(ret);
4269
ESTALE_RETRY(ret, errno, reval, &loc, retry);
4273
glfs_subvol_done(fs, subvol);
4281
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsetattr, 6.0)
4283
pub_glfs_fsetattr(struct glfs_fd *glfd, struct glfs_stat *stat)
4287
struct iatt iatt = {
4290
xlator_t *subvol = NULL;
4294
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
4298
GF_VALIDATE_OR_GOTO("glfs_fsetattr", stat, out);
4300
subvol = glfs_active_subvol(glfd->fs);
4307
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
4314
glfs_iatt_from_statx(&iatt, stat);
4315
glfsflags_from_gfapiflags(stat, &glvalid);
4317
/* TODO : Add leaseid */
4318
ret = syncop_fsetattr(subvol, fd, &iatt, glvalid, 0, 0, NULL, NULL);
4319
DECODE_SYNCOP_ERR(ret);
4326
glfs_subvol_done(glfd->fs, subvol);
4334
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chmod, 3.4.0)
4336
pub_glfs_chmod(struct glfs *fs, const char *path, mode_t mode)
4339
struct glfs_stat stat = {
4343
stat.glfs_st_mode = mode;
4344
stat.glfs_st_mask = GLFS_STAT_MODE;
4346
ret = glfs_setattr(fs, path, &stat, 1);
4351
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchmod, 3.4.0)
4353
pub_glfs_fchmod(struct glfs_fd *glfd, mode_t mode)
4356
struct glfs_stat stat = {
4360
stat.glfs_st_mode = mode;
4361
stat.glfs_st_mask = GLFS_STAT_MODE;
4363
ret = glfs_fsetattr(glfd, &stat);
4368
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chown, 3.4.0)
4370
pub_glfs_chown(struct glfs *fs, const char *path, uid_t uid, gid_t gid)
4373
struct glfs_stat stat = {
4377
if (uid != (uid_t)-1) {
4378
stat.glfs_st_uid = uid;
4379
stat.glfs_st_mask = GLFS_STAT_UID;
4382
if (gid != (uid_t)-1) {
4383
stat.glfs_st_gid = gid;
4384
stat.glfs_st_mask = stat.glfs_st_mask | GLFS_STAT_GID;
4387
if (stat.glfs_st_mask)
4388
ret = glfs_setattr(fs, path, &stat, 1);
4393
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lchown, 3.4.0)
4395
pub_glfs_lchown(struct glfs *fs, const char *path, uid_t uid, gid_t gid)
4398
struct glfs_stat stat = {
4402
if (uid != (uid_t)-1) {
4403
stat.glfs_st_uid = uid;
4404
stat.glfs_st_mask = GLFS_STAT_UID;
4407
if (gid != (uid_t)-1) {
4408
stat.glfs_st_gid = gid;
4409
stat.glfs_st_mask = stat.glfs_st_mask | GLFS_STAT_GID;
4412
if (stat.glfs_st_mask)
4413
ret = glfs_setattr(fs, path, &stat, 0);
4418
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchown, 3.4.0)
4420
pub_glfs_fchown(struct glfs_fd *glfd, uid_t uid, gid_t gid)
4423
struct glfs_stat stat = {
4427
if (uid != (uid_t)-1) {
4428
stat.glfs_st_uid = uid;
4429
stat.glfs_st_mask = GLFS_STAT_UID;
4432
if (gid != (uid_t)-1) {
4433
stat.glfs_st_gid = gid;
4434
stat.glfs_st_mask = stat.glfs_st_mask | GLFS_STAT_GID;
4437
if (stat.glfs_st_mask)
4438
ret = glfs_fsetattr(glfd, &stat);
4443
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_utimens, 3.4.0)
4445
pub_glfs_utimens(struct glfs *fs, const char *path,
4446
const struct timespec times[2])
4449
struct glfs_stat stat = {
4453
stat.glfs_st_atime = times[0];
4454
stat.glfs_st_mtime = times[1];
4456
stat.glfs_st_mask = GLFS_STAT_ATIME | GLFS_STAT_MTIME;
4458
ret = glfs_setattr(fs, path, &stat, 1);
4463
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lutimens, 3.4.0)
4465
pub_glfs_lutimens(struct glfs *fs, const char *path,
4466
const struct timespec times[2])
4469
struct glfs_stat stat = {
4473
stat.glfs_st_atime = times[0];
4474
stat.glfs_st_mtime = times[1];
4476
stat.glfs_st_mask = GLFS_STAT_ATIME | GLFS_STAT_MTIME;
4478
ret = glfs_setattr(fs, path, &stat, 0);
4483
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_futimens, 3.4.0)
4485
pub_glfs_futimens(struct glfs_fd *glfd, const struct timespec times[2])
4488
struct glfs_stat stat = {
4492
stat.glfs_st_atime = times[0];
4493
stat.glfs_st_mtime = times[1];
4495
stat.glfs_st_mask = GLFS_STAT_ATIME | GLFS_STAT_MTIME;
4497
ret = glfs_fsetattr(glfd, &stat);
4503
glfs_getxattr_process(void *value, size_t size, dict_t *xattr, const char *name)
4505
data_t *data = NULL;
4508
data = dict_get(xattr, (char *)name);
4516
if (!value || !size)
4525
memcpy(value, data->data, ret);
4531
glfs_getxattr_common(struct glfs *fs, const char *path, const char *name,
4532
void *value, size_t size, int follow)
4535
xlator_t *subvol = NULL;
4539
struct iatt iatt = {
4542
dict_t *xattr = NULL;
4546
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
4548
if (!name || *name == '\0') {
4554
if (strlen(name) > GF_XATTR_NAME_MAX) {
4556
errno = ENAMETOOLONG;
4560
subvol = glfs_active_subvol(fs);
4569
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
4571
ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
4573
ESTALE_RETRY(ret, errno, reval, &loc, retry);
4578
ret = syncop_getxattr(subvol, &loc, &xattr, name, NULL, NULL);
4579
DECODE_SYNCOP_ERR(ret);
4581
ESTALE_RETRY(ret, errno, reval, &loc, retry);
4586
ret = glfs_getxattr_process(value, size, xattr, name);
4593
glfs_subvol_done(fs, subvol);
4601
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_getxattr, 3.4.0)
ssize_t
pub_glfs_getxattr(struct glfs *fs, const char *path, const char *name,
                  void *value, size_t size)
{
    /* Symlink-following variant: delegates to the common implementation
     * with follow=1 so the final path component is resolved. */
    return glfs_getxattr_common(fs, path, name, value, size, 1);
}
4609
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lgetxattr, 3.4.0)
ssize_t
pub_glfs_lgetxattr(struct glfs *fs, const char *path, const char *name,
                   void *value, size_t size)
{
    /* lstat-style variant: follow=0 so a trailing symlink is not
     * dereferenced and the xattr of the link itself is read. */
    return glfs_getxattr_common(fs, path, name, value, size, 0);
}
4617
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fgetxattr, 3.4.0)
4619
pub_glfs_fgetxattr(struct glfs_fd *glfd, const char *name, void *value,
4623
xlator_t *subvol = NULL;
4624
dict_t *xattr = NULL;
4628
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
4632
if (!name || *name == '\0') {
4638
if (strlen(name) > GF_XATTR_NAME_MAX) {
4640
errno = ENAMETOOLONG;
4644
subvol = glfs_active_subvol(glfd->fs);
4651
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
4658
ret = syncop_fgetxattr(subvol, fd, &xattr, name, NULL, NULL);
4659
DECODE_SYNCOP_ERR(ret);
4663
ret = glfs_getxattr_process(value, size, xattr, name);
4672
glfs_subvol_done(glfd->fs, subvol);
4680
/* filter out xattrs that need not be visible on the
4681
* client application.
4684
gfapi_filter_xattr(char *key)
4686
int need_filter = 0;
4688
/* If there are by chance any internal virtual xattrs (those starting with
4689
* 'glusterfs.'), filter them */
4690
if (strncmp("glusterfs.", key, SLEN("glusterfs.")) == 0)
4697
glfs_listxattr_process(void *value, size_t size, dict_t *xattr)
4704
ret = dict_keys_join(NULL, 0, xattr, gfapi_filter_xattr);
4706
if (!value || !size)
4713
dict_keys_join(value, size, xattr, gfapi_filter_xattr);
4721
glfs_listxattr_common(struct glfs *fs, const char *path, void *value,
4722
size_t size, int follow)
4725
xlator_t *subvol = NULL;
4729
struct iatt iatt = {
4732
dict_t *xattr = NULL;
4736
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
4738
subvol = glfs_active_subvol(fs);
4747
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
4749
ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
4751
ESTALE_RETRY(ret, errno, reval, &loc, retry);
4756
ret = syncop_getxattr(subvol, &loc, &xattr, NULL, NULL, NULL);
4757
DECODE_SYNCOP_ERR(ret);
4759
ESTALE_RETRY(ret, errno, reval, &loc, retry);
4764
ret = glfs_listxattr_process(value, size, xattr);
4771
glfs_subvol_done(fs, subvol);
4779
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_listxattr, 3.4.0)
ssize_t
pub_glfs_listxattr(struct glfs *fs, const char *path, void *value, size_t size)
{
    /* Symlink-following variant of listxattr (follow=1). */
    return glfs_listxattr_common(fs, path, value, size, 1);
}
4786
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_llistxattr, 3.4.0)
ssize_t
pub_glfs_llistxattr(struct glfs *fs, const char *path, void *value, size_t size)
{
    /* Non-following variant (follow=0): lists xattrs of the symlink
     * itself when path ends in one. */
    return glfs_listxattr_common(fs, path, value, size, 0);
}
4793
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_flistxattr, 3.4.0)
4795
pub_glfs_flistxattr(struct glfs_fd *glfd, void *value, size_t size)
4798
xlator_t *subvol = NULL;
4799
dict_t *xattr = NULL;
4803
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
4807
subvol = glfs_active_subvol(glfd->fs);
4814
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
4821
ret = syncop_fgetxattr(subvol, fd, &xattr, NULL, NULL, NULL);
4822
DECODE_SYNCOP_ERR(ret);
4826
ret = glfs_listxattr_process(value, size, xattr);
4835
glfs_subvol_done(glfd->fs, subvol);
4844
glfs_setxattr_common(struct glfs *fs, const char *path, const char *name,
4845
const void *value, size_t size, int flags, int follow)
4848
xlator_t *subvol = NULL;
4852
struct iatt iatt = {
4855
dict_t *xattr = NULL;
4857
void *value_cp = NULL;
4860
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
4862
if (!name || *name == '\0') {
4868
if (strlen(name) > GF_XATTR_NAME_MAX) {
4870
errno = ENAMETOOLONG;
4874
subvol = glfs_active_subvol(fs);
4883
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
4885
ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
4887
ESTALE_RETRY(ret, errno, reval, &loc, retry);
4892
value_cp = gf_memdup(value, size);
4893
GF_CHECK_ALLOC_AND_LOG(subvol->name, value_cp, ret,
4895
" duplicate setxattr value",
4898
xattr = dict_for_key_value(name, value_cp, size, _gf_false);
4906
ret = syncop_setxattr(subvol, &loc, xattr, flags, NULL, NULL);
4907
DECODE_SYNCOP_ERR(ret);
4914
glfs_subvol_done(fs, subvol);
4922
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_setxattr, 3.4.0)
int
pub_glfs_setxattr(struct glfs *fs, const char *path, const char *name,
                  const void *value, size_t size, int flags)
{
    /* Symlink-following setxattr (follow=1); flags are the usual
     * XATTR_CREATE/XATTR_REPLACE semantics passed through unchanged. */
    return glfs_setxattr_common(fs, path, name, value, size, flags, 1);
}
4930
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lsetxattr, 3.4.0)
int
pub_glfs_lsetxattr(struct glfs *fs, const char *path, const char *name,
                   const void *value, size_t size, int flags)
{
    /* Non-following variant (follow=0): operates on the symlink itself. */
    return glfs_setxattr_common(fs, path, name, value, size, flags, 0);
}
4938
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fsetxattr, 3.4.0)
4940
pub_glfs_fsetxattr(struct glfs_fd *glfd, const char *name, const void *value,
4941
size_t size, int flags)
4944
xlator_t *subvol = NULL;
4945
dict_t *xattr = NULL;
4947
void *value_cp = NULL;
4950
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
4954
if (!name || *name == '\0') {
4960
if (strlen(name) > GF_XATTR_NAME_MAX) {
4962
errno = ENAMETOOLONG;
4966
subvol = glfs_active_subvol(glfd->fs);
4973
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
4980
value_cp = gf_memdup(value, size);
4981
GF_CHECK_ALLOC_AND_LOG(subvol->name, value_cp, ret,
4983
" duplicate setxattr value",
4986
xattr = dict_for_key_value(name, value_cp, size, _gf_false);
4994
ret = syncop_fsetxattr(subvol, fd, xattr, flags, NULL, NULL);
4995
DECODE_SYNCOP_ERR(ret);
5005
glfs_subvol_done(glfd->fs, subvol);
5014
glfs_removexattr_common(struct glfs *fs, const char *path, const char *name,
5018
xlator_t *subvol = NULL;
5022
struct iatt iatt = {
5028
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
5030
subvol = glfs_active_subvol(fs);
5038
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
5040
ret = glfs_lresolve(fs, subvol, path, &loc, &iatt, reval);
5042
ESTALE_RETRY(ret, errno, reval, &loc, retry);
5047
ret = syncop_removexattr(subvol, &loc, name, NULL, NULL);
5048
DECODE_SYNCOP_ERR(ret);
5050
ESTALE_RETRY(ret, errno, reval, &loc, retry);
5055
glfs_subvol_done(fs, subvol);
5063
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_removexattr, 3.4.0)
int
pub_glfs_removexattr(struct glfs *fs, const char *path, const char *name)
{
    /* Symlink-following removexattr (follow=1). */
    return glfs_removexattr_common(fs, path, name, 1);
}
5070
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lremovexattr, 3.4.0)
int
pub_glfs_lremovexattr(struct glfs *fs, const char *path, const char *name)
{
    /* Non-following variant (follow=0): removes the xattr from the
     * symlink itself rather than its target. */
    return glfs_removexattr_common(fs, path, name, 0);
}
5077
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fremovexattr, 3.4.0)
5079
pub_glfs_fremovexattr(struct glfs_fd *glfd, const char *name)
5082
xlator_t *subvol = NULL;
5086
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5090
subvol = glfs_active_subvol(glfd->fs);
5097
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
5104
ret = syncop_fremovexattr(subvol, fd, name, NULL, NULL);
5105
DECODE_SYNCOP_ERR(ret);
5112
glfs_subvol_done(glfd->fs, subvol);
5120
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fallocate, 3.5.0)
5122
pub_glfs_fallocate(struct glfs_fd *glfd, int keep_size, off_t offset,
5126
xlator_t *subvol = NULL;
5128
dict_t *fop_attr = NULL;
5131
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5135
subvol = glfs_active_subvol(glfd->fs);
5142
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
5149
ret = get_fop_attr_thrd_key(&fop_attr);
5151
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
5153
ret = syncop_fallocate(subvol, fd, keep_size, offset, len, fop_attr, NULL);
5154
DECODE_SYNCOP_ERR(ret);
5161
dict_unref(fop_attr);
5163
glfs_subvol_done(glfd->fs, subvol);
5171
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_discard, 3.5.0)
5173
pub_glfs_discard(struct glfs_fd *glfd, off_t offset, size_t len)
5176
xlator_t *subvol = NULL;
5178
dict_t *fop_attr = NULL;
5181
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5185
subvol = glfs_active_subvol(glfd->fs);
5192
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
5199
ret = get_fop_attr_thrd_key(&fop_attr);
5201
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
5203
ret = syncop_discard(subvol, fd, offset, len, fop_attr, NULL);
5204
DECODE_SYNCOP_ERR(ret);
5211
dict_unref(fop_attr);
5213
glfs_subvol_done(glfd->fs, subvol);
5221
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_zerofill, 3.5.0)
5223
pub_glfs_zerofill(struct glfs_fd *glfd, off_t offset, off_t len)
5226
xlator_t *subvol = NULL;
5228
dict_t *fop_attr = NULL;
5231
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5235
subvol = glfs_active_subvol(glfd->fs);
5241
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
5247
ret = get_fop_attr_thrd_key(&fop_attr);
5249
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
5251
ret = syncop_zerofill(subvol, fd, offset, len, fop_attr, NULL);
5252
DECODE_SYNCOP_ERR(ret);
5259
dict_unref(fop_attr);
5261
glfs_subvol_done(glfd->fs, subvol);
5269
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_chdir, 3.4.0)
5271
pub_glfs_chdir(struct glfs *fs, const char *path)
5274
xlator_t *subvol = NULL;
5278
struct iatt iatt = {
5284
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
5286
subvol = glfs_active_subvol(fs);
5293
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
5295
ESTALE_RETRY(ret, errno, reval, &loc, retry);
5300
if (!IA_ISDIR(iatt.ia_type)) {
5306
glfs_cwd_set(fs, loc.inode);
5311
glfs_subvol_done(fs, subvol);
5319
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchdir, 3.4.0)
5321
pub_glfs_fchdir(struct glfs_fd *glfd)
5324
inode_t *inode = NULL;
5325
xlator_t *subvol = NULL;
5329
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5333
subvol = glfs_active_subvol(glfd->fs);
5340
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
5349
if (!IA_ISDIR(inode->ia_type)) {
5355
glfs_cwd_set(glfd->fs, inode);
5363
glfs_subvol_done(glfd->fs, subvol);
5371
static gf_boolean_t warn_realpath = _gf_true; /* log once */
5374
glfs_realpath_common(struct glfs *fs, const char *path, char *resolved_path,
5375
gf_boolean_t warn_deprecated)
5378
char *retpath = NULL;
5379
char *allocpath = NULL;
5380
xlator_t *subvol = NULL;
5384
struct iatt iatt = {
5390
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
5393
retpath = resolved_path;
5394
else if (warn_deprecated) {
5395
retpath = allocpath = malloc(PATH_MAX + 1);
5396
if (warn_realpath) {
5397
warn_realpath = _gf_false;
5398
gf_log(THIS->name, GF_LOG_WARNING,
5400
"is compiled against an old version of "
5401
"libgfapi, it should use glfs_free() to "
5402
"release the path returned by "
5406
retpath = allocpath = GLFS_CALLOC(1, PATH_MAX + 1, NULL,
5407
glfs_mt_realpath_t);
5416
subvol = glfs_active_subvol(fs);
5423
ret = glfs_resolve(fs, subvol, path, &loc, &iatt, reval);
5425
ESTALE_RETRY(ret, errno, reval, &loc, retry);
5431
snprintf(retpath, PATH_MAX + 1, "%s", loc.path);
5438
if (warn_deprecated && allocpath)
5441
GLFS_FREE(allocpath);
5445
glfs_subvol_done(fs, subvol);
5453
GFAPI_SYMVER_PUBLIC(glfs_realpath34, glfs_realpath, 3.4.0)
char *
pub_glfs_realpath34(struct glfs *fs, const char *path, char *resolved_path)
{
    /* Legacy 3.4.0 symbol: warn_deprecated=_gf_true makes the common
     * routine allocate with plain malloc() and log a one-time warning,
     * preserving the old free()-based contract for old binaries. */
    return glfs_realpath_common(fs, path, resolved_path, _gf_true);
}
5460
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_realpath, 3.7.17)
char *
pub_glfs_realpath(struct glfs *fs, const char *path, char *resolved_path)
{
    /* Current symbol: warn_deprecated=_gf_false, so an internally
     * allocated result must be released with glfs_free(). */
    return glfs_realpath_common(fs, path, resolved_path, _gf_false);
}
5467
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_getcwd, 3.4.0)
5469
pub_glfs_getcwd(struct glfs *fs, char *buf, size_t n)
5472
inode_t *inode = NULL;
5476
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
5478
if (!buf || n < 2) {
5484
inode = glfs_cwd_get(fs);
5487
strncpy(buf, "/", n);
5492
ret = inode_path(inode, 0, &path);
5499
strncpy(buf, path, n);
5517
gf_flock_to_flock(struct gf_flock *gf_flock, struct flock *flock)
5519
flock->l_type = gf_flock->l_type;
5520
flock->l_whence = gf_flock->l_whence;
5521
flock->l_start = gf_flock->l_start;
5522
flock->l_len = gf_flock->l_len;
5523
flock->l_pid = gf_flock->l_pid;
5527
gf_flock_from_flock(struct gf_flock *gf_flock, struct flock *flock)
5529
gf_flock->l_type = flock->l_type;
5530
gf_flock->l_whence = flock->l_whence;
5531
gf_flock->l_start = flock->l_start;
5532
gf_flock->l_len = flock->l_len;
5533
gf_flock->l_pid = flock->l_pid;
5537
glfs_lock_common(struct glfs_fd *glfd, int cmd, struct flock *flock,
5541
xlator_t *subvol = NULL;
5542
struct gf_flock gf_flock = {
5545
struct gf_flock saved_flock = {
5551
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5559
subvol = glfs_active_subvol(glfd->fs);
5566
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
5573
/* Generate glusterfs flock structure from client flock
5574
* structure to be processed by server */
5575
gf_flock_from_flock(&gf_flock, flock);
5577
/* Keep another copy of flock for split/merge of locks
5579
gf_flock_from_flock(&saved_flock, flock);
5581
if (glfd->lk_owner.len != 0) {
5582
ret = syncopctx_setfslkowner(&glfd->lk_owner);
5588
ret = get_fop_attr_thrd_key(&xdata);
5590
gf_msg_debug("gfapi", 0, "Getting leaseid from thread failed");
5592
ret = syncop_lk(subvol, fd, cmd, &gf_flock, xdata, NULL);
5593
DECODE_SYNCOP_ERR(ret);
5595
/* Convert back from gf_flock to flock as expected by application */
5596
gf_flock_to_flock(&gf_flock, flock);
5598
if (ret == 0 && (cmd == F_SETLK || cmd == F_SETLKW)) {
5599
ret = fd_lk_insert_and_merge(fd, cmd, &saved_flock);
5601
gf_smsg(THIS->name, GF_LOG_ERROR, 0,
5602
API_MSG_LOCK_INSERT_MERGE_FAILED, "gfid=%s",
5603
uuid_utoa(fd->inode->gfid), NULL);
5614
glfs_subvol_done(glfd->fs, subvol);
5622
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_file_lock, 4.0.0)
5624
pub_glfs_file_lock(struct glfs_fd *glfd, int cmd, struct flock *flock,
5625
glfs_lock_mode_t lk_mode)
5628
dict_t *xdata_in = NULL;
5630
if (lk_mode == GLFS_LK_MANDATORY) {
5631
/* Create a new dictionary */
5632
xdata_in = dict_new();
5633
if (xdata_in == NULL) {
5639
/* Set GF_LK_MANDATORY internally within dictionary to map
5640
* GLFS_LK_MANDATORY */
5641
ret = dict_set_uint32(xdata_in, GF_LOCK_MODE, GF_LK_MANDATORY);
5643
gf_smsg(THIS->name, GF_LOG_ERROR, 0,
5644
API_MSG_SETTING_LOCK_TYPE_FAILED, NULL);
5651
ret = glfs_lock_common(glfd, cmd, flock, xdata_in);
5654
dict_unref(xdata_in);
5659
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_posix_lock, 3.4.0)
int
pub_glfs_posix_lock(struct glfs_fd *glfd, int cmd, struct flock *flock)
{
    /* Plain advisory POSIX locking: no xdata, so the lock is taken in
     * the default (non-mandatory) mode; see glfs_file_lock for the
     * mandatory-lock variant. */
    return glfs_lock_common(glfd, cmd, flock, NULL);
}
5666
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fd_set_lkowner, 3.10.7)
5668
pub_glfs_fd_set_lkowner(struct glfs_fd *glfd, void *data, int len)
5673
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5675
if (!GF_REF_GET(glfd)) {
5679
GF_VALIDATE_OR_GOTO(THIS->name, data, out);
5681
if ((len <= 0) || (len > GFAPI_MAX_LOCK_OWNER_LEN)) {
5683
gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ARG,
5684
"lk_owner len=%d", len, NULL);
5688
glfd->lk_owner.len = len;
5690
memcpy(glfd->lk_owner.data, data, len);
5703
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_dup, 3.4.0)
5705
pub_glfs_dup(struct glfs_fd *glfd)
5707
xlator_t *subvol = NULL;
5709
struct glfs_fd *dupfd = NULL;
5710
struct glfs *fs = NULL;
5713
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
5718
subvol = glfs_active_subvol(fs);
5724
fd = glfs_resolve_fd(fs, subvol, glfd);
5730
dupfd = glfs_fd_new(fs);
5736
dupfd->fd = fd_ref(fd);
5737
dupfd->state = glfd->state;
5742
glfs_fd_bind(dupfd);
5746
glfs_subvol_done(fs, subvol);
5755
glfs_enqueue_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
5758
upcall_entry *u_list = NULL;
5760
if (!fs || !upcall_data)
5763
u_list = GF_CALLOC(1, sizeof(*u_list), glfs_mt_upcall_entry_t);
5766
gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
5771
INIT_LIST_HEAD(&u_list->upcall_list);
5773
gf_uuid_copy(u_list->upcall_data.gfid, upcall_data->gfid);
5774
u_list->upcall_data.event_type = upcall_data->event_type;
5776
switch (upcall_data->event_type) {
5777
case GF_UPCALL_CACHE_INVALIDATION:
5778
ret = glfs_get_upcall_cache_invalidation(&u_list->upcall_data,
5781
case GF_UPCALL_RECALL_LEASE:
5782
ret = glfs_get_upcall_lease(&u_list->upcall_data, upcall_data);
5789
gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY, NULL);
5793
pthread_mutex_lock(&fs->upcall_list_mutex);
5795
list_add_tail(&u_list->upcall_list, &fs->upcall_list);
5797
pthread_mutex_unlock(&fs->upcall_list_mutex);
5802
if (ret && u_list) {
5803
GF_FREE(u_list->upcall_data.data);
5809
glfs_free_upcall_lease(void *to_free)
5811
struct glfs_upcall_lease *arg = to_free;
5817
glfs_h_close(arg->object);
5823
glfs_recall_lease_fd(struct glfs *fs, struct gf_upcall *up_data)
5825
struct gf_upcall_recall_lease *recall_lease = NULL;
5826
xlator_t *subvol = NULL;
5828
inode_t *inode = NULL;
5829
struct glfs_fd *glfd = NULL;
5830
struct glfs_fd *tmp = NULL;
5831
struct list_head glfd_list;
5833
struct glfs_lease lease = {
5837
GF_VALIDATE_OR_GOTO("gfapi", up_data, out);
5838
GF_VALIDATE_OR_GOTO("gfapi", fs, out);
5840
recall_lease = up_data->data;
5841
GF_VALIDATE_OR_GOTO("gfapi", recall_lease, out);
5843
INIT_LIST_HEAD(&glfd_list);
5845
subvol = glfs_active_subvol(fs);
5852
gf_msg_debug(THIS->name, 0, "Recall lease received for gfid:%s",
5853
uuid_utoa(up_data->gfid));
5855
inode = inode_find(subvol->itable, up_data->gfid);
5858
gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INODE_FIND_FAILED,
5859
"gfid=%s", uuid_utoa(up_data->gfid), "graph_id=%d",
5860
subvol->graph->id, NULL);
5866
list_for_each_entry(fd, &inode->fd_list, inode_list)
5868
glfd = fd_ctx_get_ptr(fd, subvol);
5870
gf_msg_trace(THIS->name, 0, "glfd (%p) has held lease", glfd);
5872
list_add_tail(&glfd->list, &glfd_list);
5876
UNLOCK(&inode->lock);
5878
if (!list_empty(&glfd_list)) {
5879
list_for_each_entry_safe(glfd, tmp, &glfd_list, list)
5883
if (glfd->state != GLFD_CLOSE) {
5884
gf_msg_trace(THIS->name, 0,
5885
"glfd (%p) has held lease, "
5886
"calling recall cbk",
5888
glfd->cbk(lease, glfd->cookie);
5891
UNLOCK(&glfd->lock);
5893
list_del_init(&glfd->list);
5903
glfs_recall_lease_upcall(struct glfs *fs, struct glfs_upcall *up_arg,
5904
struct gf_upcall *up_data)
5906
struct gf_upcall_recall_lease *recall_lease = NULL;
5907
struct glfs_object *object = NULL;
5908
xlator_t *subvol = NULL;
5910
struct glfs_upcall_lease *up_lease_arg = NULL;
5912
GF_VALIDATE_OR_GOTO("gfapi", up_data, out);
5913
GF_VALIDATE_OR_GOTO("gfapi", fs, out);
5915
recall_lease = up_data->data;
5916
GF_VALIDATE_OR_GOTO("gfapi", recall_lease, out);
5918
subvol = glfs_active_subvol(fs);
5924
gf_msg_debug(THIS->name, 0, "Recall lease received for gfid:%s",
5925
uuid_utoa(up_data->gfid));
5927
object = glfs_h_find_handle(fs, up_data->gfid, GFAPI_HANDLE_LENGTH);
5929
/* The reason handle creation will fail is because we
5930
* couldn't find the inode in the gfapi inode table.
5932
* But since application would have taken inode_ref, the
5933
* only case when this can happen is when it has closed
5934
* the handle and hence will no more be interested in
5935
* the upcall for this particular gfid.
5937
gf_smsg(THIS->name, GF_LOG_DEBUG, errno, API_MSG_CREATE_HANDLE_FAILED,
5938
"gfid=%s", uuid_utoa(up_data->gfid), NULL);
5943
up_lease_arg = GF_MALLOC(sizeof(struct glfs_upcall_lease),
5944
glfs_mt_upcall_inode_t);
5945
if (!up_lease_arg) {
5949
up_lease_arg->object = object;
5950
up_lease_arg->lease_type = recall_lease->lease_type;
5952
up_arg->reason = GLFS_UPCALL_RECALL_LEASE;
5953
up_arg->event = up_lease_arg;
5954
up_arg->free_event = glfs_free_upcall_lease;
5960
/* Close p_object and oldp_object as well if being referenced.*/
5962
glfs_h_close(object);
5964
/* Set reason to prevent applications from using ->event */
5965
up_arg->reason = GF_UPCALL_EVENT_NULL;
5971
upcall_syncop_args_free(struct upcall_syncop_args *args)
5973
dict_t *dict = NULL;
5974
struct gf_upcall *upcall_data = NULL;
5977
upcall_data = &args->upcall_data;
5978
switch (upcall_data->event_type) {
5979
case GF_UPCALL_CACHE_INVALIDATION:
5980
dict = ((struct gf_upcall_cache_invalidation *)(upcall_data
5984
case GF_UPCALL_RECALL_LEASE:
5985
dict = ((struct gf_upcall_recall_lease *)(upcall_data->data))
5992
GF_FREE(upcall_data->client_uid);
5993
GF_FREE(upcall_data->data);
6000
glfs_upcall_syncop_cbk(int ret, call_frame_t *frame, void *opaque)
6002
struct upcall_syncop_args *args = opaque;
6004
(void)upcall_syncop_args_free(args);
6010
glfs_cbk_upcall_syncop(void *opaque)
6012
struct upcall_syncop_args *args = opaque;
6013
struct gf_upcall *upcall_data = NULL;
6014
struct glfs_upcall *up_arg = NULL;
6019
upcall_data = &args->upcall_data;
6025
up_arg = GLFS_CALLOC(1, sizeof(struct gf_upcall), glfs_release_upcall,
6026
glfs_mt_upcall_entry_t);
6028
gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
6033
switch (upcall_data->event_type) {
6034
case GF_UPCALL_CACHE_INVALIDATION:
6035
ret = glfs_h_poll_cache_invalidation(fs, up_arg, upcall_data);
6037
case GF_UPCALL_RECALL_LEASE:
6038
ret = glfs_recall_lease_upcall(fs, up_arg, upcall_data);
6044
/* It could so happen that the file which got
6045
* upcall notification may have got deleted by
6046
* the same client. In such cases up_arg->reason
6047
* is set to GLFS_UPCALL_EVENT_NULL. No need to
6050
if (up_arg->reason == GLFS_UPCALL_EVENT_NULL) {
6051
gf_smsg(THIS->name, GF_LOG_DEBUG, errno,
6052
API_MSG_UPCALL_EVENT_NULL_RECEIVED, NULL);
6057
gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_INVALID_ENTRY, NULL);
6062
if (fs->up_cbk && up_arg)
6063
(fs->up_cbk)(up_arg, fs->up_data);
6065
/* application takes care of calling glfs_free on up_arg post
6066
* their processing */
6072
static struct gf_upcall_cache_invalidation *
6073
gf_copy_cache_invalidation(struct gf_upcall_cache_invalidation *src)
6075
struct gf_upcall_cache_invalidation *dst = NULL;
6080
dst = GF_MALLOC(sizeof(struct gf_upcall_cache_invalidation),
6081
glfs_mt_upcall_entry_t);
6084
gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
6089
dst->flags = src->flags;
6090
dst->expire_time_attr = src->expire_time_attr;
6091
dst->stat = src->stat;
6092
dst->p_stat = src->p_stat;
6093
dst->oldp_stat = src->oldp_stat;
6096
dst->dict = dict_copy_with_ref(src->dict, NULL);
6103
static struct gf_upcall_recall_lease *
6104
gf_copy_recall_lease(struct gf_upcall_recall_lease *src)
6106
struct gf_upcall_recall_lease *dst = NULL;
6111
dst = GF_MALLOC(sizeof(struct gf_upcall_recall_lease),
6112
glfs_mt_upcall_entry_t);
6115
gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED, "entry",
6120
dst->lease_type = src->lease_type;
6121
memcpy(dst->tid, src->tid, 16);
6124
dst->dict = dict_copy_with_ref(src->dict, NULL);
6131
static struct upcall_syncop_args *
6132
upcall_syncop_args_init(struct glfs *fs, struct gf_upcall *upcall_data)
6134
struct upcall_syncop_args *args = NULL;
6136
struct gf_upcall *t_data = NULL;
6138
if (!fs || !upcall_data)
6141
args = GF_CALLOC(1, sizeof(struct upcall_syncop_args),
6142
glfs_mt_upcall_entry_t);
6144
gf_smsg(THIS->name, GF_LOG_ERROR, ENOMEM, API_MSG_ALLOC_FAILED,
6145
"syncop args", NULL);
6149
/* Note: we are not taking any ref on fs here.
6150
* Ideally applications have to unregister for upcall events
6151
* or stop polling for upcall events before performing
6152
* glfs_fini. And as for outstanding synctasks created, we wait
6153
* for all syncenv threads to finish tasks before cleaning up the
6154
* fs->ctx. Hence it seems safe to process these callback
6155
* notification without taking any lock/ref.
6158
t_data = &(args->upcall_data);
6159
t_data->client_uid = gf_strdup(upcall_data->client_uid);
6161
gf_uuid_copy(t_data->gfid, upcall_data->gfid);
6162
t_data->event_type = upcall_data->event_type;
6164
switch (t_data->event_type) {
6165
case GF_UPCALL_CACHE_INVALIDATION:
6166
t_data->data = gf_copy_cache_invalidation(
6167
(struct gf_upcall_cache_invalidation *)upcall_data->data);
6169
case GF_UPCALL_RECALL_LEASE:
6170
t_data->data = gf_copy_recall_lease(
6171
(struct gf_upcall_recall_lease *)upcall_data->data);
6182
GF_FREE(args->upcall_data.client_uid);
6191
glfs_cbk_upcall_data(struct glfs *fs, struct gf_upcall *upcall_data)
6193
struct upcall_syncop_args *args = NULL;
6196
if (!fs || !upcall_data)
6199
if (!(fs->upcall_events & upcall_data->event_type)) {
6200
/* ignore events which application hasn't registered*/
6204
args = upcall_syncop_args_init(fs, upcall_data);
6209
ret = synctask_new(THIS->ctx->env, glfs_cbk_upcall_syncop,
6210
glfs_upcall_syncop_cbk, NULL, args);
6211
/* should we retry incase of failure? */
6213
gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_UPCALL_SYNCOP_FAILED,
6214
"event_type=%d", upcall_data->event_type, "gfid=%s",
6215
(char *)(upcall_data->gfid), NULL);
6216
upcall_syncop_args_free(args);
6224
* This routine is called in case of any notification received
6225
* from the server. All the upcall events are queued up in a list
6226
* to be read by the applications.
6228
* In case if the application registers a cbk function, that shall
6229
* be called by this routine in case of any event received.
6230
* The cbk fn is responsible for notifying the
6231
* applications the way it desires for each event queued (for eg.,
6232
* can raise a signal or broadcast a cond variable etc.)
6234
* Otherwise all the upcall events are queued up in a list
6235
* to be read/polled by the applications.
6237
GFAPI_SYMVER_PRIVATE_DEFAULT(glfs_process_upcall_event, 3.7.0)
6239
priv_glfs_process_upcall_event(struct glfs *fs, void *data)
6241
glusterfs_ctx_t *ctx = NULL;
6242
struct gf_upcall *upcall_data = NULL;
6246
gf_msg_debug(THIS->name, 0, "Upcall gfapi callback is called");
6248
__GLFS_ENTRY_VALIDATE_FS(fs, err);
6253
/* Unlike in I/O path, "glfs_fini" would not have freed
6254
* 'fs' by the time we take lock as it waits for all epoll
6255
* threads to exit including this
6257
pthread_mutex_lock(&fs->mutex);
6261
/* if we're not interested in upcalls (anymore), skip them */
6262
if (ctx->cleanup_started || !fs->cache_upcalls) {
6263
pthread_mutex_unlock(&fs->mutex);
6269
pthread_mutex_unlock(&fs->mutex);
6271
upcall_data = (struct gf_upcall *)data;
6273
gf_msg_trace(THIS->name, 0, "Upcall gfapi gfid = %s",
6274
(char *)(upcall_data->gfid));
6277
* TODO: RECALL LEASE for each glfd
6279
* In case of RECALL_LEASE, we could associate separate
6280
* cbk function for each glfd either by
6281
* - extending pub_glfs_lease to accept new args (recall_cbk_fn, cookie)
6282
* - or by defining new API "glfs_register_recall_cbk_fn (glfd,
6283
* recall_cbk_fn, cookie) . In such cases, flag it and instead of calling
6284
* below upcall functions, define a new one to go through the glfd list and
6285
* invoke each of theirs recall_cbk_fn.
6288
if (fs->up_cbk) { /* upcall cbk registered */
6289
(void)glfs_cbk_upcall_data(fs, upcall_data);
6291
(void)glfs_enqueue_upcall_data(fs, upcall_data);
6294
pthread_mutex_lock(&fs->mutex);
6298
pthread_mutex_unlock(&fs->mutex);
6307
glfs_anonymous_pwritev(struct glfs *fs, struct glfs_object *object,
6308
const struct iovec *iovec, int iovcnt, off_t offset,
6311
xlator_t *subvol = NULL;
6312
struct iobref *iobref = NULL;
6313
struct iobuf *iobuf = NULL;
6314
struct iovec iov = {
6317
inode_t *inode = NULL;
6323
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
6325
subvol = glfs_active_subvol(fs);
6332
/* get/refresh the in arg objects inode in correlation to the xlator */
6333
inode = glfs_resolve_inode(fs, subvol, object);
6340
fd = fd_anonymous(inode);
6343
gf_smsg("gfapi", GF_LOG_ERROR, ENOMEM, API_MSG_FDCREATE_FAILED, NULL);
6348
size = iov_length(iovec, iovcnt);
6350
iobuf = iobuf_get2(subvol->ctx->iobuf_pool, size);
6357
iobref = iobref_new();
6365
ret = iobref_add(iobref, iobuf);
6368
iobref_unref(iobref);
6374
iov_unload(iobuf_ptr(iobuf), iovec, iovcnt);
6376
iov.iov_base = iobuf_ptr(iobuf);
6379
/* TODO : set leaseid */
6380
ret = syncop_writev(subvol, fd, &iov, 1, offset, iobref, flags, NULL, NULL,
6382
DECODE_SYNCOP_ERR(ret);
6385
iobref_unref(iobref);
6398
glfs_subvol_done(fs, subvol);
6407
glfs_anonymous_preadv(struct glfs *fs, struct glfs_object *object,
6408
const struct iovec *iovec, int iovcnt, off_t offset,
6411
xlator_t *subvol = NULL;
6412
struct iovec *iov = NULL;
6413
struct iobref *iobref = NULL;
6414
inode_t *inode = NULL;
6421
__GLFS_ENTRY_VALIDATE_FS(fs, invalid_fs);
6423
subvol = glfs_active_subvol(fs);
6430
/* get/refresh the in arg objects inode in correlation to the xlator */
6431
inode = glfs_resolve_inode(fs, subvol, object);
6438
fd = fd_anonymous(inode);
6441
gf_smsg("gfapi", GF_LOG_ERROR, ENOMEM, API_MSG_FDCREATE_FAILED, NULL);
6446
size = iov_length(iovec, iovcnt);
6448
/* TODO : set leaseid */
6449
ret = syncop_readv(subvol, fd, size, offset, flags, &iov, &cnt, &iobref,
6451
DECODE_SYNCOP_ERR(ret);
6455
size = iov_copy(iovec, iovcnt, iov, cnt);
6462
iobref_unref(iobref);
6469
glfs_subvol_done(fs, subvol);
6478
glfs_release_xreaddirp_stat(void *ptr)
6480
struct glfs_xreaddirp_stat *to_free = ptr;
6482
if (to_free->object)
6483
glfs_h_close(to_free->object);
6487
* Given glfd of a directory, this function does readdirp and returns
6488
* xstat along with dirents.
6490
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_r, 3.11.0)
6492
pub_glfs_xreaddirplus_r(struct glfs_fd *glfd, uint32_t flags,
6493
struct glfs_xreaddirp_stat **xstat_p,
6494
struct dirent *ext, struct dirent **res)
6497
gf_dirent_t *entry = NULL;
6498
struct dirent *buf = NULL;
6499
struct glfs_xreaddirp_stat *xstat = NULL;
6502
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
6506
GF_VALIDATE_OR_GOTO(THIS->name, xstat_p, out);
6507
GF_VALIDATE_OR_GOTO(THIS->name, res, out);
6514
buf = glfs_readdirbuf_get(glfd);
6519
xstat = GLFS_CALLOC(1, sizeof(struct glfs_xreaddirp_stat),
6520
glfs_release_xreaddirp_stat, glfs_mt_xreaddirp_stat_t);
6525
/* this is readdirplus operation */
6526
entry = glfd_entry_next(glfd, 1);
6528
/* XXX: Ideally when we reach EOD, errno should have been
6529
* set to ENOENT. But that doesn't seem to be the case.
6531
* The only way to confirm if its EOD at this point is that
6532
* errno == 0 and entry == NULL
6538
/* reached EOD, ret = 0 */
6543
/* free xstat as applications shall not be using it */
6550
gf_dirent_to_dirent(entry, buf);
6552
if (flags & GFAPI_XREADDIRP_STAT) {
6553
glfs_iatt_to_stat(glfd->fs, &entry->d_stat, &xstat->st);
6554
xstat->flags_handled |= GFAPI_XREADDIRP_STAT;
6557
if ((flags & GFAPI_XREADDIRP_HANDLE) &&
6559
strcmp(buf->d_name, ".") && strcmp(buf->d_name, "..")) {
6560
/* Now create object.
6561
* We can use "glfs_h_find_handle" as well as inodes would have
6562
* already got linked as part of 'gf_link_inodes_from_dirent' */
6563
xstat->object = glfs_h_create_from_handle(
6564
glfd->fs, entry->d_stat.ia_gfid, GFAPI_HANDLE_LENGTH, NULL);
6566
if (xstat->object) { /* success */
6567
/* note: xstat->object->inode->ref is taken
6568
* This shall be unref'ed when application does
6569
* glfs_free(xstat) */
6570
xstat->flags_handled |= GFAPI_XREADDIRP_HANDLE;
6574
ret = xstat->flags_handled;
6577
gf_msg_debug(THIS->name, 0,
6578
"xreaddirp- requested_flags (%x) , processed_flags (%x)",
6579
flags, xstat->flags_handled);
6585
gf_smsg(THIS->name, GF_LOG_WARNING, errno, API_MSG_XREADDIRP_R_FAILED,
6586
"reason=%s", strerror(errno), NULL);
6600
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_xreaddirplus_get_stat, 3.11.0)
6602
pub_glfs_xreaddirplus_get_stat(struct glfs_xreaddirp_stat *xstat)
6604
GF_VALIDATE_OR_GOTO("glfs_xreaddirplus_get_stat", xstat, out);
6606
if (!(xstat->flags_handled & GFAPI_XREADDIRP_STAT))
6607
gf_smsg(THIS->name, GF_LOG_ERROR, errno, API_MSG_FLAGS_HANDLE,
6608
"GFAPI_XREADDIRP_STAT"
6610
xstat, "handles=%x", xstat->flags_handled, NULL);
6618
gf_lease_to_glfs_lease(struct gf_lease *gf_lease, struct glfs_lease *lease)
6620
u_int lease_type = gf_lease->lease_type;
6621
lease->cmd = gf_lease->cmd;
6622
lease->lease_type = lease_type;
6623
memcpy(lease->lease_id, gf_lease->lease_id, LEASE_ID_SIZE);
6627
glfs_lease_to_gf_lease(struct glfs_lease *lease, struct gf_lease *gf_lease)
6629
u_int lease_type = lease->lease_type;
6630
gf_lease->cmd = lease->cmd;
6631
gf_lease->lease_type = lease_type;
6632
memcpy(gf_lease->lease_id, lease->lease_id, LEASE_ID_SIZE);
6635
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_lease, 4.0.0)
6637
pub_glfs_lease(struct glfs_fd *glfd, struct glfs_lease *lease,
6638
glfs_recall_cbk fn, void *data)
6644
xlator_t *subvol = NULL;
6646
struct gf_lease gf_lease = {
6651
__GLFS_ENTRY_VALIDATE_FD(glfd, invalid_fs);
6655
if (!is_valid_lease_id(lease->lease_id)) {
6661
subvol = glfs_active_subvol(glfd->fs);
6668
fd = glfs_resolve_fd(glfd->fs, subvol, glfd);
6675
switch (lease->lease_type) {
6677
if ((fd->flags != O_RDONLY) && !(fd->flags & O_RDWR)) {
6684
if (!((fd->flags & O_WRONLY) || (fd->flags & O_RDWR))) {
6691
if (lease->cmd != GLFS_GET_LEASE) {
6700
GLFS_LOC_FILL_INODE(fd->inode, loc, out);
6702
glfs_lease_to_gf_lease(lease, &gf_lease);
6704
ret = syncop_lease(subvol, &loc, &gf_lease, NULL, NULL);
6705
DECODE_SYNCOP_ERR(ret);
6707
gf_lease_to_glfs_lease(&gf_lease, lease);
6709
/* TODO: Add leases for client replay
6710
if (ret == 0 && (cmd == F_SETLK || cmd == F_SETLKW))
6711
fd_lk_insert_and_merge (fd, cmd, &saved_flock);
6714
ret = fd_ctx_set(glfd->fd, subvol, (uint64_t)(long)glfd);
6716
gf_smsg(subvol->name, GF_LOG_ERROR, ENOMEM,
6717
API_MSG_FDCTX_SET_FAILED, "fd=%p", glfd->fd, NULL);
6721
glfd->cookie = data;
6730
glfs_subvol_done(glfd->fs, subvol);
6738
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mkdirat, 11.0)
6740
pub_glfs_mkdirat(struct glfs_fd *pglfd, const char *path, mode_t mode)
6744
xlator_t *subvol = NULL;
6748
struct iatt iatt = {
6752
dict_t *xattr_req = NULL;
6755
__GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
6760
cleanup_fopat_args(pglfd, subvol, ret, &loc);
6763
subvol = setup_fopat_args(pglfd, path, 0, &loc, &iatt, reval);
6768
ESTALE_RETRY(ret, errno, reval, &loc, retry);
6775
ret = setup_entry_fopat_args(gfid, &xattr_req, &loc);
6780
ret = syncop_mkdir(subvol, &loc, mode, &iatt, xattr_req, NULL);
6781
DECODE_SYNCOP_ERR(ret);
6783
ESTALE_RETRY(ret, errno, reval, &loc, retry);
6786
ret = glfs_loc_link(&loc, &iatt);
6789
dict_unref(xattr_req);
6791
cleanup_fopat_args(pglfd, subvol, ret, &loc);
6799
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_faccessat, 11.0)
6801
pub_glfs_faccessat(struct glfs_fd *pglfd, const char *path, int mode, int flags)
6805
xlator_t *subvol = NULL;
6809
struct iatt iatt = {
6815
__GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
6817
no_follow = (flags & AT_SYMLINK_NOFOLLOW) == AT_SYMLINK_NOFOLLOW;
6822
cleanup_fopat_args(pglfd, subvol, ret, &loc);
6825
subvol = setup_fopat_args(pglfd, path, !no_follow, &loc, &iatt, reval);
6830
ESTALE_RETRY(ret, errno, reval, &loc, retry);
6843
ret = syncop_access(subvol, &loc, mode, NULL, NULL);
6844
DECODE_SYNCOP_ERR(ret);
6846
ESTALE_RETRY(ret, errno, reval, &loc, retry);
6848
cleanup_fopat_args(pglfd, subvol, ret, &loc);
6855
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchmodat, 11.0)
6857
pub_glfs_fchmodat(struct glfs_fd *pglfd, const char *path, mode_t mode,
6861
xlator_t *subvol = NULL;
6865
struct iatt iatt = {
6871
struct glfs_stat stat = {
6875
stat.glfs_st_mode = mode;
6876
stat.glfs_st_mask = GLFS_STAT_MODE;
6879
__GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
6881
no_follow = (flags & AT_SYMLINK_NOFOLLOW) == AT_SYMLINK_NOFOLLOW;
6882
subvol = setup_fopat_args(pglfd, path, !no_follow, &loc, &iatt, 0);
6895
glfs_iatt_from_statx(&iatt, &stat);
6896
glfsflags_from_gfapiflags(&stat, &glvalid);
6898
ret = syncop_setattr(subvol, &loc, &iatt, glvalid, 0, 0, NULL, NULL);
6899
DECODE_SYNCOP_ERR(ret);
6902
cleanup_fopat_args(pglfd, subvol, ret, &loc);
6909
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_fchownat, 11.0)
6911
pub_glfs_fchownat(struct glfs_fd *pglfd, const char *path, uid_t uid, gid_t gid,
6915
struct glfs_stat stat = {
6919
if (uid != (uid_t)-1) {
6920
stat.glfs_st_uid = uid;
6921
stat.glfs_st_mask = GLFS_STAT_UID;
6924
if (gid != (uid_t)-1) {
6925
stat.glfs_st_gid = gid;
6926
stat.glfs_st_mask = stat.glfs_st_mask | GLFS_STAT_GID;
6929
xlator_t *subvol = NULL;
6933
struct iatt iatt = {
6938
int is_path_empty = 0;
6941
__GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
6943
no_follow = (flags & AT_SYMLINK_NOFOLLOW) == AT_SYMLINK_NOFOLLOW;
6944
is_path_empty = (flags & AT_EMPTY_PATH) == AT_EMPTY_PATH;
6946
if (is_path_empty && path[0] == '\0') {
6949
subvol = glfs_active_subvol(pglfd->fs);
6956
fd_to_loc(pglfd, &loc);
6958
subvol = setup_fopat_args(pglfd, path, !no_follow, &loc, &iatt, 0);
6972
glfs_iatt_from_statx(&iatt, &stat);
6973
glfsflags_from_gfapiflags(&stat, &glvalid);
6975
if (stat.glfs_st_mask) {
6976
ret = syncop_setattr(subvol, &loc, &iatt, glvalid, 0, 0, NULL, NULL);
6977
DECODE_SYNCOP_ERR(ret);
6981
cleanup_fopat_args(pglfd, subvol, ret, &loc);
6989
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_linkat, 11.0)
6991
pub_glfs_linkat(struct glfs_fd *oldpglfd, const char *oldpath,
6992
struct glfs_fd *newpglfd, const char *newpath, int flags)
6996
xlator_t *oldsubvol = NULL;
6997
xlator_t *newsubvol = NULL;
7004
struct iatt oldiatt = {
7007
struct iatt newiatt = {
7011
int is_path_empty = 0;
7014
__GLFS_ENTRY_VALIDATE_FD(oldpglfd, invalid_fs);
7015
__GLFS_ENTRY_VALIDATE_FD(newpglfd, invalid_fs);
7017
/* Old path will not be de-referenced by default if it is a sym-link.
7018
If 'AT_SYMLINK_FOLLOW' flag is set, then oldpath is deferenced to
7021
If oldpath is a symbolic link and 'AT_SYMLINK_FOLLOW' is set then
7022
a new link created will be a symbolic link to defreferenced oldpath.
7024
follow = (flags & AT_SYMLINK_FOLLOW) == AT_SYMLINK_FOLLOW;
7025
is_path_empty = (flags & AT_EMPTY_PATH) == AT_EMPTY_PATH;
7030
cleanup_fopat_args(oldpglfd, oldsubvol, ret, &oldloc);
7033
if (is_path_empty && oldpath[0] == '\0') {
7034
GF_REF_GET(oldpglfd);
7036
oldsubvol = glfs_active_subvol(oldpglfd->fs);
7043
fd_to_loc(oldpglfd, &oldloc);
7045
if (*&oldloc.inode->ia_type == IA_IFDIR) {
7051
oldsubvol = setup_fopat_args(oldpglfd, oldpath, follow, &oldloc,
7059
ESTALE_RETRY(ret, errno, reval, &oldloc, retry);
7065
if (oldsubvol && !oldloc.inode) {
7074
cleanup_fopat_args(newpglfd, newsubvol, ret, &newloc);
7076
/* The 'AT_SYMLINK_FOLLOW' flag applies only to oldpath.
7078
newsubvol = setup_fopat_args(newpglfd, newpath, 0, &newloc, &newiatt,
7084
ESTALE_RETRY(ret, errno, reval, &newloc, retrynew);
7086
if (newsubvol && newloc.inode) {
7092
if (oldiatt.ia_type == IA_IFDIR) {
7098
/* Filling the inode of the hard link to be same as that of the
7102
inode_unref(newloc.inode);
7103
newloc.inode = NULL;
7105
newloc.inode = inode_ref(oldloc.inode);
7107
ret = syncop_link(newsubvol, &oldloc, &newloc, &newiatt, NULL, NULL);
7108
DECODE_SYNCOP_ERR(ret);
7111
ret = glfs_loc_link(&newloc, &newiatt);
7113
cleanup_fopat_args(oldpglfd, oldsubvol, ret, &oldloc);
7114
cleanup_fopat_args(newpglfd, newsubvol, ret, &newloc);
7122
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_mknodat, 11.0)
7124
pub_glfs_mknodat(struct glfs_fd *pglfd, const char *path, mode_t mode,
7129
xlator_t *subvol = NULL;
7133
struct iatt iatt = {
7137
dict_t *xattr_req = NULL;
7140
__GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
7145
cleanup_fopat_args(pglfd, subvol, ret, &loc);
7148
subvol = setup_fopat_args(pglfd, path, 0, &loc, &iatt, reval);
7153
ESTALE_RETRY(ret, errno, reval, &loc, retry);
7160
ret = setup_entry_fopat_args(gfid, &xattr_req, &loc);
7165
ret = syncop_mknod(subvol, &loc, mode, dev, &iatt, xattr_req, NULL);
7166
DECODE_SYNCOP_ERR(ret);
7168
ESTALE_RETRY(ret, errno, reval, &loc, retry);
7171
ret = glfs_loc_link(&loc, &iatt);
7175
dict_unref(xattr_req);
7177
cleanup_fopat_args(pglfd, subvol, ret, &loc);
7184
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_readlinkat, 11.0)
7186
pub_glfs_readlinkat(struct glfs_fd *pglfd, const char *path, char *buf,
7191
xlator_t *subvol = NULL;
7195
struct iatt iatt = {
7198
char *linkval = NULL;
7201
__GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
7206
cleanup_fopat_args(pglfd, subvol, ret, &loc);
7209
subvol = setup_fopat_args(pglfd, path, 0, &loc, &iatt, reval);
7214
ESTALE_RETRY(ret, errno, reval, &loc, retry);
7225
if (iatt.ia_type != IA_IFLNK) {
7231
ret = syncop_readlink(subvol, &loc, &linkval, bufsiz, NULL, NULL);
7232
DECODE_SYNCOP_ERR(ret);
7234
memcpy(buf, linkval, ret);
7238
ESTALE_RETRY(ret, errno, reval, &loc, retry);
7241
cleanup_fopat_args(pglfd, subvol, ret, &loc);
7249
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_renameat, 11.0)
7251
pub_glfs_renameat(struct glfs_fd *oldpglfd, const char *oldpath,
7252
struct glfs_fd *newpglfd, const char *newpath)
7256
xlator_t *oldsubvol = NULL;
7257
xlator_t *newsubvol = NULL;
7264
struct iatt oldiatt = {
7267
struct iatt newiatt = {
7272
__GLFS_ENTRY_VALIDATE_FD(oldpglfd, invalid_fs);
7273
__GLFS_ENTRY_VALIDATE_FD(newpglfd, invalid_fs);
7278
cleanup_fopat_args(oldpglfd, oldsubvol, ret, &oldloc);
7281
oldsubvol = setup_fopat_args(oldpglfd, oldpath, 0, &oldloc, &oldiatt,
7287
ESTALE_RETRY(ret, errno, reval, &oldloc, retry);
7293
/* subvol is not NULL */
7294
if (!oldloc.inode) {
7303
cleanup_fopat_args(newpglfd, newsubvol, ret, &newloc);
7306
newsubvol = setup_fopat_args(newpglfd, newpath, 0, &newloc, &newiatt,
7312
ESTALE_RETRY(ret, errno, reval, &newloc, retrynew);
7318
if (newsubvol && newloc.inode) {
7324
if (errno != ENOENT && newloc.parent)
7327
if (newiatt.ia_type != IA_INVAL) {
7328
if ((oldiatt.ia_type == IA_IFDIR) != (newiatt.ia_type == IA_IFDIR)) {
7329
/* Either both old and new must be dirs,
7330
* or both must be non-dirs. Else, fail.
7338
/* TODO: - check if new or old is a prefix of the other, and fail EINVAL
7341
ret = syncop_rename(newsubvol, &oldloc, &newloc, NULL, NULL);
7342
DECODE_SYNCOP_ERR(ret);
7345
inode_rename(oldloc.parent->table, oldloc.parent, oldloc.name,
7346
newloc.parent, newloc.name, oldloc.inode, &oldiatt);
7348
if (newloc.inode && !inode_has_dentry(newloc.inode))
7349
inode_forget(newloc.inode, 0);
7352
cleanup_fopat_args(oldpglfd, oldsubvol, ret, &oldloc);
7353
cleanup_fopat_args(newpglfd, newsubvol, ret, &newloc);
7361
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_renameat2, 11.0)
7363
pub_glfs_renameat2(struct glfs_fd *oldpglfd, const char *oldpath,
7364
struct glfs_fd *newpglfd, const char *newpath, int flags)
7368
xlator_t *oldsubvol = NULL;
7369
xlator_t *newsubvol = NULL;
7376
struct iatt oldiatt = {
7379
struct iatt newiatt = {
7384
__GLFS_ENTRY_VALIDATE_FD(oldpglfd, invalid_fs);
7385
__GLFS_ENTRY_VALIDATE_FD(newpglfd, invalid_fs);
7390
cleanup_fopat_args(oldpglfd, oldsubvol, ret, &oldloc);
7393
oldsubvol = setup_fopat_args(oldpglfd, oldpath, 0, &oldloc, &oldiatt,
7399
ESTALE_RETRY(ret, errno, reval, &oldloc, retry);
7405
/* subvol is not NULL */
7406
if (!oldloc.inode) {
7415
cleanup_fopat_args(newpglfd, newsubvol, ret, &newloc);
7418
newsubvol = setup_fopat_args(newpglfd, newpath, 0, &newloc, &newiatt,
7424
ESTALE_RETRY(ret, errno, reval, &newloc, retrynew);
7436
if (errno != ENOENT && newloc.parent)
7439
if (newiatt.ia_type != IA_INVAL) {
7440
if ((oldiatt.ia_type == IA_IFDIR) != (newiatt.ia_type == IA_IFDIR)) {
7441
/* Either both old and new must be dirs,
7442
* or both must be non-dirs. Else, fail.
7450
/* TODO: - check if new or old is a prefix of the other, and fail EINVAL
7453
ret = syncop_rename(newsubvol, &oldloc, &newloc, NULL, NULL);
7454
DECODE_SYNCOP_ERR(ret);
7456
if (ret == -1 && errno == ESTALE) {
7457
if (reval < DEFAULT_REVAL_COUNT) {
7466
inode_rename(oldloc.parent->table, oldloc.parent, oldloc.name,
7467
newloc.parent, newloc.name, oldloc.inode, &oldiatt);
7469
if (newloc.inode && !inode_has_dentry(newloc.inode))
7470
inode_forget(newloc.inode, 0);
7473
cleanup_fopat_args(oldpglfd, oldsubvol, ret, &oldloc);
7474
cleanup_fopat_args(newpglfd, newsubvol, ret, &newloc);
7482
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_symlinkat, 11.0)
7484
pub_glfs_symlinkat(const char *data, struct glfs_fd *pglfd, const char *path)
7488
xlator_t *subvol = NULL;
7492
struct iatt iatt = {
7496
dict_t *xattr_req = NULL;
7499
__GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
7504
cleanup_fopat_args(pglfd, subvol, ret, &loc);
7507
subvol = setup_fopat_args(pglfd, path, 0, &loc, &iatt, reval);
7512
ESTALE_RETRY(ret, errno, reval, &loc, retry);
7520
if (subvol && loc.inode) {
7526
ret = setup_entry_fopat_args(gfid, &xattr_req, &loc);
7531
ret = syncop_symlink(subvol, &loc, data, &iatt, xattr_req, NULL);
7532
DECODE_SYNCOP_ERR(ret);
7534
ESTALE_RETRY(ret, errno, reval, &loc, retry);
7537
ret = glfs_loc_link(&loc, &iatt);
7540
dict_unref(xattr_req);
7542
cleanup_fopat_args(pglfd, subvol, ret, &loc);
7550
GFAPI_SYMVER_PUBLIC_DEFAULT(glfs_unlinkat, 11.0)
7552
pub_glfs_unlinkat(struct glfs_fd *pglfd, const char *path, int flags)
7557
xlator_t *subvol = NULL;
7561
struct iatt iatt = {
7566
__GLFS_ENTRY_VALIDATE_FD(pglfd, invalid_fs);
7568
is_rmdir = (flags & AT_REMOVEDIR) == AT_REMOVEDIR;
7573
cleanup_fopat_args(pglfd, subvol, ret, &loc);
7576
subvol = setup_fopat_args(pglfd, path, 0, &loc, &iatt, reval);
7581
ESTALE_RETRY(ret, errno, reval, &loc, retry);
7588
/* If a directory is to be unlinked then 'AT_REMOVEDIR'
7589
is to be used mandatorily.
7591
if (iatt.ia_type == IA_IFDIR && !is_rmdir) {
7595
} else if (iatt.ia_type != IA_IFDIR && is_rmdir) {
7601
/* TODO: Add leaseid */
7602
/* Unlink or rmdir based on 'AT_REMOVEDIR' flag */
7604
ret = syncop_unlink(subvol, &loc, NULL, NULL);
7606
ret = syncop_rmdir(subvol, &loc, 0, NULL, NULL);
7608
DECODE_SYNCOP_ERR(ret);
7610
ESTALE_RETRY(ret, errno, reval, &loc, retry);
7613
ret = glfs_loc_unlink(&loc);
7615
cleanup_fopat_args(pglfd, subvol, ret, &loc);