17
#include <kernel/spinlock.h>
18
#include <kernel/thread/sync/mutex.h>
19
#include <lib/libds/dlist.h>
26
/* Serializes compound inode metadata operations (load-modify-save cycles
 * on struct inode_info) via vfs_inode_ops_lock()/vfs_inode_ops_unlock(). */
static struct mutex inode_mutex = MUTEX_INIT(inode_mutex);
/* Acquire the global inode-operations mutex; pair with vfs_inode_ops_unlock(). */
void vfs_inode_ops_lock(void) {
	mutex_lock(&inode_mutex);
}
/* Release the global inode-operations mutex taken by vfs_inode_ops_lock(). */
void vfs_inode_ops_unlock(void) {
	mutex_unlock(&inode_mutex);
}
/**
 * Fill @inode with the VFS root: the mount-point ino on the root
 * filesystem's superblock.
 */
void vfs_inode_get_root(struct inode *inode) {
	inode->ino = VFS_MPT_INO;
	inode->sb = vfs_get_rootfs();
}
bool vfs_inode_is_root(const struct inode *inode) {
49
vfs_inode_get_root(&root);
51
return (inode->ino == root.ino) && (inode->sb == root.sb);
54
/* An inode is "bad" (invalid / not yet set) when its ino equals VFS_BAD_INO. */
bool vfs_inode_is_bad(const struct inode *inode) {
	return inode->ino == VFS_BAD_INO;
}
/* A mount point is identified purely by the reserved ino VFS_MPT_INO. */
bool vfs_inode_is_mount_point(const struct inode *inode) {
	return inode->ino == VFS_MPT_INO;
}
/* "Normal" = neither bad nor a mount point, i.e. a regular driver-backed inode. */
bool vfs_inode_is_normal(const struct inode *inode) {
	return (inode->ino != VFS_BAD_INO) && (inode->ino != VFS_MPT_INO);
}
/**
 * Compare @inode's current mtime against a previously sampled @old.
 *
 * @return true iff the stored mtime differs from *old in either field.
 */
bool vfs_inode_is_modified(const struct inode *inode, struct timespec *old) {
	struct timespec mtime;

	assert(!vfs_inode_is_bad(inode));

	vfs_inode_get_mtime(inode, &mtime);

	return (mtime.tv_sec != old->tv_sec) || (mtime.tv_nsec != old->tv_nsec);
}
/**
 * A directory is either a mount point (always a directory by construction)
 * or a normal inode whose mode has S_IFDIR set.
 */
bool vfs_inode_is_directory(const struct inode *inode) {
	assert(!vfs_inode_is_bad(inode));

	/* Check mount point first: its info lives on the superblock and may not
	 * carry a meaningful driver-side mode. */
	return vfs_inode_is_mount_point(inode)
	       || S_ISDIR(vfs_inode_get_mode(inode));
}
void vfs_inode_init(const struct inode *inode, mode_t mode) {
93
struct inode_info info;
95
clock_gettime(CLOCK_REALTIME, &info.mtime);
96
info.owner = getuid();
97
info.group = getgid();
100
vfs_inode_save_info(inode, &info);
103
void vfs_inode_load_info(const struct inode *inode, struct inode_info *info) {
104
assert(!vfs_inode_is_bad(inode));
106
if (vfs_inode_is_mount_point(inode)) {
107
memcpy(info, &inode->sb->info, sizeof(struct inode_info));
110
inode->sb->drv->ops.load_info(inode, info);
114
void vfs_inode_save_info(const struct inode *inode,
115
const struct inode_info *info) {
116
assert(!vfs_inode_is_bad(inode));
118
if (vfs_inode_is_mount_point(inode)) {
119
memcpy(&inode->sb->info, info, sizeof(struct inode_info));
122
inode->sb->drv->ops.save_info(inode, info);
126
/* Copy @inode's modification time out of its stored inode_info into *mtime. */
void vfs_inode_get_mtime(const struct inode *inode, struct timespec *mtime) {
	struct inode_info info;

	vfs_inode_load_info(inode, &info);
	memcpy(mtime, &info.mtime, sizeof(struct timespec));
}
/**
 * Stamp @inode's mtime with the current CLOCK_REALTIME time.
 *
 * Load-modify-save is not atomic by itself; callers presumably hold
 * vfs_inode_ops_lock() around compound updates — TODO confirm.
 */
void vfs_inode_update_mtime(const struct inode *inode) {
	struct inode_info info;
	struct timespec mtime;

	clock_gettime(CLOCK_REALTIME, &mtime);

	vfs_inode_load_info(inode, &info);
	memcpy(&info.mtime, &mtime, sizeof(struct timespec));
	vfs_inode_save_info(inode, &info);
}
mode_t vfs_inode_get_mode(const struct inode *inode) {
145
struct inode_info info;
147
vfs_inode_load_info(inode, &info);
152
/**
 * Replace @inode's permission bits with those of @mode.
 *
 * Only the bits covered by S_IRWXA are touched; the file-type bits in the
 * stored mode are preserved.
 */
void vfs_inode_set_mode(const struct inode *inode, mode_t mode) {
	struct inode_info info;

	vfs_inode_load_info(inode, &info);
	info.mode &= ~S_IRWXA;        /* clear old permission bits */
	info.mode |= mode & S_IRWXA;  /* install the new ones, nothing else */
	vfs_inode_save_info(inode, &info);
}
uid_t vfs_inode_get_owner(const struct inode *inode) {
162
struct inode_info info;
164
vfs_inode_load_info(inode, &info);
169
void vfs_inode_set_owner(const struct inode *inode, uid_t owner) {
170
struct inode_info info;
172
vfs_inode_load_info(inode, &info);
174
vfs_inode_save_info(inode, &info);
177
gid_t vfs_inode_get_group(const struct inode *inode) {
178
struct inode_info info;
180
vfs_inode_load_info(inode, &info);
185
void vfs_inode_set_group(const struct inode *inode, gid_t group) {
186
struct inode_info info;
188
vfs_inode_load_info(inode, &info);
190
vfs_inode_save_info(inode, &info);
193
/**
 * Take a usage reference on @inode's superblock and, for normal inodes,
 * forward the lock to the driver if it implements one.
 *
 * The superblock spinlock (IPL disabled) protects usage_count and the
 * driver lock call as one unit.
 *
 * Fix: local `normal` was used without a declaration.
 */
void vfs_inode_lock(const struct inode *inode) {
	bool normal;

	assert(!vfs_inode_is_bad(inode));
	assert(inode->sb->usage_count >= 0);

	/* Classify before taking the spinlock: keeps the critical section short. */
	normal = vfs_inode_is_normal(inode);

	spin_lock_ipl_disable(&inode->sb->lock);

	inode->sb->usage_count++;
	if (normal && inode->sb->drv->ops.lock) {
		inode->sb->drv->ops.lock(inode);
	}

	spin_unlock_ipl_enable(&inode->sb->lock);
}
/**
 * Drop a usage reference on @inode's superblock and, for normal inodes,
 * forward the unlock to the driver if it implements one.
 *
 * Must balance a prior vfs_inode_lock() (asserted via usage_count > 0).
 *
 * Fix: local `normal` was used without a declaration.
 */
void vfs_inode_unlock(const struct inode *inode) {
	bool normal;

	assert(!vfs_inode_is_bad(inode));
	assert(inode->sb->usage_count > 0);

	normal = vfs_inode_is_normal(inode);

	spin_lock_ipl_disable(&inode->sb->lock);

	inode->sb->usage_count--;
	if (normal && inode->sb->drv->ops.unlock) {
		inode->sb->drv->ops.unlock(inode);
	}

	spin_unlock_ipl_enable(&inode->sb->lock);
}
bool vfs_inode_is_available(const struct inode *inode, int access_mode) {
230
struct inode_info info;
234
vfs_inode_load_info(inode, &info);
236
if (info.owner == getuid()) {
237
write_access = info.mode & S_IRUSR;
238
read_access = info.mode & S_IWUSR;
240
else if (info.group == getgid()) {
241
write_access = info.mode & S_IRGRP;
242
read_access = info.mode & S_IWGRP;
245
write_access = info.mode & S_IROTH;
246
read_access = info.mode & S_IWOTH;
249
switch (access_mode & O_ACCESS_MASK) {
257
return read_access & write_access;
261
/**
 * Open @inode with flags @oflag after a permission check.
 *
 * @return the driver's idesc on success, or err2ptr(EACCES) when the
 *         caller lacks the requested access.
 *
 * NOTE(review): assumes every driver provides ops.open (no NULL check
 * before the call) — confirm drivers are required to implement it.
 */
struct idesc *vfs_inode_open(const struct inode *inode, int oflag) {
	if (!vfs_inode_is_available(inode, oflag)) {
		return err2ptr(EACCES);
	}

	return inode->sb->drv->ops.open(inode, oflag);
}