embox / kflock.c
207 lines · 5.9 KB
/*
 * @file
 *
 * @date Nov 29, 2012
 * @author: Anton Bondarev
 */

#include <fcntl.h>
#include <sys/file.h>

#include <fs/file_desc.h>
#include <fs/inode.h>

#include <kernel/spinlock.h>
#include <kernel/thread.h>

#include <mem/misc/pool.h>

#include <framework/mod/options.h>

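/* Pool of shared-lock holder entries; the pool size is taken from the
 * module's flock_quantity option. */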
#define MAX_FLOCK_QUANTITY OPTION_GET(NUMBER, flock_quantity)
POOL_DEF(flock_pool, struct flock_shared, MAX_FLOCK_QUANTITY);

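/* Register the calling thread as a holder of the shared lock: allocate a
 * flock_shared entry from the pool, link it into the holder list and bump
 * the shared holder count. */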
static int flock_shared_get(struct node_flock *flock) {
	struct flock_shared *shlock;
	struct thread *current = thread_self();

	shlock = pool_alloc(&flock_pool);
	if (NULL == shlock) {
		return -ENOMEM;
	}
	dlist_add_next(dlist_head_init(&shlock->flock_link), &flock->shlock_holders);
	shlock->holder = current;
	flock->shlock_count++;

	return -ENOERR;
}

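/* Drop the calling thread's shared-lock entries: unlink them from the holder
 * list, return them to the pool and decrement the shared holder count. */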
static int flock_shared_put(struct node_flock *flock) {
	struct flock_shared *shlock;
	struct thread *current = thread_self();

	dlist_foreach_entry(shlock, &flock->shlock_holders, flock_link) {
		if (current == shlock->holder) {
			dlist_del(&shlock->flock_link);
			pool_free(&flock_pool, shlock);
			flock->shlock_count--;
		}
	}

	return -ENOERR;
}

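/* Exclusive locking is delegated to the mutex embedded in the per-node flock
 * structure. */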
static inline void flock_exclusive_get(struct mutex *exlock) {
	mutex_lock(exlock);
}

static inline int flock_exclusive_tryget(struct mutex *exlock) {
	return mutex_trylock(exlock);
}

static inline void flock_exclusive_put(struct mutex *exlock) {
	mutex_unlock(exlock);
}

/**
 * Apply an advisory lock to the specified file descriptor
 * @param fd File descriptor number
 * @param operation One of LOCK_EX, LOCK_SH, LOCK_UN, optionally OR'ed with LOCK_NB
 * @return ENOERR if the operation succeeds, or -1 with errno set otherwise
 */
int kflock(int fd, int operation) {
	int rc;
	struct node_flock *flock;
	struct mutex *exlock;
	spinlock_t *flock_guard;
	long *shlock_count;
	struct file_desc *fdesc;
	struct thread *current = thread_self();

	/**
	 * Base algorithm:
	 * - Validate operation
	 * - Validate fd
	 * - Get lock pointers and other preparations
	 * - Determine the operation (total 2 x 2 + 1 = 5 cases)
	 *    1. Exclusive lock, blocking
	 *        - If the shared lock is held only by the current thread, convert
	 *          it to exclusive
	 *        - If a shared or exclusive lock is held by someone else, block
	 *        - Else acquire the exclusive lock
	 *    2. Exclusive lock, non-blocking
	 *        - Same as 1 but return EWOULDBLOCK instead of blocking
	 *    3. Shared lock, blocking
	 *        - If the exclusive lock is held by the current thread, convert
	 *          it to shared
	 *        - If the exclusive lock is held by someone else, block
	 *        - Else acquire the shared lock
	 *    4. Shared lock, non-blocking
	 *        - Same as 3 but return EWOULDBLOCK instead of blocking
	 *    5. Unlock, blocking and non-blocking
	 *        - If any lock is held by the current thread, remove it;
	 *          blocking and non-blocking behave the same because mutex_unlock
	 *          never blocks the thread in the current implementation
	 */

	/* Validate operation */
	if (((LOCK_EX | LOCK_SH | LOCK_UN) & operation) != LOCK_EX &&
			((LOCK_EX | LOCK_SH | LOCK_UN) & operation) != LOCK_SH &&
			((LOCK_EX | LOCK_SH | LOCK_UN) & operation) != LOCK_UN) {
		return -EINVAL;
	}

	/* - Find the locks and other properties for the provided file descriptor
	 * - fd is validated by checking the descriptor returned by file_desc_get */
	fdesc = file_desc_get(fd);
	if (NULL == fdesc) {
		return -EBADF;
	}
	flock = &fdesc->f_inode->flock;
	exlock = &flock->exlock;
	shlock_count = &flock->shlock_count;
	flock_guard = &flock->flock_guard;

	/* Exclusive locking operation */
	if (LOCK_EX & operation) {
		spin_lock(flock_guard);
		/* If a shared lock is held by any thread then release our own shared
		 * locks and try to acquire the exclusive one */
		if (*shlock_count > 0) {
			/* We can hold only one type of lock at a time */
			assert(0 == exlock->lock_count);
			flock_shared_put(flock);
		}
		if (LOCK_NB & operation) {
			if (-EBUSY == flock_exclusive_tryget(exlock)) {
				spin_unlock(flock_guard);
				SET_ERRNO(EWOULDBLOCK);
				return -1;
			} else {
				spin_unlock(flock_guard);
			}
		} else {
			/* We unlock flock_guard here to avoid many threads spinning on
			 * the guard at the entry to this critical section */
			spin_unlock(flock_guard);
			flock_exclusive_get(exlock);
		}
	}

	/* Shared locking operation */
	if (LOCK_SH & operation) {
		spin_lock(flock_guard);
		/* If the current thread is the holder of the exclusive lock then
		 * convert it to a shared lock */
		if (exlock->lock_count > 0) {
			if (&current->schedee == exlock->holder) {
				/* Again, two different types of lock cannot be held
				 * simultaneously */
				assert(0 == *shlock_count);
				/* The exclusive lock can be acquired many times by one thread
				 * because of the current implementation of mutexes, so when
				 * converting the lock to shared we need to release all of them
				 */
				while (exlock->lock_count != 0) flock_exclusive_put(exlock);
			} else {
				if (LOCK_NB & operation) {
					if (-EBUSY == flock_exclusive_tryget(exlock)) {
						spin_unlock(flock_guard);
						SET_ERRNO(EWOULDBLOCK);
						return -1;
					}
				} else {
					/* As for the exclusive lock, we unlock flock_guard to
					 * avoid many threads spinning on the guard at the entry
					 * to this critical section */
					spin_unlock(flock_guard);
					flock_exclusive_get(exlock);
					spin_lock(flock_guard);
					flock_exclusive_put(exlock);
				}
			}
		}
		/* Acquire shared lock */
		rc = flock_shared_get(flock);
		if (-ENOERR != rc) {
			/* Release the guard before returning the error */
			spin_unlock(flock_guard);
			return rc;
		}
		spin_unlock(flock_guard);
	}

	/* Unlock operation */
	if (LOCK_UN & operation) {
		spin_lock(flock_guard);
		/* Handle exclusive lock release */
		if (exlock->holder == &current->schedee) {
			assert(0 == *shlock_count);
			/* mutex_unlock can't block the thread,
			 * so nothing special is needed for LOCK_NB */
			while (exlock->lock_count != 0) flock_exclusive_put(exlock);
		}
		/* Handle shared lock release */
		if (*shlock_count > 0) {
			assert(0 == exlock->lock_count);
			flock_shared_put(flock);
		}
		spin_unlock(flock_guard);
	}

	return ENOERR;
}

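A minimal caller-side sketch of the locking protocol this file implements, assuming the standard flock() wrapper declared in <sys/file.h> is routed to kflock; the file path and error handling are illustrative only:

#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

int main(void) {
	/* Illustrative path; any file the task can open works. */
	int fd = open("/tmp/demo.lock", O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		return 1;
	}

	/* Try to take the exclusive lock without blocking. */
	if (flock(fd, LOCK_EX | LOCK_NB) < 0) {
		/* Another holder exists: fall back to a blocking request. */
		flock(fd, LOCK_EX);
	}

	/* ... exclusive critical section ... */

	/* Downgrade to a shared lock, then release it. */
	flock(fd, LOCK_SH);
	flock(fd, LOCK_UN);

	close(fd);
	return 0;
}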