glusterfs

quota-anon-fd-nfs.t 
#!/bin/bash

. $(dirname $0)/../include.rc
. $(dirname $0)/../volume.rc
. $(dirname $0)/../nfs.rc
. $(dirname $0)/../fileio.rc

#G_TESTDEF_TEST_STATUS_CENTOS6=NFS_TEST

cleanup;

QDD=$(dirname $0)/quota
# compile the test write program and run it
build_tester $(dirname $0)/quota.c -o $QDD
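
# A rough sketch (not the actual helper) of what build_tester from the test
# harness amounts to here: compile the small C writer with the system
# compiler so it can be invoked as $QDD later.
#
#   cc $(dirname $0)/quota.c -o $QDD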

TESTS_EXPECTED_IN_LOOP=16
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;

TEST $CLI volume create $V0 $H0:$B0/brick1;
EXPECT 'Created' volinfo_field $V0 'Status';
TEST $CLI volume set $V0 nfs.disable false


# The test makes use of inode-lru-limit to hit a scenario where we
# find an inode whose ancestry is not present. Following is the
# hypothesis (confirmed by logs indicating that the codepath has been
# executed, rather than by a thorough understanding of NFS internals).

#     At the end of an fop, the reference count of an inode drops to
#     zero. The inode (and its ancestry) persists in memory only
#     because of a non-zero lookup count. These looked-up inodes are put
#     in an lru queue of size 1 (here). So, there can be at most one
#     such inode in memory.

#     The NFS server makes use of anonymous fds. So, if it cannot find a
#     valid fd, it does a nameless lookup. This gives us an inode
#     whose ancestry is NULL. When a write happens on this inode,
#     quota-enforcer/marker finds a NULL ancestry and asks
#     storage/posix to build it.

TEST $CLI volume set $V0 network.inode-lru-limit 1
TEST $CLI volume set $V0 performance.nfs.write-behind off
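
# Optional sanity check (a sketch; assumes this glusterfs build provides
# "gluster volume get"), to confirm the two options above actually took
# effect before proceeding:
#
#   $CLI volume get $V0 network.inode-lru-limit
#   $CLI volume get $V0 performance.nfs.write-behind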

TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';

TEST $CLI volume quota $V0 enable
TEST $CLI volume quota $V0 limit-usage / 1
TEST $CLI volume quota $V0 soft-timeout 0
TEST $CLI volume quota $V0 hard-timeout 0
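
# Soft-timeout and hard-timeout of 0 make the enforcer re-check usage on
# every operation instead of relying on cached accounting, so the tiny
# limit on / takes effect immediately. The configured limit and current
# usage can be inspected by hand (a debugging aid, not part of the test):
#
#   $CLI volume quota $V0 list /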

EXPECT_WITHIN $NFS_EXPORT_TIMEOUT "1" is_nfs_export_available;
TEST mount_nfs $H0:/$V0 $N0 noac,soft,nolock;
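
# mount_nfs comes from nfs.rc; roughly (a sketch, not the actual helper) it
# performs an NFSv3 mount of the gluster NFS export with the extra options
# appended, along the lines of:
#
#   mount -t nfs -o vers=3,noac,soft,nolock $H0:/$V0 $N0
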
deep=/0/1/2/3/4/5/6/7/8/9
TEST mkdir -p $N0/$deep

TEST touch $N0/$deep/file1 $N0/$deep/file2 $N0/$deep/file3 $N0/$deep/file4

TEST fd_open 3 'w' "$N0/$deep/file1"
TEST fd_open 4 'w' "$N0/$deep/file2"
TEST fd_open 5 'w' "$N0/$deep/file3"
TEST fd_open 6 'w' "$N0/$deep/file4"
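
# fd_open and fd_write come from fileio.rc. A rough plain-bash sketch of
# what they amount to (not the exact helper code): fd_open N 'w' FILE binds
# shell descriptor N to FILE for writing, and fd_write N DATA writes DATA
# through that descriptor, e.g.:
#
#   exec 3>"$N0/$deep/file1"   # ~ fd_open 3 'w' "$N0/$deep/file1"
#   echo "content" >&3         # ~ fd_write 3 "content"
#
# Keeping these descriptors open is what lets the writes further below
# exercise the server-side anonymous-fd path described above.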

# consume all quota
echo "Hello" > $N0/$deep/new_file_1
echo "World" >> $N0/$deep/new_file_1
echo 1 >> $N0/$deep/new_file_1
echo 2 >> $N0/$deep/new_file_1

# Try to create a 1M file, which should fail
TEST ! $QDD $N0/$deep/new_file_2 256 4


# At the end of each fop on the server, the reference count of the inode
# associated with each of the files above drops to zero, and the inode is
# hence put into the lru queue. Since lru-limit is set to 1, an fop on the
# next file will displace the current inode from the itable. This ensures
# that when writes happen on the same fd, fd resolution results in a
# nameless lookup from the server and quota_writev encounters an fd
# associated with an inode whose parent is not present in the itable.

for j in $(seq 1 2); do
    for i in $(seq 3 6); do
        # failing writes indicate that we are enforcing quota set on /
        # even with anonymous fds.
        TEST_IN_LOOP ! fd_write $i "content"
        TEST_IN_LOOP sync
    done
done

exec 3>&-
exec 4>&-
exec 5>&-
exec 6>&-

$CLI volume statedump $V0 all
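
# The statedump above is only for post-mortem debugging; the dumps land in
# glusterd's statedump directory (typically something like /var/run/gluster,
# depending on the build), e.g.:
#
#   ls /var/run/gluster/*.dump.* 2>/dev/null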

EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $N0

# This is ugly, but there seems to be a latent race between other actions and
# stopping the volume.  The visible symptom is that "umount -l" (run from
# gf_umount_lazy in glusterd) hangs.  This happens pretty consistently with the
# new mem-pool code, though it's not really anything to do with memory pools -
# just with changed timing.  Adding the sleep here makes it work consistently.
#
# If anyone else wants to debug the race condition, feel free.
sleep 3

TEST $CLI volume stop $V0

rm -f $QDD

cleanup;
#G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=000000
