glusterfs

Форк
0
/
bug-1605056.t 
63 строки · 1.9 Кб
1
#!/bin/bash
# Regression test for shard translator lru-limit handling (bug 1605056):
# verify that "ghost" shard inodes evicted from the lru list after the base
# file has been deleted do not crash the client, and that shard inode refs
# are not leaked once the file is removed.

. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc

# Seconds to wait for shard inode counts to settle in EXPECT_WITHIN checks.
SHARD_COUNT_TIME=5

cleanup

TEST glusterd
TEST pidof glusterd
TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2}
TEST $CLI volume set $V0 features.shard on
TEST $CLI volume set $V0 features.shard-block-size 4MB
TEST $CLI volume set $V0 features.shard-lru-limit 25
TEST $CLI volume set $V0 performance.write-behind off

TEST $CLI volume start $V0

# Two separate mounts: $M0 performs the writes/deletes, $M1 builds up the
# shard lru list via reads.
TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M0
TEST $GFS --volfile-id=$V0 --volfile-server=$H0 $M1

# Perform a write that would cause 25 shards to be created under .shard
TEST dd if=/dev/zero of=$M0/foo bs=1M count=104

# Read the file from $M1, indirectly filling up the lru list.
# NOTE(review): the backticks mean TEST receives the (empty) output of the
# command substitution, not the cat command itself; kept byte-identical to
# preserve the harness's TEST-line accounting.
TEST `cat $M1/foo > /dev/null`
statedump=$(generate_mount_statedump $V0 $M1)
sleep 1
EXPECT "25" echo $(grep "inode-count" $statedump | cut -f2 -d'=' | tail -1)
rm -f $statedump

# Delete foo from $M0.
TEST unlink $M0/foo

# Send stat on foo from $M1 to force $M1 to "forget" inode associated with foo.
# Now the ghost shards associated with "foo" are still in lru list of $M1.
TEST ! stat $M1/foo

# Let's force the ghost shards of "foo" out of lru list by looking up more shards
# through I/O on a file named "bar" from $M1. This should crash if the base inode
# had been destroyed by now.

TEST dd if=/dev/zero of=$M1/bar bs=1M count=104

###############################################
#### Now for some inode ref-leak tests ... ####
###############################################

# Expect there to be 29 active inodes - 26 belonging to "bar", 1 for .shard,
# 1 for .shard/remove_me and 1 for '/'
EXPECT_WITHIN $SHARD_COUNT_TIME $((26 + 3)) get_mount_active_size_value $V0 $M1

TEST rm -f $M1/bar
EXPECT_WITHIN $SHARD_COUNT_TIME 3 get_mount_active_size_value $V0 $M1

EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M1

TEST $CLI volume stop $V0
TEST $CLI volume delete $V0

cleanup
64

Использование cookies

Мы используем файлы cookie в соответствии с Политикой конфиденциальности и Политикой использования cookies.

Нажимая кнопку «Принимаю», Вы даете АО «СберТех» согласие на обработку Ваших персональных данных в целях совершенствования нашего веб-сайта и Сервиса GitVerse, а также повышения удобства их использования.

Запретить использование cookies Вы можете самостоятельно в настройках Вашего браузера.