basic_distribute_rebal-all-nodes-migrate.t
#!/bin/bash

. $(dirname $0)/../include.rc
. $(dirname $0)/../cluster.rc
. $(dirname $0)/../dht.rc


# Check if every single rebalance process migrated some files

function cluster_rebal_all_nodes_migrated_files {
        val=0
        a=$($CLI_1 volume rebalance $V0 status | grep "completed" | awk '{print $2}');
        b=($a)
        for i in "${b[@]}"
        do
                if [ "$i" -eq "0" ]; then
                        echo "false";
                        val=1;
                fi
        done
        echo $val
}
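# For reference, `gluster volume rebalance <vol> status` prints one row per
# node; the columns are roughly (exact layout may vary by release):
#   Node  Rebalanced-files  size  scanned  failures  skipped  status  run time
# The helper above takes column 2 (Rebalanced-files) from every "completed"
# row and prints 1 if any node finished without migrating a single file,
# 0 otherwise.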

cleanup

TEST launch_cluster 3;
TEST $CLI_1 peer probe $H2;
TEST $CLI_1 peer probe $H3;
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
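# launch_cluster (from cluster.rc) starts three glusterd instances and
# defines the per-node $CLI_n/$H_n/$B_n variables used below; node 1 should
# end up seeing 2 connected peers.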


#Start with a pure distribute volume (multiple bricks on the same node)
TEST $CLI_1 volume create $V0 $H1:$B1/dist1 $H1:$B1/dist2 $H2:$B2/dist3 $H2:$B2/dist4
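# A pure distribute (DHT-only) volume keeps each file on exactly one brick;
# rebalance runs one process per node, which here covers two bricks each.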

TEST $CLI_1 volume start $V0
$CLI_1 volume info $V0

#TEST $CLI_1 volume set $V0 client-log-level DEBUG

## Mount FUSE
TEST glusterfs -s $H1 --volfile-id $V0 $M0;

TEST mkdir $M0/dir1 2>/dev/null;
TEST touch $M0/dir1/file-{1..500}
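# Brace expansion creates 500 empty files in one shot; DHT hashes each name
# to one of the bricks, spreading the set across the volume.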

## Add-brick and run rebalance to force file migration
TEST $CLI_1 volume add-brick $V0 $H1:$B1/dist5 $H2:$B2/dist6
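# add-brick only widens the volume; existing files stay put until a
# rebalance recomputes the layout and migrates them to the new bricks.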

#Start a rebalance
TEST $CLI_1 volume rebalance $V0 start force
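# "start force" tells rebalance to migrate files even when the move would
# not improve space utilization, so every node should have work to do.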

#volume rebalance status should work
#TEST $CLI_1 volume rebalance $V0 status
#$CLI_1 volume rebalance $V0 status

EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed
EXPECT "0" cluster_rebal_all_nodes_migrated_files
$CLI_1 volume rebalance $V0 status


TEST umount -f $M0
TEST $CLI_1 volume stop $V0
TEST $CLI_1 volume delete $V0


##############################################################

# Next, a dist-rep volume
TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/drep1 $H2:$B2/drep1 $H1:$B1/drep2 $H2:$B2/drep2
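# Bricks are grouped into replica sets in the order listed: (drep1 on H1/H2)
# and (drep2 on H1/H2), i.e. two DHT subvolumes, each a 2-way replica that
# spans both nodes.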

TEST $CLI_1 volume start $V0
$CLI_1 volume info $V0

#TEST $CLI_1 volume set $V0 client-log-level DEBUG

## Mount FUSE
TEST glusterfs -s $H1 --volfile-id $V0 $M0;

TEST mkdir $M0/dir1 2>/dev/null;
TEST touch $M0/dir1/file-{1..500}

## Add-brick and run rebalance to force file migration
TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/drep3 $H2:$B2/drep3
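# Replicated volumes grow in multiples of the replica count; this adds one
# more 2-way replica set as a third DHT subvolume.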

#Start a rebalance
TEST $CLI_1 volume rebalance $V0 start force

#volume rebalance status should work
#TEST $CLI_1 volume rebalance $V0 status
#$CLI_1 volume rebalance $V0 status

EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed
#EXPECT "0" cluster_rebal_all_nodes_migrated_files
$CLI_1 volume rebalance $V0 status


TEST umount -f $M0
TEST $CLI_1 volume stop $V0
TEST $CLI_1 volume delete $V0

##############################################################

# Next, a disperse volume
TEST $CLI_1 volume create $V0 disperse 3 $H1:$B1/ec1 $H2:$B1/ec2 $H3:$B1/ec3 force
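# disperse 3 creates an erasure-coded volume; with no redundancy given,
# gluster picks a default (1 here, i.e. 2 data + 1 redundancy bricks per
# set). "force" suppresses the warnings volume create may raise about this
# layout.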

TEST $CLI_1 volume start $V0
$CLI_1 volume info $V0

#TEST $CLI_1 volume set $V0 client-log-level DEBUG

## Mount FUSE
TEST glusterfs -s $H1 --volfile-id $V0 $M0;

TEST mkdir $M0/dir1 2>/dev/null;
TEST touch $M0/dir1/file-{1..500}

## Add-brick and run rebalance to force file migration
TEST $CLI_1 volume add-brick $V0 $H1:$B2/ec4 $H2:$B2/ec5 $H3:$B2/ec6
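# Like replica sets, disperse sets are added whole: three more bricks form a
# second 2+1 disperse subvolume.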

#Start a rebalance
TEST $CLI_1 volume rebalance $V0 start force

#volume rebalance status should work
#TEST $CLI_1 volume rebalance $V0 status
#$CLI_1 volume rebalance $V0 status

EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed

# this will not work unless EC is changed to return all node-uuids
# un-comment this once that patch is ready
#EXPECT "0" cluster_rebal_all_nodes_migrated_files
$CLI_1 volume rebalance $V0 status


TEST umount -f $M0
TEST $CLI_1 volume stop $V0
TEST $CLI_1 volume delete $V0


##############################################################

cleanup
#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=1501388