# Pull in the shared regression-test harness helpers (TEST/EXPECT/EXPECT_WITHIN,
# cluster helpers such as $CLI_n/$H_n, and DHT-specific helpers).
. $(dirname $0)/../include.rc
. $(dirname $0)/../cluster.rc
. $(dirname $0)/../dht.rc
# Check if every single rebalance process migrated some files.
# Prints "0" when every completed node reports a non-zero rebalanced-file
# count, "1" otherwise — consumed via EXPECT "0" further down.
# NOTE(review): the loop body, "fi", and closing brace were dropped by the
# garbled source; reconstructed around the surviving awk capture and "if".
function cluster_rebal_all_nodes_migrated_files {
        val=0
        # Column 2 of "volume rebalance status" is the per-node count of
        # rebalanced files; only consider nodes whose run has completed.
        a=$($CLI_1 volume rebalance $V0 status | grep "completed" | awk '{print $2}');
        b=($a)
        for i in "${b[@]}"; do
                if [ "$i" -eq "0" ]; then
                        # at least one rebalance process migrated nothing
                        val=1
                fi
        done
        echo $val
}
# NOTE(review): the garbled source dropped the standard preamble; a 3-node
# cluster is clearly required ($H1..$H3, $CLI_1 are used throughout), so
# restore the conventional cleanup + launch_cluster steps — confirm against
# the original test.
cleanup;

TEST launch_cluster 3;

# Form the trusted pool and wait until all peers are connected.
TEST $CLI_1 peer probe $H2;
TEST $CLI_1 peer probe $H3;
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
#Start with a pure distribute volume (multiple bricks on the same node)
TEST $CLI_1 volume create $V0 $H1:$B1/dist1 $H1:$B1/dist2 $H2:$B2/dist3 $H2:$B2/dist4
TEST $CLI_1 volume start $V0

#TEST $CLI_1 volume set $V0 client-log-level DEBUG

# Mount the volume and create files to be migrated.
TEST glusterfs -s $H1 --volfile-id $V0 $M0;
TEST mkdir $M0/dir1 2>/dev/null;
TEST touch $M0/dir1/file-{1..500}

## Add-brick and run rebalance to force file migration
TEST $CLI_1 volume add-brick $V0 $H1:$B1/dist5 $H2:$B2/dist6
TEST $CLI_1 volume rebalance $V0 start force

#volume rebalance status should work
#TEST $CLI_1 volume rebalance $V0 status
#$CLI_1 volume rebalance $V0 status

# Wait for rebalance to finish everywhere, then verify each node moved files.
EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed
EXPECT "0" cluster_rebal_all_nodes_migrated_files
$CLI_1 volume rebalance $V0 status

TEST $CLI_1 volume stop $V0
TEST $CLI_1 volume delete $V0

##############################################################
# Next, a dist-rep volume
TEST $CLI_1 volume create $V0 replica 2 $H1:$B1/drep1 $H2:$B2/drep1 $H1:$B1/drep2 $H2:$B2/drep2
TEST $CLI_1 volume start $V0

#TEST $CLI_1 volume set $V0 client-log-level DEBUG

# Mount the volume and create files to be migrated.
TEST glusterfs -s $H1 --volfile-id $V0 $M0;
TEST mkdir $M0/dir1 2>/dev/null;
TEST touch $M0/dir1/file-{1..500}

## Add-brick and run rebalance to force file migration
TEST $CLI_1 volume add-brick $V0 replica 2 $H1:$B1/drep3 $H2:$B2/drep3
TEST $CLI_1 volume rebalance $V0 start force

#volume rebalance status should work
#TEST $CLI_1 volume rebalance $V0 status
#$CLI_1 volume rebalance $V0 status

# Wait for rebalance to finish on all nodes.
EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed
#EXPECT "0" cluster_rebal_all_nodes_migrated_files
$CLI_1 volume rebalance $V0 status

TEST $CLI_1 volume stop $V0
TEST $CLI_1 volume delete $V0

##############################################################
# Next, a disperse volume
TEST $CLI_1 volume create $V0 disperse 3 $H1:$B1/ec1 $H2:$B1/ec2 $H3:$B1/ec3 force
TEST $CLI_1 volume start $V0
$CLI_1 volume info $V0

#TEST $CLI_1 volume set $V0 client-log-level DEBUG

# Mount the volume and create files to be migrated.
TEST glusterfs -s $H1 --volfile-id $V0 $M0;
TEST mkdir $M0/dir1 2>/dev/null;
TEST touch $M0/dir1/file-{1..500}

## Add-brick and run rebalance to force file migration
TEST $CLI_1 volume add-brick $V0 $H1:$B2/ec4 $H2:$B2/ec5 $H3:$B2/ec6
TEST $CLI_1 volume rebalance $V0 start force

#volume rebalance status should work
#TEST $CLI_1 volume rebalance $V0 status
#$CLI_1 volume rebalance $V0 status

EXPECT_WITHIN $REBALANCE_TIMEOUT "0" cluster_rebalance_completed

# this will not work unless EC is changed to return all node-uuids
# comment this out once that patch is ready
#EXPECT "0" cluster_rebal_all_nodes_migrated_files
$CLI_1 volume rebalance $V0 status

TEST $CLI_1 volume stop $V0
TEST $CLI_1 volume delete $V0
##############################################################

# NOTE(review): the garbled source appears to have dropped the standard
# trailing cleanup that every harness test ends with — confirm against the
# original. The G_TESTDEF directive below is kept byte-identical, though its
# usual form is "STATUS,BUG=nnnn" and this one looks truncated.
cleanup;

#G_TESTDEF_TEST_STATUS_NETBSD7=1501388