# glusterfs regression test: mgmt_v3-locks.t
#!/bin/bash

# Pull in the shared test harness (TEST/EXPECT macros) and the
# multi-glusterd cluster helpers (launch_cluster, CLI_n, H_n, B_n, ...).
. "$(dirname "$0")/../include.rc"
. "$(dirname "$0")/../cluster.rc"

function check_peers {
        # Count peers seen by node 1 that are connected and in-cluster.
        # `|| true` keeps the exit status 0 when the count is 0, matching
        # the old `grep | wc -l` behaviour (wc always succeeded).
        $CLI_1 peer status | grep -c 'Peer in Cluster (Connected)' || true
}

function volume_count {
        # Print the number of volumes visible from one cluster node.
        # $1 - node selector: 1 -> query via CLI_1, anything else -> CLI_2
        local cli=$1;
        if [ "$cli" -eq 1 ]; then
                $CLI_1 volume info | grep -c 'Volume Name' || true;
        else
                $CLI_2 volume info | grep -c 'Volume Name' || true;
        fi
}

function volinfo_field()
{
    # Extract a single "Field: value" line from `volume info` for the
    # given volume (as seen by node 1) and print just the value.
    # $1 - volume name, $2 - field label (e.g. 'Status')
    local vol=$1;
    local field=$2;

    $CLI_1 volume info "$vol" | grep "^$field: " | sed 's/.*: //';
}

function two_diff_vols_create {
        # Create two different volumes concurrently from two different
        # cluster nodes. Both volume creates should be successful; the
        # mgmt_v3 locks must not make them collide.
        $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0 &
        PID_1=$!

        $CLI_2 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1 $H3:$B3/$V1 &
        PID_2=$!

        wait $PID_1 $PID_2
}

function two_diff_vols_start {
        # Start the two volumes concurrently from two different cluster
        # nodes. Both volume starts should be successful.
        $CLI_1 volume start $V0 &
        PID_1=$!

        $CLI_2 volume start $V1 &
        PID_2=$!

        wait $PID_1 $PID_2
}

function two_diff_vols_stop_force {
        # Force stop, so that if rebalance from the
        # remove bricks is in progress, stop can
        # still go ahead. Both volume stops should
        # be successful.
        $CLI_1 volume stop $V0 force &
        PID_1=$!

        $CLI_2 volume stop $V1 force &
        PID_2=$!

        wait $PID_1 $PID_2
}

function same_vol_remove_brick {

        # Running two same vol commands at the same time can result in
        # two success', two failures, or one success and one failure, all
        # of which are valid. The only thing that shouldn't happen is a
        # glusterd crash.
        # $1 - volume name, $2 - brick spec (host:/path)

        local vol=$1
        local brick=$2
        # Use the declared locals (the old code declared them and then
        # ignored them, reading $1/$2 directly), and quote the expansions.
        $CLI_1 volume remove-brick "$vol" "$brick" start &
        PID=$!
        $CLI_2 volume remove-brick "$vol" "$brick" start
        wait "$PID"
}

cleanup;

# Bring up a 3-node glusterd cluster and peer-probe the other two nodes.
TEST launch_cluster 3;
TEST $CLI_1 peer probe $H2;
TEST $CLI_1 peer probe $H3;

EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers

# Concurrent creates/starts of two *different* volumes must all succeed.
two_diff_vols_create
EXPECT 'Created' volinfo_field $V0 'Status';
EXPECT 'Created' volinfo_field $V1 'Status';

two_diff_vols_start
EXPECT 'Started' volinfo_field $V0 'Status';
EXPECT 'Started' volinfo_field $V1 'Status';

same_vol_remove_brick $V0 $H2:$B2/$V0
# Checking glusterd crashed or not after same volume remove brick
# on both nodes.
EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers

same_vol_remove_brick $V1 $H2:$B2/$V1
# Checking glusterd crashed or not after same volume remove brick
# on both nodes.
EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers

# Two concurrent `volume set` operations on different volumes issued
# through the same node should both go through.
$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
PID=$!
$CLI_1 volume set $V1 diagnostics.client-log-level DEBUG
wait $PID

# Kill one glusterd; the surviving nodes must stay peered and both
# volumes must still report Started.
kill_glusterd 3
$CLI_1 volume status $V0
$CLI_2 volume status $V1
$CLI_1 peer status
EXPECT_WITHIN $PROBE_TIMEOUT 1 check_peers
EXPECT 'Started' volinfo_field $V0 'Status';
EXPECT 'Started' volinfo_field $V1 'Status';

# Restart the killed glusterd and confirm status/peer commands still work.
TEST $glusterd_3
$CLI_1 volume status $V0
$CLI_2 volume status $V1
$CLI_1 peer status
cleanup;
