blob: 27d7439a1807564513e6e403c235468a3d650065 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
|
#!/bin/bash
. $(dirname $0)/../include.rc
. $(dirname $0)/../cluster.rc
. $(dirname $0)/../volume.rc
# peer_count <node-index>: print the number of peers that the glusterd on
# node <node-index> reports in the 'Peer in Cluster (Connected)' state.
# $CLI_<n> variables are provided by cluster.rc; eval is needed so that the
# indirect variable name \$CLI_$1 is expanded into the actual CLI command.
function peer_count {
    eval \$CLI_$1 peer status | grep -c 'Peer in Cluster (Connected)'
}
cleanup
# Spin up a 3-node cluster; launch_cluster (cluster.rc) defines per-node
# $CLI_n, $H_n (hostname) and $B_n (brick path) variables used below.
TEST launch_cluster 3
## Basic peer commands
TEST $CLI_1 peer probe $H2
# After probing one node, both members should each see exactly 1 connected peer.
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 1
EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count 2
# Probe an unreachable node (glusterd on node 3 is down) -- must fail.
TEST kill_glusterd 3
TEST ! $CLI_1 peer probe $H3
# Detach a node which is not part of the cluster -- must fail, both with and
# without 'force'.
TEST ! $CLI_1 peer detach $H3
TEST ! $CLI_1 peer detach $H3 force
TEST start_glusterd 3
TEST $CLI_1 peer probe $H3
# With all three nodes joined, every node should see 2 connected peers.
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 1
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 2
EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count 3
# Probe a node which is already part of the cluster -- succeeds as a no-op.
TEST $CLI_1 peer probe $H3
# Probe an invalid (out-of-range octets) address -- must fail.
TEST ! $CLI_1 peer probe 1024.1024.1024.1024
TEST $CLI_1 pool list
## All help commands must exit successfully.
TEST $CLI_1 global help
TEST $CLI_1 help
TEST $CLI_1 peer help
TEST $CLI_1 volume help
TEST $CLI_1 volume bitrot help
TEST $CLI_1 volume quota help
TEST $CLI_1 snapshot help
## Volume operations
# Plain (no replica/disperse) 3-brick volume, one brick per node.
TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
# Creating a volume with an already-existing volume name must fail.
TEST ! $CLI_1 volume create $V0 $H1:$B1/$V1 $H2:$B2/$V1
TEST $CLI_1 volume start $V0
EXPECT 'Started' cluster_volinfo_field 1 $V0 'Status';
# Mount the volume via node 1 and create some files on it.
TEST glusterfs -s $H1 --volfile-id $V0 $M1
TEST touch $M1/file{1..100}
# 'volume status ... shd' fails because a plain distribute volume has no
# self-heal daemon (shd only runs for replicate/disperse volumes).
TEST ! $CLI_1 volume status $V0 shd
cleanup
|