#!/bin/bash

# Test that a volume becomes unwritable when the cluster loses quorum.

. $(dirname $0)/../include.rc
. $(dirname $0)/../volume.rc
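
# include.rc provides the TEST/EXPECT_WITHIN harness and the $CLI, $V0,
# $B0 and $M0 variables used below.

# Start an extra glusterd instance ("pseudo-node") bound to 127.0.0.$2,
# with a private working directory cloned from the real one so that
# several nodes can form a cluster on a single machine.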
function vglusterd {
    wd=$1/wd-$2
    cp -r /var/lib/glusterd $wd
    rm -rf $wd/peers/* $wd/vols/*
    # echo -n does not expand \n in bash; printf writes real newlines.
    printf "UUID=%s\noperating-version=1\n" "$(uuidgen)" > $wd/glusterd.info
    opt1="management.transport.socket.bind-address=127.0.0.$2"
    opt2="management.working-directory=$wd"
    glusterd --xlator-option $opt1 --xlator-option $opt2
}
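
# Print 0 if the filesystem at $1 is still usable (df succeeds),
# nonzero otherwise.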
function check_fs {
    df $1 &> /dev/null
    echo $?
}
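
# Count how many peers the queried node sees as connected.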
function check_peers {
    $VCLI peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
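
# Start from a clean test environment and remove our scratch area on exit.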
cleanup;
topwd=$(mktemp -d)
trap "rm -rf $topwd" EXIT
vglusterd $topwd 100
VCLI="$CLI --remote-host=127.0.0.100"
vglusterd $topwd 101
TEST $VCLI peer probe 127.0.0.101
vglusterd $topwd 102
TEST $VCLI peer probe 127.0.0.102
EXPECT_WITHIN 20 2 check_peers
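
# Build a volume with one brick on each pseudo-node.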
create_cmd="$VCLI volume create $V0"
for i in $(seq 100 102); do
    mkdir -p $B0/$V0$i
    create_cmd="$create_cmd 127.0.0.$i:$B0/$V0$i"
done
TEST $create_cmd
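
# Enable server-side quorum. With three peers and the default quorum
# ratio (>50%), at least two glusterd processes must stay up; a node
# that falls out of quorum kills its local brick processes.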
TEST $VCLI volume set $V0 cluster.server-quorum-type server
TEST $VCLI volume start $V0
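
# Mount the volume through the first pseudo-node.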
TEST glusterfs --volfile-server=127.0.0.100 --volfile-id=$V0 $M0

# Kill one pseudo-node, make sure the others survive and the volume stays up.
kill -9 $(ps -ef | grep gluster | grep 127.0.0.102 | awk '{print $2}')
EXPECT_WITHIN 20 1 check_peers
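
# Two of three nodes remain, so quorum still holds: the mount stays
# writable and both surviving bricks keep running.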
fs_status=$(check_fs $M0)
nnodes=$(pidof glusterfsd | wc -w)
TEST [ "$fs_status" = 0 -a "$nnodes" = 2 ]

# Kill another pseudo-node, make sure the last one dies and the volume goes down.
kill -9 $(ps -ef | grep gluster | grep 127.0.0.101 | awk '{print $2}')
EXPECT_WITHIN 20 0 check_peers
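
# Only one node is left, so quorum is lost: glusterd kills the last
# brick and the mount becomes unusable.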
fs_status=$(check_fs $M0)
nnodes=$(pidof glusterfsd | wc -w)
TEST [ "$fs_status" = 1 -a "$nnodes" = 0 ]
cleanup