#!/bin/bash

. $(dirname $0)/../../include.rc
. $(dirname $0)/../../traps.rc
. $(dirname $0)/../../volume.rc
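
# With cluster.brick-multiplex enabled, every brick on a node is expected
# to be packed into a single glusterfsd process. The helpers below check
# this from two angles: the number of glusterfsd processes, and the number
# of distinct brick PIDs reported by volume status.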
function count_brick_processes {
        pgrep glusterfsd | wc -l
}
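
# Count the distinct brick PIDs reported by "volume status all"; bricks
# that are down report "N/A" and are filtered out.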
function count_brick_pids {
        $CLI --xml volume status all | sed -n '/.*<pid>\([^<]*\).*/s//\1/p' \
        | grep -v "N/A" | sort | uniq | wc -l
}
cleanup;

#bug-1451248 - validate brick mux after glusterd reboot
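# Enable multiplexing cluster-wide, create a 3-brick volume, and verify
# that all three bricks share one glusterfsd process and one PID.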
TEST glusterd
TEST $CLI volume set all cluster.brick-multiplex on
TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3}
TEST $CLI volume start $V0
EXPECT 1 count_brick_processes
EXPECT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
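
# Simulate a reboot: kill all gluster processes and restart glusterd.
# The bricks should reattach into a single multiplexed process.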
pkill gluster
TEST glusterd
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 3 online_brick_count
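
# A volume created after the restart should multiplex its bricks into
# the same brick process.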
TEST $CLI volume create $V1 $H0:$B0/${V1}{1..3}
TEST $CLI volume start $V1
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count

#bug-1560957 - brick status goes offline after remove-brick followed by add-brick
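# Restart glusterd, then replace a brick. The newly added brick should
# attach to the running multiplexed process and all six bricks should
# come back online.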
pkill glusterd
TEST glusterd
TEST $CLI volume remove-brick $V0 $H0:$B0/${V0}1 force
TEST $CLI volume add-brick $V0 $H0:$B0/${V0}1_new force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_processes
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 1 count_brick_pids
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count

#bug-1446172 - reset brick with brick multiplexing enabled
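# Mount $V0 and create some files, then use reset-brick to take a single
# brick offline and bring it back without disturbing the shared process.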
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
# Create files
for i in {1..5}
do
        echo $i > $M0/file$i.txt
done
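
# reset-brick start should take only the targeted brick offline
# (5 of 6 bricks online) while the brick process keeps running.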
TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1_new start
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 5 online_brick_count
EXPECT 1 count_brick_processes
# Negative case with brick killed but volume-id xattr present
TEST ! $CLI volume reset-brick $V0 $H0:$B0/${V0}1 $H0:$B0/${V0}1 commit
# reset-brick commit force should work and should bring up the brick
TEST $CLI volume reset-brick $V0 $H0:$B0/${V0}1_new $H0:$B0/${V0}1_new commit force
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
EXPECT 1 count_brick_processes
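
# Repeat on $V1, this time wiping the brick contents and xattrs to
# simulate a replaced disk before committing the reset.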
TEST glusterfs --volfile-id=$V1 --volfile-server=$H0 $M1;
# Create files
for i in {1..5}
do
        echo $i > $M1/file$i.txt
done
TEST $CLI volume reset-brick $V1 $H0:$B0/${V1}1 start
EXPECT_WITHIN $PROCESS_DOWN_TIMEOUT 5 online_brick_count
EXPECT 1 count_brick_processes

# Simulate reset disk
for i in {1..5}
do
        rm -rf $B0/${V1}1/file$i.txt
done
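
# Removing the volume-id and gfid xattrs makes the brick look like a
# fresh disk, so the plain (non-force) commit below is expected to pass.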
setfattr -x trusted.glusterfs.volume-id $B0/${V1}1
setfattr -x trusted.gfid $B0/${V1}1

# Test reset-brick commit. Use CLI_IGNORE_PARTITION here: the regular CLI
# passes --wignore, which essentially turns the command into "commit force".
TEST $CLI_IGNORE_PARTITION volume reset-brick $V1 $H0:$B0/${V1}1 $H0:$B0/${V1}1 commit
EXPECT_WITHIN $PROCESS_UP_TIMEOUT 6 online_brick_count
EXPECT 1 count_brick_processes

cleanup;