#!/bin/bash
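
# Verify 'volume heal' behaviour on a 3x2 distributed-replicate volume:
# writes made while one brick of each replica pair is down must show up
# as pending heals, and index/full heal must drain them.
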
. $(dirname $0)/../include.rc
. $(dirname $0)/../volume.rc
cleanup;
TEST glusterd
TEST pidof glusterd
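
# 6 bricks with replica 2 -> 3 distribute subvolumes of 2 bricks each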
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1,2,3,4,5}
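# Disable client-side background heals and eager locking so the
# pending-heal accounting below stays predictable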
TEST $CLI volume set $V0 cluster.background-self-heal-count 0
TEST $CLI volume set $V0 cluster.eager-lock off
TEST $CLI volume start $V0
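# FUSE mount with attribute/entry caching disabled so every access hits the bricks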
TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0
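
# Kill one brick in each of the three replica pairs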
TEST kill_brick $V0 $H0 $B0/${V0}0
TEST kill_brick $V0 $H0 $B0/${V0}2
TEST kill_brick $V0 $H0 $B0/${V0}4
cd $M0
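
# Create files and nested dirs while the bricks are down, tracking how many
# entries should need heal afterwards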
HEAL_FILES=0
for i in {1..10}
do
    dd if=/dev/urandom of=f bs=1M count=10 2>/dev/null
    HEAL_FILES=$(($HEAL_FILES+1)) #Each file lands on one distribute subvol
    mkdir a; cd a
    HEAL_FILES=$(($HEAL_FILES+3)) #A new dir is created on all 3 distribute subvols
done
HEAL_FILES=$(($HEAL_FILES + 3)) #The volume root dir also needs an entry heal on each subvol
cd ~
EXPECT "$HEAL_FILES" afr_get_pending_heal_count $V0
TEST ! $CLI volume heal $V0
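
# With the self-heal daemon disabled, both 'heal info' and 'heal' should fail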
TEST $CLI volume set $V0 cluster.self-heal-daemon off
TEST ! $CLI volume heal $V0 info
TEST ! $CLI volume heal $V0
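
# Restart the killed bricks and re-enable the self-heal daemon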
TEST $CLI volume start $V0 force
TEST $CLI volume set $V0 cluster.self-heal-daemon on
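
# Wait for the daemon to come up and connect to the restarted bricks (children 0, 2, 4)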
EXPECT_WITHIN 20 "Y" glustershd_up_status
EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 0
EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 2
EXPECT_WITHIN 20 "1" afr_child_up_status_in_shd $V0 4
TEST $CLI volume heal $V0
sleep 5 #No heal-statistics command to poll yet, so give index heal time to run
#Check that index heal made progress: pending count must drop below the original
TEST [ $HEAL_FILES -gt $(afr_get_pending_heal_count $V0) ]
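
# A full heal crawls the whole volume and should leave nothing pending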
TEST $CLI volume heal $V0 full
EXPECT_WITHIN 30 "0" afr_get_pending_heal_count $V0
cleanup