#!/bin/bash
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
create_files () {
        for i in {1..1000}; do
                orig=$(printf %s/abc%04d $1 $i)
                real=$(printf %s/src%04d $1 $i)
                # Make sure lots of these have linkfiles.
                echo "This is file $i" > $orig
                mv $orig $real
        done
        sync
}
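
# The create-then-rename pattern above is deliberate: when a file's new name
# hashes to a different DHT subvolume than its data lives on, DHT should leave
# the data in place and create a "linkto" pointer file on the newly hashed
# subvolume. That is the linkfile-heavy layout this test is trying to stress.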
move_files_inner () {
        sfile=$M0/status_$(basename $1)
        echo "running" > $sfile
        for i in {1..1000}; do
                src=$(printf %s/src%04d $1 $i)
                dst=$(printf %s/dst%04d $1 $i)
                mv $src $dst 2> /dev/null
        done
        echo "done" > $sfile
}
move_files () {
        move_files_inner $* &
}
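
# move_files backgrounds the rename loop so that renames from both clients run
# concurrently; completion is signalled through the status_* file, which the
# EXPECT_WITHIN checks below poll for "done".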
check_files () {
        errors=0
        for i in {1..1000}; do
                if [ ! -f $(printf %s/dst%04d $1 $i) ]; then
                        if [ -f $(printf %s/src%04d $1 $i) ]; then
                                echo "file $i didn't get moved" > /dev/stderr
                        else
                                echo "file $i is MISSING" > /dev/stderr
                                errors=$((errors+1))
                        fi
                fi
        done
        if [ $((errors)) != 0 ]; then
                # Debugging aid, disabled by the leading ":" no-op.
                : ls -l $1 > /dev/stderr
        fi
        return $errors
}
cleanup;
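
# Test flow: build a 3x2 distributed-replicated volume, populate it from one
# client, rename the files concurrently from two clients, then remount and
# verify that nothing was lost.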
TEST glusterd
TEST pidof glusterd
TEST $CLI volume info;
TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{1,2,3,4,5,6};
EXPECT "$V0" volinfo_field $V0 'Volume Name';
EXPECT 'Created' volinfo_field $V0 'Status';
EXPECT '6' brick_count $V0
TEST $CLI volume start $V0;
EXPECT 'Started' volinfo_field $V0 'Status';
## Mount FUSE with caching disabled (read-write)
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
TEST create_files $M0
## Mount a second FUSE client with caching disabled (read-write)
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M1;
TEST move_files $M0
TEST move_files $M1
# It's regrettable that renaming 1000 files might take more than 30 seconds,
# but on our test systems it sometimes does, so double the timeout we would
# use otherwise. Even then roughly 1 in 20 runs did not complete in time, so
# an extra 15 seconds was added to keep those spurious failures out of test
# runs.
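# EXPECT_WITHIN re-runs the "cat" until it prints "done" or the 75-second
# timeout expires, so a slow-but-successful run still passes.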
EXPECT_WITHIN 75 "done" cat $M0/status_0
EXPECT_WITHIN 75 "done" cat $M1/status_1
TEST umount $M0
TEST umount $M1
TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0;
TEST check_files $M0
TEST $CLI volume stop $V0;
EXPECT 'Stopped' volinfo_field $V0 'Status';
TEST $CLI volume delete $V0;
TEST ! $CLI volume info $V0;
cleanup;