#!/bin/bash
. $(dirname $0)/../include.rc
. $(dirname $0)/../volume.rc
. $(dirname $0)/../fdl.rc
# Verify that the FDL journal files rolled over correctly after the test
# wrote 20MB: exactly two meta journals, exactly two data journals, and a
# combined data size of 20MB (allowing one extra MB per file for metadata
# that 'du' rounds up on some filesystems).
# Globals (read): log_base, log_id; calls G_LOG for the test log.
# Returns: 0 if the journal layout looks right, 1 otherwise.
_check_sizes () {
	local n=0
	local sz
	local name
	local total_sz=0
	local meta_files
	# We don't care about the sizes of the meta files. That would be
	# embedding too much of the implementation into the test.
	# Count via glob instead of parsing 'ls' output; an unmatched glob
	# stays literal, so check the first entry actually exists.
	meta_files=( "${log_base}/${log_id}"-meta-*.jnl )
	[ -e "${meta_files[0]}" ] || return 1
	[ "${#meta_files[@]}" -eq 2 ] || return 1
	# We *do* care about the sizes of the data files, which should exactly
	# reflect the amount of data written via dd.
	n=0
	while read -r sz name; do
		G_LOG "found journal ${name} size ${sz}MB"
		n=$((n+1))
		total_sz=$((total_sz+sz))
	done < <(du -sm "${log_base}/${log_id}"-data-*.jnl)
	[ "$n" -eq 2 ] || return 1
	# On our CentOS and NetBSD regression-test systems, but not on my Fedora
	# development system, each file ends up being slightly larger than its
	# data size because of metadata, and 'du' rounds that up to a full extra
	# megabyte. We'll allow either result, because what we're really
	# looking for is a complete failure to roll over from one file to
	# another at the appropriate size.
	# Use '||' between separate tests instead of the deprecated,
	# ambiguous '-o' operator inside '[ ]'.
	[ "$total_sz" -eq 20 ] || [ "$total_sz" -eq $((n+20)) ] || return 1
	return 0
}
# Wrapper around _check_sizes that turns on command tracing ('set -x') so
# a failure shows the individual checks in the regression-test log.
# Returns: the exit status of _check_sizes.
check_sizes () {
	local ret
	set -x
	_check_sizes
	ret=$?
	set +x
	# Bug fix: the original 'return ret' passed the literal string "ret",
	# which bash rejects ("numeric argument required") instead of
	# propagating the real status.  Expand the variable.
	return $ret
}
# NetBSD's dd does not create the output file by default, so it needs an
# explicit 'creat' oflag; elsewhere the flag is unnecessary.
# NOTE(review): this matches on OSTYPE being exactly "NetBSD" — presumably
# set by the test harness, since bash itself usually reports a lowercase
# value like "netbsd9.0"; confirm against include.rc.
if [ x"$OSTYPE" = x"NetBSD" ]; then
CREAT_OFLAG="creat,"
else
CREAT_OFLAG=""
fi
# Start from a clean slate: remove any logs left over from a previous run,
# then bring up the management daemon.  TEST is the harness macro (from
# include.rc) that runs a command and fails the test if it fails.
TEST rm -f ${log_base}/${log_id}-*.log
TEST glusterd
TEST pidof glusterd
# Get a simple volume set up and mounted with FDL active.
TEST $CLI volume create $V0 ${H0}:${B0}/${V0}-0
TEST $CLI volume set $V0 changelog.changelog off
TEST $CLI volume set $V0 features.fdl on
TEST $CLI volume start $V0
TEST $GFS -s $H0 --volfile-id $V0 $M0
# Generate some I/O and unmount/stop so we can see log sizes.
# Write exactly 20MB synchronously; _check_sizes later asserts the FDL data
# journals total 20MB (plus per-file rounding) across two files.
TEST dd if=/dev/zero of=$M0/twentyMB bs=1048576 count=20 \
oflag=${CREAT_OFLAG}sync
TEST umount $M0
TEST $CLI volume stop $V0
# Validate the journal rollover behavior exercised above.
TEST _check_sizes
cleanup