author | Jeff Darcy <jdarcy@redhat.com> | 2016-12-08 16:24:15 -0500
---|---|---
committer | Vijay Bellur <vbellur@redhat.com> | 2017-01-30 19:13:58 -0500
commit | 1a95fc3036db51b82b6a80952f0908bc2019d24a (patch) |
tree | b983ac196a8165d5cb5e860a5ef97d3e9a41b5c9 /tests/bitrot/bug-1373520.t |
parent | 7f7d7a939e46b330a084d974451eee4757ba61b4 (diff) |
core: run many bricks within one glusterfsd process
This patch adds support for multiple brick translator stacks running
in a single brick server process. This reduces our per-brick memory usage by
approximately 3x, and our appetite for TCP ports even more. It also creates
the potential to avoid process/thread thrashing, and to improve QoS by
scheduling more carefully across the bricks, but realizing that potential
will require further work.
Multiplexing is controlled by the "cluster.brick-multiplex" global option. By
default it's off, and bricks are started in separate processes as before. If
multiplexing is enabled, then *compatible* bricks (mostly those with the same
transport options) will be started in the same process.
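
For context, `cluster.brick-multiplex` is a global option, so it is set against
the special volume name `all` rather than a single volume. A minimal sketch of
toggling it from the gluster CLI, assuming the standard `volume set` syntax:

```bash
# Enable brick multiplexing cluster-wide; compatible bricks started
# afterwards share a single glusterfsd process.
gluster volume set all cluster.brick-multiplex on

# Turn it back off; new bricks start in separate processes, as before.
gluster volume set all cluster.brick-multiplex off
```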
Change-Id: I45059454e51d6f4cbb29a4953359c09a408695cb
BUG: 1385758
Signed-off-by: Jeff Darcy <jdarcy@redhat.com>
Reviewed-on: https://review.gluster.org/14763
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Diffstat (limited to 'tests/bitrot/bug-1373520.t')
-rw-r--r-- | tests/bitrot/bug-1373520.t | 42 |
1 file changed, 31 insertions, 11 deletions
```diff
diff --git a/tests/bitrot/bug-1373520.t b/tests/bitrot/bug-1373520.t
index 5e69ab78fe9..271bb3de287 100644
--- a/tests/bitrot/bug-1373520.t
+++ b/tests/bitrot/bug-1373520.t
@@ -17,7 +17,7 @@ EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Started' volinfo_field $V0 'Status'
 TEST $CLI volume set $V0 performance.stat-prefetch off
 
 #Mount the volume
-TEST glusterfs --entry-timeout=0 --attribute-timeout=0 -s $H0 --volfile-id $V0 $M0
+TEST $GFS -s $H0 --volfile-id $V0 $M0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
 
 #Enable bitrot
@@ -46,19 +46,39 @@ TEST $CLI volume start $V0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "6" ec_child_up_count $V0 0
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" get_bitd_count
 
-#Trigger lookup so that bitrot xlator marks file as bad in its inode context.
-TEST stat $M0/FILE1
-
 #Delete file and all links from backend
-TEST stat $B0/${V0}5/FILE1
-TEST `ls -li $B0/${V0}5/FILE1 | awk '{print $1}' | xargs find $B0/${V0}5/ -inum | xargs -r rm -rf`
+TEST rm -rf $(find $B0/${V0}5 -inum $(stat -c %i $B0/${V0}5/FILE1))
+
+# The test for each file below used to look like this:
+#
+# TEST stat $M0/FILE1
+# EXPECT_WITHIN $HEAL_TIMEOUT "$SIZE" stat $B0/${V0}5/FILE1
+#
+# That didn't really work, because EXPECT_WITHIN would bail immediately if
+# 'stat' returned an error - which it would if the file wasn't there yet.
+# Since changing this, I usually see at least a few retries, and sometimes more
+# than twenty, before the check for HL_FILE1 succeeds. The 'ls' is also
+# necessary, to force a name heal as well as data. With both that and the
+# 'stat' on $M0 being done here for every retry, there's no longer any need to
+# have them elsewhere.
+#
+# If we had EW_RETRIES support (https://review.gluster.org/#/c/16451/) we could
+# use it here to see how many retries are typical on the machines we use for
+# regression, and set an appropriate upper bound. As of right now, though,
+# that support does not exist yet.
+ugly_stat () {
+    local client_dir=$1
+    local brick_dir=$2
+    local bare_file=$3
+
+    ls $client_dir
+    stat -c %s $client_dir/$bare_file
+    stat -c %s $brick_dir/$bare_file 2> /dev/null || echo "UNKNOWN"
+}
 
 #Access files
-TEST cat $M0/FILE1
-EXPECT_WITHIN $HEAL_TIMEOUT "$SIZE" stat -c %s $B0/${V0}5/FILE1
-
-TEST cat $M0/HL_FILE1
-EXPECT_WITHIN $HEAL_TIMEOUT "$SIZE" stat -c %s $B0/${V0}5/HL_FILE1
+EXPECT_WITHIN $HEAL_TIMEOUT "$SIZE" ugly_stat $M0 $B0/${V0}5 FILE1
+EXPECT_WITHIN $HEAL_TIMEOUT "$SIZE" ugly_stat $M0 $B0/${V0}5 HL_FILE1
 
 cleanup;
 #G_TESTDEF_TEST_STATUS_NETBSD7=BAD_TEST,BUG=1417540
```
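
The replacement one-liner in the second hunk deletes a file together with all
of its hard links by resolving the file's inode number and then removing every
directory entry on the brick that shares it. A standalone sketch of the same
technique, with hypothetical paths standing in for the test's $B0/${V0}5 and
FILE1:

```bash
#!/bin/bash
# Delete a file and every hard link to it within one directory tree.
# /bricks/b5 and datafile are hypothetical stand-ins for the test's
# brick path and file name.
brick=/bricks/b5
inum=$(stat -c %i "$brick/datafile")          # inode number shared by all links
find "$brick" -inum "$inum" -exec rm -f {} +  # unlink every entry with that inode
```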