From 8dfdecf220d1c9365e1f8d6af9ead5e48c61e2eb Mon Sep 17 00:00:00 2001
From: Jeff Darcy
Date: Fri, 15 Sep 2017 06:59:01 -0700
Subject: Replace namespace/io-stats/io-threads with 3.6-fb versions

This rolls up multiple patches related to namespace identification and
throttling/QoS. This primarily includes the following, all by Michael
Goulet.

        io-threads: Add weighted round robin queueing by namespace
        https://phabricator.facebook.com/D5615269

        io-threads: Add per-namespace queue sizes to IO_THREADS_QUEUE_SIZE_KEY
        https://phabricator.facebook.com/D5683162

        io-threads: Implement better slot allocation algorithm
        https://phabricator.facebook.com/D5683186

        io-threads: Only enable weighted queueing on bricks
        https://phabricator.facebook.com/D5700062

        io-threads: Update queue sizes on drain
        https://phabricator.facebook.com/D5704832

        Fix parsing (-1) as default NS weight
        https://phabricator.facebook.com/D5723383

Parts of the following patches have also been applied to satisfy
dependencies.

        io-throttling: Calculate moving averages and throttle offending hosts
        https://phabricator.fb.com/D2516161
        Shreyas Siravara

        Hook up ODS logging for FUSE clients.
        https://phabricator.facebook.com/D3963376
        Kevin Vigor

        Add the flag --skip-nfsd-start to skip the NFS daemon starting,
        even if it is enabled
        https://phabricator.facebook.com/D4575368
        Alex Lorca

There are also some "standard" changes: dealing with code that moved,
reindenting to comply with Gluster coding standards, gf_uuid_xxx, etc.

This patch *does* revert some changes which have occurred upstream since
3.6; these will be re-applied as appropriate on top of this new base.

Change-Id: I69024115da7a60811e5b86beae781d602bdb558d
Signed-off-by: Jeff Darcy
---
 tests/basic/afr/durability-off.t | 12 ++++++++++--
 tests/basic/fop-sampling.t       |  4 +++-
 tests/basic/stats-dump.t         |  4 +++-
 3 files changed, 16 insertions(+), 4 deletions(-)

(limited to 'tests/basic')

diff --git a/tests/basic/afr/durability-off.t b/tests/basic/afr/durability-off.t
index 155ffa09ef0..0c4f470079a 100644
--- a/tests/basic/afr/durability-off.t
+++ b/tests/basic/afr/durability-off.t
@@ -5,6 +5,14 @@
 . $(dirname $0)/../../include.rc
 . $(dirname $0)/../../volume.rc
 
+did_fsync () {
+        local count=$($CLI volume profile $V0 info | grep -w FSYNC | wc -l)
+        if [ "$count" != "0" ]; then
+                echo "Y"
+        else
+                echo "N"
+        fi
+}
 
 cleanup;
 TEST glusterd
@@ -24,7 +32,7 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
 EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
-EXPECT "^0$" echo $($CLI volume profile $V0 info | grep -w FSYNC | wc -l)
+EXPECT "N" did_fsync
 
 #Test that fsyncs happen when durability is on
 TEST $CLI volume set $V0 cluster.ensure-durability on
@@ -39,6 +47,6 @@ EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
 TEST $CLI volume heal $V0
 EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
-EXPECT "^2$" echo $($CLI volume profile $V0 info | grep -w FSYNC | wc -l)
+EXPECT "Y" did_fsync
 
 cleanup;
diff --git a/tests/basic/fop-sampling.t b/tests/basic/fop-sampling.t
index e429fd8cb07..a1b3edc3d5d 100644
--- a/tests/basic/fop-sampling.t
+++ b/tests/basic/fop-sampling.t
@@ -13,7 +13,7 @@ function check_path {
         op=$1
         path=$2
         file=$3
-        grep $op $file | awk -F, '{print $11}' | grep $path 2>&1 > /dev/null
+        grep $op $file | awk -F, '{print $12}' | grep $path 2>&1 > /dev/null
         if [ $? -eq 0 ]; then
                 echo "Y"
         else
@@ -106,6 +106,8 @@ for dir in "$N0" "$M0"; do
         rm $dir/file2
 done;
 
+read -p "Continue? " nothing
+
 EXPECT_WITHIN 10 "Y" check_path CREATE /file1 $BRICK_SAMPLES
 EXPECT_WITHIN 10 "Y" check_path LOOKUP /file1 $BRICK_SAMPLES
 EXPECT_WITHIN 10 "Y" check_path SETATTR /file1 $BRICK_SAMPLES
diff --git a/tests/basic/stats-dump.t b/tests/basic/stats-dump.t
index af1ad34702b..a188a45eea1 100644
--- a/tests/basic/stats-dump.t
+++ b/tests/basic/stats-dump.t
@@ -39,7 +39,9 @@ FUSE_RET="$?"
 
 # Test that io-stats is getting queue sizes from io-threads
 TEST grep 'queue_size' ${GLUSTERD_WORKDIR}/stats/glusterfs_nfsd_$V0.dump
-TEST ! grep 'queue_size' ${GLUSTERD_WORKDIR}/stats/glusterfsd__d_backends_patchy?.dump
+
+# We should be getting queue sizes on bricks now too.
+TEST grep 'queue_size' ${GLUSTERD_WORKDIR}/stats/glusterfsd__d_backends_patchy?.dump
 
 TEST [ 0 -ne "$BRICK_RET" ]
 TEST [ 0 -ne "$NFSD_RET" ]
-- 
cgit
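
Note for reviewers: if you want to verify the new brick-side queue_size
output by hand, a retry-friendly check in the same style as did_fsync
above could look like the sketch below. This is illustrative only:
saw_queue_size is a hypothetical helper name, not something this patch
adds; the dump path pattern is the one already used in stats-dump.t, and
EXPECT_WITHIN comes from the test framework's include.rc.

        # Hypothetical helper mirroring did_fsync: prints "Y" when at least
        # one brick stats dump contains a queue_size line, "N" otherwise.
        saw_queue_size () {
                local count=$(grep 'queue_size' \
                        ${GLUSTERD_WORKDIR}/stats/glusterfsd__d_backends_patchy?.dump \
                        2>/dev/null | wc -l)
                if [ "$count" != "0" ]; then
                        echo "Y"
                else
                        echo "N"
                fi
        }

        # The dumps are rewritten on an interval, so poll instead of
        # asserting once:
        EXPECT_WITHIN 10 "Y" saw_queue_size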