From 722ed512220395af8a707756b49df67afacda795 Mon Sep 17 00:00:00 2001 From: Richard Wareing Date: Wed, 12 Feb 2014 18:37:55 -0800 Subject: xlators: add JSON FOP statistics dumps every N seconds Summary: - Adds a thread to the io-stats translator which dumps out statistics every N seconds where N is configurable by an option called "diagnostics.stats-dump-interval" - Thread cleanly starts/stops when translator is unloaded - Updates macros to use "Atomic Builtins" (e.g. intel CPU extensions) to use memory barriers to update counters vs using locks. This should reduce overhead and prevent any deadlock bugs due to lock contention. Test Plan: - Test on development machine - Run prove -v tests/basic/stats-dump.t Change-Id: If071239d8fdc185e4e8fd527363cc042447a245d BUG: 1266476 Signed-off-by: Jeff Darcy Reviewed-on: http://review.gluster.org/12209 Tested-by: NetBSD Build System Tested-by: Gluster Build System Reviewed-by: Avra Sengupta --- tests/basic/afr/arbiter.t | 8 ++++++++ tests/basic/stats-dump.t | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 tests/basic/stats-dump.t (limited to 'tests/basic') diff --git a/tests/basic/afr/arbiter.t b/tests/basic/afr/arbiter.t index 84d2ccece51..cecbc605541 100644 --- a/tests/basic/afr/arbiter.t +++ b/tests/basic/afr/arbiter.t @@ -9,6 +9,7 @@ TEST glusterd; TEST pidof glusterd # Non arbiter replica 3 volumes should not have arbiter-count option enabled. +TEST mkdir -p $B0/${V0}{0,1,2} TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2} TEST $CLI volume start $V0 TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 --entry-timeout=0 $M0; @@ -17,7 +18,14 @@ EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0 TEST $CLI volume stop $V0 TEST $CLI volume delete $V0 +# Make sure we clean up *all the way* so we don't get "brick X is already part +# of a volume" errors. +cleanup; +TEST glusterd; +TEST pidof glusterd + # Create and mount a replica 3 arbiter volume. 
+TEST mkdir -p $B0/${V0}{0,1,2} TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2} TEST $CLI volume set $V0 performance.write-behind off TEST $CLI volume set $V0 cluster.self-heal-daemon off diff --git a/tests/basic/stats-dump.t b/tests/basic/stats-dump.t new file mode 100644 index 00000000000..0a680e44e55 --- /dev/null +++ b/tests/basic/stats-dump.t @@ -0,0 +1,43 @@ +#!/bin/bash + +. $(dirname $0)/../include.rc +. $(dirname $0)/../volume.rc +. $(dirname $0)/../nfs.rc + +cleanup; + +TEST glusterd +TEST pidof glusterd +TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{0,1,2} +TEST $CLI volume set $V0 diagnostics.latency-measurement on +TEST $CLI volume set $V0 diagnostics.count-fop-hits on +TEST $CLI volume set $V0 diagnostics.stats-dump-interval 1 +TEST $CLI volume set $V0 nfs.disable off +TEST $CLI volume start $V0 +sleep 1 +TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0 --attribute-timeout=0 --entry-timeout=0 +TEST mount_nfs $H0:/$V0 $N0 nolock,soft,intr + +for i in {1..10};do + dd if=/dev/zero of=$M0/fuse_testfile$i bs=4k count=100 +done + +for i in {1..10};do + dd if=/dev/zero of=$N0/nfs_testfile$i bs=4k count=100 +done +sleep 2 + +# Verify we have non-zero write counts from the bricks, gNFSd +# and the FUSE mount +BRICK_OUTPUT="$(grep 'aggr.fop.write.count": "0"' ${GLUSTERD_WORKDIR}/stats/glusterfsd__d_backends_patchy?.dump)" +BRICK_RET="$?" +NFSD_OUTPUT="$(grep 'aggr.fop.write.count": "0"' ${GLUSTERD_WORKDIR}/stats/glusterfs_nfsd.dump)" +NFSD_RET="$?" +FUSE_OUTPUT="$(grep 'aggr.fop.write.count": "0"' ${GLUSTERD_WORKDIR}/stats/glusterfs_patchy.dump)" +FUSE_RET="$?" + +TEST [ 0 -ne "$BRICK_RET" ] +TEST [ 0 -ne "$NFSD_RET" ] +TEST [ 0 -ne "$FUSE_RET" ] + +cleanup; -- cgit