| author | Mohit Agrawal <moagrawa@redhat.com> | 2018-03-12 19:43:15 +0530 |
|---|---|---|
| committer | Raghavendra G <rgowdapp@redhat.com> | 2018-04-19 04:31:51 +0000 |
| commit | 0043c63f70776444f69667a4ef9596217ecb42b7 (patch) | |
| tree | e6c239e4b27198d40bca329edcce317ded59de09 /tests | |
| parent | be26b0da2f1a7fe336400de6a1c016716983bd38 (diff) | |
gluster: brick process sometimes crashes when stopping a brick
Problem: The brick process sometimes crashes while stopping a brick
when brick mux is enabled.
Solution: The brick process was crashing because RPC connections were not
cleaned up properly when brick mux is enabled. With this patch, after sending
the GF_EVENT_CLEANUP notification to the (server) xlator, the process waits
until all RPC client connections for that specific xlator are destroyed. Once
server_rpc_notify has destroyed the RPC connections of every client associated
with that brick, xlator_mem_cleanup is called for the brick xlator as well as
all of its child xlators. To avoid races during cleanup, two new flags are
introduced in each xlator: cleanup_starting and call_cleanup.
BUG: 1544090
Signed-off-by: Mohit Agrawal <moagrawa@redhat.com>
Note: All test cases were run in a separate build (https://review.gluster.org/#/c/19700/)
with the same patch after forcefully enabling brick mux, and they all passed.
Change-Id: Ic4ab9c128df282d146cf1135640281fcb31997bf
updates: bz#1544090
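The cleanup change itself lives in the brick/server code and is not part of this tests-only diff. As a hedged illustration of the scenario it fixes, the sketch below (not taken from this patch) stops one volume while another keeps the multiplexed brick process alive; it assumes the standard glusterfs regression harness (include.rc, volume.rc, TEST, EXPECT, EXPECT_WITHIN, $CLI, $V0, $V1, $H0, $B0) and the count_brick_processes helper, and the relative source paths assume a test placed two directory levels under tests/.

```bash
#!/bin/bash
# Hypothetical reproduction-style test: stopping a volume must not crash the
# shared brick process when brick multiplexing is enabled.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc

cleanup

TEST glusterd
TEST pidof glusterd

# Force every brick into a single multiplexed glusterfsd process.
TEST $CLI volume set all cluster.brick-multiplex on

TEST $CLI volume create $V0 $H0:$B0/${V0}{1..3}
TEST $CLI volume create $V1 $H0:$B0/${V1}{1..3}
TEST $CLI volume start $V0
TEST $CLI volume start $V1

# With multiplexing on, both volumes share one brick process.
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" count_brick_processes

# Stopping one volume detaches its bricks; the shared process must survive
# (before the fix, the RPC-cleanup race could crash it here).
TEST $CLI volume stop $V0
EXPECT "1" count_brick_processes

cleanup
```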
Diffstat (limited to 'tests')
-rw-r--r-- | tests/bugs/glusterd/rebalance-operations-in-single-node.t | 2
-rw-r--r-- | tests/volume.rc | 31
2 files changed, 30 insertions, 3 deletions
diff --git a/tests/bugs/glusterd/rebalance-operations-in-single-node.t b/tests/bugs/glusterd/rebalance-operations-in-single-node.t
index c0823afebb8..9144b4a5000 100644
--- a/tests/bugs/glusterd/rebalance-operations-in-single-node.t
+++ b/tests/bugs/glusterd/rebalance-operations-in-single-node.t
@@ -119,7 +119,7 @@ TEST touch $M0/dir{21..30}/files{1..10};
 
 TEST $CLI volume add-brick $V0 $H0:$B0/${V0}{7,8}
 TEST $CLI volume rebalance $V0 start force
-EXPECT_WITHIN 60 "completed" rebalance_status_field $V0
+EXPECT_WITHIN 90 "completed" rebalance_status_field $V0
 
 TEST pkill gluster
 TEST glusterd
diff --git a/tests/volume.rc b/tests/volume.rc
index 44428606711..ea8cfb666a1 100644
--- a/tests/volume.rc
+++ b/tests/volume.rc
@@ -801,26 +801,53 @@ function count_sh_entries()
     ls $1/.glusterfs/indices/xattrop | grep -v "xattrop-" | wc -l
 }
 
+function check_brick_multiplex() {
+        cnt="$(ls /var/log/glusterfs/bricks|wc -l)"
+        local ret=$($CLI volume info|grep "cluster.brick-multiplex"|cut -d" " -f2)
+        local bcnt="$(brick_count)"
+
+        if [ $bcnt -ne 1 ]; then
+           if [ "$ret" = "on" ] || [ $cnt -eq 1 ]; then
+              echo "Y"
+           else
+              echo "N"
+           fi
+        else
+           echo "N"
+        fi
+}
+
 function get_fd_count {
         local vol=$1
         local host=$2
         local brick=$3
         local fname=$4
+        local val="$(check_brick_multiplex)"
         local gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brick/$fname))
         local statedump=$(generate_brick_statedump $vol $host $brick)
-        local count=$(grep "gfid=$gfid_str" $statedump -A2 -B1 | grep $brick -A3 | grep -w fd-count | cut -f2 -d'=' | tail -1)
+        if [ $val == "N" ]; then
+           count=$(grep "gfid=$gfid_str" $statedump -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
+        else
+           count=$(grep "${brick}.active.1" -A3 $statedump | grep "gfid=$gfid_str" -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
+        fi
         rm -f $statedump
         echo $count
 }
 
+
 function get_active_fd_count {
         local vol=$1
         local host=$2
         local brick=$3
         local fname=$4
+        local val="$(check_brick_multiplex)"
         local gfid_str=$(gf_gfid_xattr_to_str $(gf_get_gfid_xattr $brick/$fname))
         local statedump=$(generate_brick_statedump $vol $host $brick)
-        local count=$(grep "gfid=$gfid_str" $statedump -A2 -B1 | grep $brick -A3 | grep -w active-fd-count | cut -f2 -d'=' | tail -1)
+        if [ $val == "N" ]; then
+           count=$(grep "gfid=$gfid_str" $statedump -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
+        else
+           count=$(grep "${brick}.active.1" -A3 $statedump | grep "gfid=$gfid_str" -A2 | grep fd-count | cut -f2 -d'=' | tail -1)
+        fi
         rm -f $statedump
         echo $count
 }
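The new check_brick_multiplex helper answers "Y" only when the volume has more than one brick and either cluster.brick-multiplex is on or all bricks log to a single file, and get_fd_count/get_active_fd_count use that answer to pick the right statedump section. Below is a hedged usage sketch, not part of this patch; the replica-3 layout, mount command, and file name are illustrative assumptions, and the source paths assume a test placed two directory levels under tests/.

```bash
#!/bin/bash
# Hypothetical example of the updated tests/volume.rc helpers.
. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc

cleanup

TEST glusterd
TEST $CLI volume create $V0 replica 3 $H0:$B0/${V0}{1..3} force
TEST $CLI volume start $V0
TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0

# Hold a file descriptor open so the brick reports fd-count=1 for the file.
TEST touch $M0/file1
exec 5>"$M0/file1"

# Prints "Y" or "N" depending on whether the bricks share one multiplexed process.
echo "brick multiplex: $(check_brick_multiplex)"

# get_fd_count generates a brick statedump and extracts fd-count for the
# file's gfid, reading the per-brick ("${brick}.active.1") section when
# multiplexing is enabled.
EXPECT "1" get_fd_count $V0 $H0 $B0/${V0}1 file1

exec 5>&-   # release the fd
cleanup
```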