From e125e2ae61c31da798ea9a7342ea9292f47c1d6b Mon Sep 17 00:00:00 2001
From: Krutika Dhananjay
Date: Tue, 19 Feb 2013 12:11:57 +0530
Subject: glusterd: Mark vol as deleted by renaming voldir before cleaning up
 the store

PROBLEM:

During 'volume delete', when glusterd fails to erase all information
about a volume from the backend store (for instance because rmdir()
failed on non-empty directories), not only does volume delete fail on
that node, but also subsequent attempts to restart glusterd fail
because the volume store is left in an inconsistent state.

FIX:

Rename the volume directory path to a new location
<working-dir>/trash/<volume-id>.deleted, and then go on to clean up
its contents. The volume is considered deleted once rename() succeeds,
irrespective of whether the cleanup succeeds or not.

Change-Id: Iaf18e1684f0b101808bd5e1cd53a5d55790541a8
BUG: 889630
Signed-off-by: Krutika Dhananjay
Reviewed-on: http://review.gluster.org/4639
Reviewed-by: Amar Tumballi
Reviewed-by: Kaushal M
Reviewed-by: Jeff Darcy
Tested-by: Gluster Build System
---
 tests/bugs/bug-889630.t | 56 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)
 create mode 100755 tests/bugs/bug-889630.t

(limited to 'tests')

diff --git a/tests/bugs/bug-889630.t b/tests/bugs/bug-889630.t
new file mode 100755
index 000000000..b04eb3407
--- /dev/null
+++ b/tests/bugs/bug-889630.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+
+function check_peers {
+    $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function volume_count {
+    local cli=$1;
+    if [ $cli -eq '1' ] ; then
+        $CLI_1 volume info | grep 'Volume Name' | wc -l;
+    else
+        $CLI_2 volume info | grep 'Volume Name' | wc -l;
+    fi
+}
+
+cleanup;
+
+TEST launch_cluster 2;
+TEST $CLI_1 peer probe $H2;
+
+EXPECT_WITHIN 20 1 check_peers
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0
+TEST $CLI_1 volume start $V0
+
+b="B1";
+
+#Create an extra file in the originator's volume store
+touch ${!b}/glusterd/vols/$V0/run/file
+
+TEST $CLI_1 volume stop $V0
+#Test for self-commit failure
+TEST $CLI_1 volume delete $V0
+
+#Check whether delete succeeded on both the nodes
+EXPECT "0" volume_count '1'
+EXPECT "0" volume_count '2'
+
+#Check whether the volume name can be reused after deletion
+TEST $CLI_1 volume create $V0 $H1:$B1/${V0}1 $H2:$B2/${V0}1
+TEST $CLI_1 volume start $V0
+
+#Create an extra file in the peer's volume store
+touch ${!b}/glusterd/vols/$V0/run/file
+
+TEST $CLI_1 volume stop $V0
+#Test for commit failure on the other node
+TEST $CLI_2 volume delete $V0
+
+EXPECT "0" volume_count '1';
+EXPECT "0" volume_count '2';
+
+cleanup;
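
Note on the fix itself: the C changes to glusterd are part of this commit
but are not shown above (the diff is limited to 'tests'). What the commit
message describes boils down to a rename-before-cleanup pattern. Below is
a minimal shell sketch of that pattern, under stated assumptions: the
delete_voldir helper and the exact path layout are illustrative only, not
the actual glusterd code.

#!/bin/bash
# Sketch of the rename-before-cleanup pattern from the commit message.
# Hypothetical names and paths; NOT the actual glusterd implementation.

delete_voldir () {
    local workdir=$1 volid=$2
    local voldir=$workdir/vols/$volid
    local trash=$workdir/trash/$volid.deleted

    mkdir -p "$workdir/trash"

    # Step 1: the rename marks the volume deleted. This is the only step
    # whose failure fails the 'volume delete' operation.
    mv "$voldir" "$trash" || return 1

    # Step 2: best-effort cleanup. Failure here is non-fatal, because the
    # live store under vols/ is already consistent.
    rm -rf "$trash" || echo "cleanup of $trash failed" >&2

    return 0
}

# Hypothetical usage: delete_voldir /var/lib/glusterd $volume_id

The design relies on rename() being atomic within a filesystem: once it
succeeds, the volume can no longer be half-present in the live store, so a
failed cleanup leaves at worst stale data under trash/ rather than the
inconsistent state that previously broke subsequent glusterd restarts.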