From 29bccc2ed18eedc40e83d2f0d35327037a322384 Mon Sep 17 00:00:00 2001
From: Avra Sengupta
Date: Wed, 19 Feb 2014 16:30:11 +0530
Subject: gluster: GlusterFS Volume Snapshot Feature

This is the initial patch for the Snapshot feature. The current patch
includes the following features:
* Snapshot create
* Snapshot delete
* Snapshot restore
* Snapshot list
* Snapshot info
* Snapshot status
* Snapshot config

Change-Id: I2f46920c0d61c515f6a60e0f8b46fff886d9f6a9
BUG: 1061685
Signed-off-by: shishir gowda
Signed-off-by: Sachin Pandit
Signed-off-by: Vijaikumar M
Signed-off-by: Raghavendra Bhat
Signed-off-by: Rajesh Joseph
Signed-off-by: Joseph Fernandes
Signed-off-by: Avra Sengupta
Reviewed-on: http://review.gluster.org/7128
Tested-by: Gluster Build System
Reviewed-by: Vijay Bellur
---
 tests/basic/mgmt_v3-locks.t   | 121 ++++++++++++++++++++++++++++++++++++++++++
 tests/basic/volume-locks.t    | 106 ------------------------------------
 tests/basic/volume-snapshot.t |  95 +++++++++++++++++++++++++++++++++
 3 files changed, 216 insertions(+), 106 deletions(-)
 create mode 100644 tests/basic/mgmt_v3-locks.t
 delete mode 100755 tests/basic/volume-locks.t
 create mode 100755 tests/basic/volume-snapshot.t

diff --git a/tests/basic/mgmt_v3-locks.t b/tests/basic/mgmt_v3-locks.t
new file mode 100644
index 000000000..22ca27b9f
--- /dev/null
+++ b/tests/basic/mgmt_v3-locks.t
@@ -0,0 +1,121 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../cluster.rc
+
+function check_peers {
+        $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
+}
+
+function volume_count {
+        local cli=$1;
+        if [ "$cli" -eq 1 ] ; then
+                $CLI_1 volume info | grep 'Volume Name' | wc -l;
+        else
+                $CLI_2 volume info | grep 'Volume Name' | wc -l;
+        fi
+}
+
+function volinfo_field()
+{
+        local vol=$1;
+        local field=$2;
+
+        $CLI_1 volume info $vol | grep "^$field: " | sed 's/.*: //';
+}
+
+function two_diff_vols_create {
+        # Both volume creates should be successful
+        $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0 &
+        PID_1=$!
+
+        $CLI_2 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1 $H3:$B3/$V1 &
+        PID_2=$!
+
+        wait $PID_1 $PID_2
+}
+
+function two_diff_vols_start {
+        # Both volume starts should be successful
+        $CLI_1 volume start $V0 &
+        PID_1=$!
+
+        $CLI_2 volume start $V1 &
+        PID_2=$!
+
+        wait $PID_1 $PID_2
+}
+
+function two_diff_vols_stop_force {
+        # Force stop, so that even if rebalance from the
+        # removed bricks is in progress, stop can
+        # still go ahead. Both volume stops should
+        # be successful
+        $CLI_1 volume stop $V0 force &
+        PID_1=$!
+
+        $CLI_2 volume stop $V1 force &
+        PID_2=$!
+
+        wait $PID_1 $PID_2
+}
+
+function same_vol_remove_brick {
+
+        # Running two remove-brick commands on the same volume at the same
+        # time can result in two successes, two failures, or one success
+        # and one failure, all of which are valid. The only thing that
+        # shouldn't happen is a glusterd crash.
+
+        local vol=$1
+        local brick=$2
+        $CLI_1 volume remove-brick $vol $brick start &
+        $CLI_2 volume remove-brick $vol $brick start
+}
+
+cleanup;
+
+TEST launch_cluster 3;
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+
+EXPECT_WITHIN 20 2 check_peers
+
+two_diff_vols_create
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Created' volinfo_field $V1 'Status';
+
+two_diff_vols_start
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+same_vol_remove_brick $V0 $H2:$B2/$V0
+# Check that glusterd did not crash after the same remove-brick
+# was run on both nodes.
+EXPECT_WITHIN 20 2 check_peers
+
+same_vol_remove_brick $V1 $H2:$B2/$V1
+# Check that glusterd did not crash after the same remove-brick
+# was run on both nodes.
+EXPECT_WITHIN 20 2 check_peers
+
+$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
+$CLI_1 volume set $V1 diagnostics.client-log-level DEBUG
+kill_glusterd 3
+$CLI_1 volume status $V0
+$CLI_2 volume status $V1
+$CLI_1 peer status
+EXPECT_WITHIN 20 1 check_peers
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+TEST $glusterd_3
+$CLI_1 volume status $V0
+$CLI_2 volume status $V1
+$CLI_1 peer status
+#EXPECT_WITHIN 20 2 check_peers
+#EXPECT 'Started' volinfo_field $V0 'Status';
+#EXPECT 'Started' volinfo_field $V1 'Status';
+#two_diff_vols_stop_force
+#EXPECT_WITHIN 20 2 check_peers
+cleanup;
diff --git a/tests/basic/volume-locks.t b/tests/basic/volume-locks.t
deleted file mode 100755
index b9e94b7e1..000000000
--- a/tests/basic/volume-locks.t
+++ /dev/null
@@ -1,106 +0,0 @@
-#!/bin/bash
-
-. $(dirname $0)/../include.rc
-. $(dirname $0)/../cluster.rc
-
-function check_peers {
-        $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
-}
-
-function volume_count {
-        local cli=$1;
-        if [ $cli -eq '1' ] ; then
-                $CLI_1 volume info | grep 'Volume Name' | wc -l;
-        else
-                $CLI_2 volume info | grep 'Volume Name' | wc -l;
-        fi
-}
-
-function volinfo_field()
-{
-        local vol=$1;
-        local field=$2;
-
-        $CLI_1 volume info $vol | grep "^$field: " | sed 's/.*: //';
-}
-
-function two_diff_vols_create {
-        # Both volume creates should be successful
-        $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0 &
-        $CLI_2 volume create $V1 $H1:$B1/$V1 $H2:$B2/$V1 $H3:$B3/$V1
-}
-
-function two_diff_vols_start {
-        # Both volume starts should be successful
-        $CLI_1 volume start $V0 &
-        $CLI_2 volume start $V1
-}
-
-function two_diff_vols_stop_force {
-        # Force stop, so that if rebalance from the
-        # remove bricks is in progress, stop can
-        # still go ahead. Both volume stops should
-        # be successful
-        $CLI_1 volume stop $V0 force &
-        $CLI_2 volume stop $V1 force
-}
-
-function same_vol_remove_brick {
-
-        # Running two same vol commands at the same time can result in
-        # two success', two failures, or one success and one failure, all
-        # of which are valid. The only thing that shouldn't happen is a
-        # glusterd crash.
-
-        local vol=$1
-        local brick=$2
-        $CLI_1 volume remove-brick $1 $2 start &
-        $CLI_2 volume remove-brick $1 $2 start
-}
-
-cleanup;
-
-TEST launch_cluster 3;
-TEST $CLI_1 peer probe $H2;
-TEST $CLI_1 peer probe $H3;
-
-EXPECT_WITHIN 20 2 check_peers
-
-two_diff_vols_create
-EXPECT 'Created' volinfo_field $V0 'Status';
-EXPECT 'Created' volinfo_field $V1 'Status';
-
-two_diff_vols_start
-EXPECT 'Started' volinfo_field $V0 'Status';
-EXPECT 'Started' volinfo_field $V1 'Status';
-
-same_vol_remove_brick $V0 $H2:$B2/$V0
-# Checking glusterd crashed or not after same volume remove brick
-# on both nodes.
-EXPECT_WITHIN 20 2 check_peers
-
-same_vol_remove_brick $V1 $H2:$B2/$V1
-# Checking glusterd crashed or not after same volume remove brick
-# on both nodes.
-EXPECT_WITHIN 20 2 check_peers
-
-$CLI_1 volume set $V0 diagnostics.client-log-level DEBUG &
-$CLI_1 volume set $V1 diagnostics.client-log-level DEBUG
-kill_glusterd 3
-$CLI_1 volume status $V0
-$CLI_2 volume status $V1
-$CLI_1 peer status
-EXPECT_WITHIN 20 1 check_peers
-EXPECT 'Started' volinfo_field $V0 'Status';
-EXPECT 'Started' volinfo_field $V1 'Status';
-
-TEST $glusterd_3
-$CLI_1 volume status $V0
-$CLI_2 volume status $V1
-$CLI_1 peer status
-#EXPECT_WITHIN 20 2 check_peers
-#EXPECT 'Started' volinfo_field $V0 'Status';
-#EXPECT 'Started' volinfo_field $V1 'Status';
-#two_diff_vols_stop_force
-#EXPECT_WITHIN 20 2 check_peers
-cleanup;
diff --git a/tests/basic/volume-snapshot.t b/tests/basic/volume-snapshot.t
new file mode 100755
index 000000000..c826631ca
--- /dev/null
+++ b/tests/basic/volume-snapshot.t
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+. $(dirname $0)/../include.rc
+. $(dirname $0)/../volume.rc
+. $(dirname $0)/../cluster.rc
+. $(dirname $0)/../snapshot.rc
+
+V1="patchy2"
+
+function create_volumes() {
+        $CLI_1 volume create $V0 $H1:$L1 &
+        PID_1=$!
+
+        $CLI_2 volume create $V1 $H2:$L2 $H3:$L3 &
+        PID_2=$!
+
+        wait $PID_1 $PID_2
+}
+
+function create_snapshots() {
+        $CLI_1 snapshot create ${V0}_snap ${V0} &
+        PID_1=$!
+
+        $CLI_1 snapshot create ${V1}_snap ${V1} &
+        PID_2=$!
+
+        wait $PID_1 $PID_2
+}
+
+function delete_snapshots() {
+        $CLI_1 snapshot delete ${V0}_snap &
+        PID_1=$!
+
+        $CLI_1 snapshot delete ${V1}_snap &
+        PID_2=$!
+
+        wait $PID_1 $PID_2
+}
+
+function restore_snapshots() {
+        $CLI_1 snapshot restore ${V0}_snap &
+        PID_1=$!
+
+        $CLI_1 snapshot restore ${V1}_snap &
+        PID_2=$!
+
+        wait $PID_1 $PID_2
+}
+cleanup;
+
+#Create cluster with 3 nodes
+TEST launch_cluster 3;
+TEST setup_lvm 3
+
+TEST $CLI_1 peer probe $H2;
+TEST $CLI_1 peer probe $H3;
+EXPECT_WITHIN 20 2 peer_count;
+
+create_volumes
+EXPECT 'Created' volinfo_field $V0 'Status';
+EXPECT 'Created' volinfo_field $V1 'Status';
+
+start_volumes 2
+EXPECT 'Started' volinfo_field $V0 'Status';
+EXPECT 'Started' volinfo_field $V1 'Status';
+
+#Snapshot Operations
+create_snapshots
+TEST snapshot_exists 1 ${V0}_snap
+TEST snapshot_exists 1 ${V1}_snap
+TEST $CLI_1 snapshot config $V0 snap-max-hard-limit 100
+TEST $CLI_1 snapshot config $V1 snap-max-hard-limit 100
+
+TEST glusterfs -s $H1 --volfile-id=/snaps/${V0}_snap/${V0} $M0
+sleep 2
+TEST umount -f $M0
+TEST glusterfs -s $H2 --volfile-id=/snaps/${V1}_snap/${V1} $M0
+sleep 2
+TEST umount -f $M0
+
+#Clean up
+stop_force_volumes 2
+EXPECT 'Stopped' volinfo_field $V0 'Status';
+EXPECT 'Stopped' volinfo_field $V1 'Status';
+
+restore_snapshots
+TEST ! snapshot_exists 1 ${V0}_snap
+TEST ! snapshot_exists 1 ${V1}_snap
+
+delete_volumes 2
+TEST ! volume_exists $V0
+TEST ! volume_exists $V1
+
+cleanup;
+
--
cgit
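
For reference, the snapshot operations the commit message lists map onto the
gluster CLI roughly as sketched below. This is an illustrative sketch, not
part of the patch: the volume name "myvol" and the snapshot names are
placeholders, and a running glusterd with a started, LVM-backed volume is
assumed. Only subcommands exercised or named by this patch are used.

#!/bin/bash
# Illustrative walk-through of the snapshot CLI exercised by the tests above.
# "myvol", "myvol_snap1" and "myvol_snap2" are placeholder names.

gluster snapshot create myvol_snap1 myvol               # Snapshot create
gluster snapshot list myvol                             # Snapshot list
gluster snapshot info myvol_snap1                       # Snapshot info
gluster snapshot status myvol_snap1                     # Snapshot status
gluster snapshot config myvol snap-max-hard-limit 100   # Snapshot config
gluster snapshot delete myvol_snap1                     # Snapshot delete

# Snapshot restore: the origin volume must be stopped first, and a restored
# snapshot is consumed, which is why volume-snapshot.t stops the volumes
# before restore_snapshots and then asserts the snapshots no longer exist.
gluster snapshot create myvol_snap2 myvol
gluster volume stop myvol
gluster snapshot restore myvol_snap2
gluster volume start myvol

A snapshot can also be mounted read-only through its auto-generated volfile,
which is what the --volfile-id=/snaps/<snapname>/<volname> mounts in
volume-snapshot.t verify.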