author      Jeff Darcy <jdarcy@redhat.com>    2017-02-03 10:51:21 -0500
committer   Jeff Darcy <jdarcy@redhat.com>    2017-02-10 08:16:48 -0500
commit      f1c6ae24361b1bf39794a34ea35a0202a6b49fa6 (patch)
tree        92a7072ef677443ee45bde29b2041e3063fb6347 /tests/bugs
parent      421a098d2acfd4b837d4c03ea6f69987c670d3f7 (diff)
glusterd: keep snapshot bricks separate from regular ones
The problem here is that a volume's transport options can change, but
its snapshots' bricks don't follow along, which leaves them incompatible
(with respect to multiplexing) with the volume's regular bricks. This
was causing the USS+SSL test to fail. By keeping snapshot bricks
separate (though still potentially multiplexed with other snapshot
bricks, including those of other volumes) we can ensure that they
remain unaffected by changes to their parent volumes.
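
To make the invariant concrete, here is a minimal shell sketch (not
part of this patch) of the property the fix enforces: no regular brick
of a volume should share a process with any snapshot brick. It reuses
the test framework's $CLI, $V0, and $GLUSTERD_WORKDIR variables and the
same PID column ($6 of 'volume status') that the test's awk uses; the
snaps/ pidfile layout is an assumption.

    # Sketch only, not from this patch: with multiplexing on, a regular
    # brick and a snapshot brick must never end up in the same process.
    vol_pids=$($CLI volume status $V0 | awk '$1 == "Brick" && $6 != "N/A" { print $6 }')
    for pidfile in $(find $GLUSTERD_WORKDIR/snaps -name '*.pid' 2>/dev/null); do
        snap_pid=$(cat $pidfile)
        for p in $vol_pids; do
            TEST [ "$p" != "$snap_pid" ]    # PIDs must differ
        done
    done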
Also fixed several issues with how the test waited (or, more precisely,
failed to wait) for various events to complete before continuing.
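
For context, EXPECT_WITHIN (defined in tests/include.rc) is the polling
helper these fixes switch to; a simplified sketch of the idea, not the
real implementation:

    # Simplified sketch of EXPECT_WITHIN: retry a check once per second
    # until its output matches the expected value or the timeout expires.
    expect_within_sketch ()
    {
        local timeout=$1 expected=$2
        shift 2
        local i
        for i in $(seq 1 $timeout); do
            if [ "$("$@")" = "$expected" ]; then
                return 0    # matched in time
            fi
            sleep 1
        done
        return 1            # never matched before the timeout
    }

    # Usage mirrors the real helper, e.g.:
    # expect_within_sketch $PROCESS_UP_TIMEOUT "Y" file_exists $M0/file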
Change-Id: Iab4a8a44fac5760373fac36956a3bcc27cf969da
BUG: 1385758
Signed-off-by: Jeff Darcy <jdarcy@redhat.com>
Reviewed-on: https://review.gluster.org/16544
Smoke: Gluster Build System <jenkins@build.gluster.org>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
CentOS-regression: Gluster Build System <jenkins@build.gluster.org>
Reviewed-by: Avra Sengupta <asengupt@redhat.com>
Tested-by: Avra Sengupta <asengupt@redhat.com>
Diffstat (limited to 'tests/bugs')
-rwxr-xr-x  tests/bugs/snapshot/bug-1399598-uss-with-ssl.t | 36 +++++++++++++++++++++++-------------
1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t b/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t
index 1c50f746527..7d6252638b5 100755
--- a/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t
+++ b/tests/bugs/snapshot/bug-1399598-uss-with-ssl.t
@@ -16,6 +16,13 @@ function volume_online_brick_count
     $CLI volume status $V0 | awk '$1 == "Brick" && $6 != "N/A" { print $6}' | wc -l;
 }
 
+function total_online_bricks
+{
+    # This will count snapd, which isn't really a brick, but callers can
+    # account for that so it's OK.
+    find $GLUSTERD_WORKDIR -name '*.pid' | wc -l
+}
+
 cleanup;
 
 # Initialize the test setup
@@ -26,15 +33,17 @@ TEST create_self_signed_certs
 
 # Start glusterd
 TEST glusterd
 TEST pidof glusterd;
+#EST $CLI volume set all cluster.brick-multiplex on
 
 # Create and start the volume
 TEST $CLI volume create $V0 $H0:$L1/b1;
 TEST $CLI volume start $V0;
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" volume_online_brick_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" total_online_bricks
 
 # Mount the volume and create some files
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
 
 TEST touch $M0/file;
@@ -43,12 +52,13 @@ TEST $CLI snapshot config activate-on-create enable;
 
 # Create a snapshot
 TEST $CLI snapshot create snap1 $V0 no-timestamp;
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "2" total_online_bricks
 
 TEST $CLI volume set $V0 features.uss enable;
-
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" total_online_bricks
 EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
 
-EXPECT "Y" file_exists $M0/file
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/file
 
 # Volume set can trigger graph switch therefore chances are we send this
 # req to old graph. Old graph will not have .snaps. Therefore we should
 # wait for some time.
@@ -63,14 +73,14 @@ killall_gluster
 TEST glusterd
 TEST pidof glusterd;
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" volume_online_brick_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" total_online_bricks
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
 
 # Mount the volume
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
-
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
 
-EXPECT "Y" file_exists $M0/file
-EXPECT "Y" file_exists $M0/.snaps/snap1/file
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/file
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/.snaps/snap1/file
 
 EXPECT_WITHIN $UMOUNT_TIMEOUT "Y" force_umount $M0
@@ -82,14 +92,14 @@ killall_gluster
 TEST glusterd
 EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" volume_online_brick_count
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "3" total_online_bricks
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
 
 # Mount the volume
-TEST glusterfs --volfile-server=$H0 --volfile-id=$V0 $M0;
+TEST $GFS --volfile-server=$H0 --volfile-id=$V0 $M0;
 
-EXPECT_WITHIN $PROCESS_UP_TIMEOUT 'Y' check_if_snapd_exist
-
-EXPECT "Y" file_exists $M0/file
-EXPECT "Y" file_exists $M0/.snaps/snap1/file
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/file
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" file_exists $M0/.snaps/snap1/file
 
 TEST $CLI snapshot delete all
 TEST $CLI volume stop $V0
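
A note on the expected total_online_bricks values in the hunks above,
assuming one .pid file per daemon under $GLUSTERD_WORKDIR (as the
helper's own comment says, snapd writes one too even though it is not
really a brick):

    # Expected pidfile count at each stage:
    #   1 after 'volume start'           -> the volume's single brick
    #   2 after 'snapshot create snap1'  -> plus the auto-activated snapshot brick
    #   3 after 'features.uss enable'    -> plus snapd (counted, though not a brick)
    find $GLUSTERD_WORKDIR -name '*.pid' | wc -l    # prints 3 at steady state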