diff options
author | Atin Mukherjee <amukherj@redhat.com> | 2015-04-20 17:37:21 +0530 |
---|---|---|
committer | Kaushal M <kaushal@redhat.com> | 2015-04-26 21:54:51 -0700 |
commit | 18fd2fdd60839d737ab0ac64f33a444b54bdeee4 (patch) | |
tree | f5bab9a0e010221794615498881a1473d3952ee9 | |
parent | 4c3724f195240e40994b71add255f85ee1b025fb (diff) |
glusterd: initialize snapd svc at volume restore path
In restore path snapd svc was not initialized because of which any glusterd
instance which went down and came back may have uninitialized snapd svc. The
reason I used 'may' is that it depends on the number of nodes in the cluster. In a
single node cluster this wouldn't be a problem since glusterd_spawn_daemon takes
care of initializing it.
Change-Id: I2da1e419a0506d3b2742c1cf39a3b9416eb3c305
BUG: 1213295
Signed-off-by: Atin Mukherjee <amukherj@redhat.com>
Reviewed-on: http://review.gluster.org/10304
Tested-by: Gluster Build System <jenkins@build.gluster.com>
Tested-by: NetBSD Build System
Reviewed-by: Kaushal M <kaushal@redhat.com>
-rw-r--r-- | tests/bugs/glusterd/bug-1213295-snapd-svc-uninitialized.t | 26 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-snapd-svc.c | 11 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-snapshot.c | 1 | ||||
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-store.c | 8 |
4 files changed, 35 insertions, 11 deletions
diff --git a/tests/bugs/glusterd/bug-1213295-snapd-svc-uninitialized.t b/tests/bugs/glusterd/bug-1213295-snapd-svc-uninitialized.t new file mode 100644 index 00000000000..1dbfdf8697b --- /dev/null +++ b/tests/bugs/glusterd/bug-1213295-snapd-svc-uninitialized.t @@ -0,0 +1,26 @@ +#!/bin/bash + +. $(dirname $0)/../../include.rc +. $(dirname $0)/../../cluster.rc + +cleanup + +TEST launch_cluster 2; +TEST $CLI_1 peer probe $H2; +EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count + +TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 +TEST $CLI_1 volume start $V0 + +kill_glusterd 2 +TEST start_glusterd 2 + +EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count + +#volume stop should not crash +TEST $CLI_2 volume stop $V0 + +# check whether glusterd instance is running on H2 as this is the node which +# restored the volume configuration after a restart +EXPECT_WITHIN $PROBE_TIMEOUT 1 peer_count +cleanup diff --git a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c index d1a7da7e0ae..7e63929f28e 100644 --- a/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c +++ b/xlators/mgmt/glusterd/src/glusterd-snapd-svc.c @@ -319,17 +319,6 @@ glusterd_snapdsvc_restart () GF_ASSERT (conf); cds_list_for_each_entry (volinfo, &conf->volumes, vol_list) { - /* Init per volume snapd svc */ - ret = glusterd_snapdsvc_init (volinfo); - if (ret) { - gf_log (this->name, GF_LOG_ERROR, "snapd service " - "initialization failed for volume %s", - volinfo->volname); - goto out; - } - gf_log (this->name, GF_LOG_DEBUG, "snapd service initialized " - "for %s", volinfo->volname); - /* Start per volume snapd svc */ if (volinfo->status == GLUSTERD_STATUS_STARTED && glusterd_is_snapd_enabled (volinfo)) { diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c index 476bfb8e017..7280a12d0cd 100644 --- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c +++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c @@ -6072,6 +6072,7 @@ 
glusterd_snapshot_clone_commit (dict_t *dict, char **op_errstr, goto out; } ret = glusterd_snapdsvc_init (snap_vol); + glusterd_list_add_order (&snap_vol->vol_list, &priv->volumes, glusterd_compare_volume_name); diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c index fdf3365056b..0beae3e8bbf 100644 --- a/xlators/mgmt/glusterd/src/glusterd-store.c +++ b/xlators/mgmt/glusterd/src/glusterd-store.c @@ -2950,6 +2950,14 @@ glusterd_store_retrieve_volume (char *volname, glusterd_snap_t *snap) if (snap) volinfo->is_snap_volume = _gf_true; + /* Initialize the snapd service */ + ret = glusterd_snapdsvc_init (volinfo); + if (ret) { + gf_log (this->name, GF_LOG_ERROR, "Failed to initialize snapd " + "service for volume %s", volinfo->volname); + goto out; + } + ret = glusterd_store_update_volinfo (volinfo); if (ret) { gf_log (this->name, GF_LOG_ERROR, "Failed to update volinfo " |