diff options
author | Mohammed Rafi KC <rkavunga@redhat.com> | 2019-02-25 10:05:32 +0530 |
---|---|---|
committer | Amar Tumballi <amarts@redhat.com> | 2019-04-01 03:44:23 +0000 |
commit | bc3694d7cfc868a2ed6344ea123faf19fce28d13 (patch) | |
tree | 51764aa4445462081273444d5ff2499b1e5375f7 /xlators/mgmt/glusterd/src/glusterd-shd-svc.c | |
parent | 92ae26ae8039847e38c738ef98835a14be9d4296 (diff) |
mgmt/shd: Implement multiplexing in self heal daemon
Problem:
The shd daemon is per node, which means it creates a graph
with all volumes on it. While this is great for utilizing
resources, it is not so good in terms of performance and manageability.
This is because self-heal daemons don't have the capability to
automatically reconfigure their graphs. So each time any configuration
change happens to the volumes (replicate/disperse), we need to restart
shd to bring the changes into the graph.
Because of this, all ongoing heals for all other volumes have to be
stopped in the middle and restarted all over again.
Solution:
This change makes shd a per-volume daemon, so that the graph
will be generated for each volume.
When we want to start/reconfigure shd for a volume, we first search
for an existing shd running on the node; if there is none, we will
start a new process. If a daemon is already running for shd, then
we will simply detach the graph for the volume and reattach the updated
graph for the volume. This won't touch any of the ongoing operations
for any other volumes on the shd daemon.
Example of an shd graph when it is per volume
graph
-----------------------
| debug-iostat |
-----------------------
/ | \
/ | \
--------- --------- ----------
| AFR-1 | | AFR-2 | | AFR-3 |
-------- --------- ----------
A running shd daemon with 3 volumes will be like-->
graph
-----------------------
| debug-iostat |
-----------------------
/ | \
/ | \
------------ ------------ ------------
| volume-1 | | volume-2 | | volume-3 |
------------ ------------ ------------
Change-Id: Idcb2698be3eeb95beaac47125565c93370afbd99
fixes: bz#1659708
Signed-off-by: Mohammed Rafi KC <rkavunga@redhat.com>
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-shd-svc.c')
-rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-shd-svc.c | 540 |
1 files changed, 486 insertions, 54 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c index f5379b0270b..47898434380 100644 --- a/xlators/mgmt/glusterd/src/glusterd-shd-svc.c +++ b/xlators/mgmt/glusterd/src/glusterd-shd-svc.c @@ -13,9 +13,10 @@ #include "glusterd.h" #include "glusterd-utils.h" #include "glusterd-volgen.h" -#include "glusterd-svc-mgmt.h" #include "glusterd-shd-svc.h" +#include "glusterd-shd-svc-helper.h" #include "glusterd-svc-helper.h" +#include "glusterd-store.h" #define GD_SHD_PROCESS_NAME "--process-name" char *shd_svc_name = "glustershd"; @@ -23,27 +24,145 @@ char *shd_svc_name = "glustershd"; void glusterd_shdsvc_build(glusterd_svc_t *svc) { + int ret = -1; + ret = snprintf(svc->name, sizeof(svc->name), "%s", shd_svc_name); + if (ret < 0) + return; + + CDS_INIT_LIST_HEAD(&svc->mux_svc); svc->manager = glusterd_shdsvc_manager; svc->start = glusterd_shdsvc_start; - svc->stop = glusterd_svc_stop; + svc->stop = glusterd_shdsvc_stop; + svc->reconfigure = glusterd_shdsvc_reconfigure; } int -glusterd_shdsvc_init(glusterd_svc_t *svc) +glusterd_shdsvc_init(void *data, glusterd_conn_t *mux_conn, + glusterd_svc_proc_t *mux_svc) { - return glusterd_svc_init(svc, shd_svc_name); + int ret = -1; + char rundir[PATH_MAX] = { + 0, + }; + char sockpath[PATH_MAX] = { + 0, + }; + char pidfile[PATH_MAX] = { + 0, + }; + char volfile[PATH_MAX] = { + 0, + }; + char logdir[PATH_MAX] = { + 0, + }; + char logfile[PATH_MAX] = { + 0, + }; + char volfileid[256] = {0}; + glusterd_svc_t *svc = NULL; + glusterd_volinfo_t *volinfo = NULL; + glusterd_conf_t *priv = NULL; + glusterd_muxsvc_conn_notify_t notify = NULL; + xlator_t *this = NULL; + char *volfileserver = NULL; + int32_t len = 0; + + this = THIS; + GF_VALIDATE_OR_GOTO(THIS->name, this, out); + + priv = this->private; + GF_VALIDATE_OR_GOTO(this->name, priv, out); + + volinfo = data; + GF_VALIDATE_OR_GOTO(this->name, data, out); + GF_VALIDATE_OR_GOTO(this->name, mux_svc, out); + + svc = 
&(volinfo->shd.svc); + + ret = snprintf(svc->name, sizeof(svc->name), "%s", shd_svc_name); + if (ret < 0) + goto out; + + notify = glusterd_muxsvc_common_rpc_notify; + glusterd_store_perform_node_state_store(volinfo); + + GLUSTERD_GET_SHD_RUNDIR(rundir, volinfo, priv); + glusterd_svc_create_rundir(rundir); + + glusterd_svc_build_shd_logdir(logdir, volinfo->volname, sizeof(logdir)); + glusterd_svc_build_shd_logfile(logfile, logdir, sizeof(logfile)); + + /* Initialize the connection mgmt */ + if (mux_conn && mux_svc->rpc) { + /* multiplexed svc */ + svc->conn.frame_timeout = mux_conn->frame_timeout; + /* This will be unrefed from glusterd_shd_svcproc_cleanup*/ + svc->conn.rpc = rpc_clnt_ref(mux_svc->rpc); + ret = snprintf(svc->conn.sockpath, sizeof(svc->conn.sockpath), "%s", + mux_conn->sockpath); + } else { + ret = mkdir_p(logdir, 0755, _gf_true); + if ((ret == -1) && (EEXIST != errno)) { + gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_CREATE_DIR_FAILED, + "Unable to create logdir %s", logdir); + goto out; + } + + glusterd_svc_build_shd_socket_filepath(volinfo, sockpath, + sizeof(sockpath)); + ret = glusterd_muxsvc_conn_init(&(svc->conn), mux_svc, sockpath, 600, + notify); + if (ret) + goto out; + /* This will be unrefed when the last svcs is detached from the list */ + if (!mux_svc->rpc) + mux_svc->rpc = rpc_clnt_ref(svc->conn.rpc); + } + + /* Initialize the process mgmt */ + glusterd_svc_build_shd_pidfile(volinfo, pidfile, sizeof(pidfile)); + glusterd_svc_build_shd_volfile_path(volinfo, volfile, PATH_MAX); + len = snprintf(volfileid, sizeof(volfileid), "shd/%s", volinfo->volname); + if ((len < 0) || (len >= sizeof(volfileid))) { + ret = -1; + goto out; + } + + if (dict_get_strn(this->options, "transport.socket.bind-address", + SLEN("transport.socket.bind-address"), + &volfileserver) != 0) { + volfileserver = "localhost"; + } + ret = glusterd_proc_init(&(svc->proc), shd_svc_name, pidfile, logdir, + logfile, volfile, volfileid, volfileserver); + if (ret) + goto 
out; + +out: + gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret); + return ret; } -static int -glusterd_shdsvc_create_volfile() +int +glusterd_shdsvc_create_volfile(glusterd_volinfo_t *volinfo) { char filepath[PATH_MAX] = { 0, }; + int ret = -1; - glusterd_conf_t *conf = THIS->private; dict_t *mod_dict = NULL; + glusterd_svc_build_shd_volfile_path(volinfo, filepath, PATH_MAX); + if (!glusterd_is_shd_compatible_volume(volinfo)) { + /* If volfile exist, delete it. This case happens when we + * change from replica/ec to distribute. + */ + (void)glusterd_unlink_file(filepath); + ret = 0; + goto out; + } mod_dict = dict_new(); if (!mod_dict) goto out; @@ -64,9 +183,7 @@ glusterd_shdsvc_create_volfile() if (ret) goto out; - glusterd_svc_build_volfile_path(shd_svc_name, conf->workdir, filepath, - sizeof(filepath)); - ret = glusterd_create_global_volfile(build_shd_graph, filepath, mod_dict); + ret = glusterd_shdsvc_generate_volfile(volinfo, filepath, mod_dict); if (ret) { gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_VOLFILE_CREATE_FAIL, "Failed to create volfile"); @@ -81,26 +198,89 @@ out: return ret; } +gf_boolean_t +glusterd_svcs_shd_compatible_volumes_stopped(glusterd_svc_t *svc) +{ + glusterd_svc_proc_t *svc_proc = NULL; + glusterd_shdsvc_t *shd = NULL; + glusterd_svc_t *temp_svc = NULL; + glusterd_volinfo_t *volinfo = NULL; + gf_boolean_t comp = _gf_false; + glusterd_conf_t *conf = THIS->private; + + GF_VALIDATE_OR_GOTO("glusterd", conf, out); + GF_VALIDATE_OR_GOTO("glusterd", svc, out); + pthread_mutex_lock(&conf->attach_lock); + { + svc_proc = svc->svc_proc; + if (!svc_proc) + goto unlock; + cds_list_for_each_entry(temp_svc, &svc_proc->svcs, mux_svc) + { + /* Get volinfo->shd from svc object */ + shd = cds_list_entry(svc, glusterd_shdsvc_t, svc); + if (!shd) { + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL, + "Failed to get shd object " + "from shd service"); + goto unlock; + } + + /* Get volinfo from shd */ + volinfo = 
cds_list_entry(shd, glusterd_volinfo_t, shd); + if (!volinfo) { + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, + "Failed to get volinfo from " + "from shd"); + goto unlock; + } + if (!glusterd_is_shd_compatible_volume(volinfo)) + continue; + if (volinfo->status == GLUSTERD_STATUS_STARTED) + goto unlock; + } + comp = _gf_true; + } +unlock: + pthread_mutex_unlock(&conf->attach_lock); +out: + return comp; +} + int glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags) { - int ret = 0; + int ret = -1; glusterd_volinfo_t *volinfo = NULL; - if (!svc->inited) { - ret = glusterd_shdsvc_init(svc); - if (ret) { - gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC, - "Failed to init shd " - "service"); - goto out; - } else { - svc->inited = _gf_true; - gf_msg_debug(THIS->name, 0, "shd service initialized"); + volinfo = data; + GF_VALIDATE_OR_GOTO("glusterd", svc, out); + GF_VALIDATE_OR_GOTO("glusterd", volinfo, out); + + if (volinfo) + glusterd_volinfo_ref(volinfo); + + ret = glusterd_shdsvc_create_volfile(volinfo); + if (ret) + goto out; + + if (!glusterd_is_shd_compatible_volume(volinfo)) { + ret = 0; + if (svc->inited) { + /* This means glusterd was running for this volume and now + * it was converted to a non-shd volume. 
So just stop the shd + */ + ret = svc->stop(svc, SIGTERM); } + goto out; } - volinfo = data; + ret = glusterd_shd_svc_mux_init(volinfo, svc); + if (ret) { + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_FAILED_INIT_SHDSVC, + "Failed to init shd service"); + goto out; + } /* If all the volumes are stopped or all shd compatible volumes * are stopped then stop the service if: @@ -110,31 +290,26 @@ glusterd_shdsvc_manager(glusterd_svc_t *svc, void *data, int flags) * - volinfo is NULL or * - volinfo is present and volume is shd compatible */ - if (glusterd_are_all_volumes_stopped() || - glusterd_all_shd_compatible_volumes_stopped()) { - if (!(volinfo && !glusterd_is_shd_compatible_volume(volinfo))) { - ret = svc->stop(svc, SIGTERM); - } - } else { - if (!(volinfo && !glusterd_is_shd_compatible_volume(volinfo))) { - ret = glusterd_shdsvc_create_volfile(); - if (ret) - goto out; - - ret = svc->stop(svc, SIGTERM); - if (ret) - goto out; + if (glusterd_svcs_shd_compatible_volumes_stopped(svc)) { + /* TODO + * Take a lock and detach all svc's to stop the process + * also reset the init flag + */ + ret = svc->stop(svc, SIGTERM); + } else if (volinfo) { + ret = svc->stop(svc, SIGTERM); + if (ret) + goto out; + if (volinfo->status == GLUSTERD_STATUS_STARTED) { ret = svc->start(svc, flags); if (ret) goto out; - - ret = glusterd_conn_connect(&(svc->conn)); - if (ret) - goto out; } } out: + if (volinfo) + glusterd_volinfo_unref(volinfo); if (ret) gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name); gf_msg_debug(THIS->name, 0, "Returning %d", ret); @@ -143,7 +318,7 @@ out: } int -glusterd_shdsvc_start(glusterd_svc_t *svc, int flags) +glusterd_new_shd_svc_start(glusterd_svc_t *svc, int flags) { int ret = -1; char glusterd_uuid_option[PATH_MAX] = {0}; @@ -188,31 +363,136 @@ glusterd_shdsvc_start(glusterd_svc_t *svc, int flags) goto out; ret = glusterd_svc_start(svc, flags, cmdline); + if (ret) + goto out; + ret = glusterd_conn_connect(&(svc->conn)); out: if (cmdline) 
dict_unref(cmdline); + return ret; +} +int +glusterd_recover_shd_attach_failure(glusterd_volinfo_t *volinfo, + glusterd_svc_t *svc, int flags) +{ + int ret = -1; + glusterd_svc_proc_t *mux_proc = NULL; + glusterd_conf_t *conf = NULL; + + conf = THIS->private; + + if (!conf || !volinfo || !svc) + return -1; + glusterd_shd_svcproc_cleanup(&volinfo->shd); + mux_proc = glusterd_svcprocess_new(); + if (!mux_proc) { + return -1; + } + ret = glusterd_shdsvc_init(volinfo, NULL, mux_proc); + if (ret) + return -1; + pthread_mutex_lock(&conf->attach_lock); + { + cds_list_add_tail(&mux_proc->svc_proc_list, &conf->shd_procs); + svc->svc_proc = mux_proc; + cds_list_del_init(&svc->mux_svc); + cds_list_add_tail(&svc->mux_svc, &mux_proc->svcs); + } + pthread_mutex_unlock(&conf->attach_lock); + + ret = glusterd_new_shd_svc_start(svc, flags); + if (!ret) { + volinfo->shd.attached = _gf_true; + } + return ret; +} + +int +glusterd_shdsvc_start(glusterd_svc_t *svc, int flags) +{ + int ret = -1; + glusterd_shdsvc_t *shd = NULL; + glusterd_volinfo_t *volinfo = NULL; + glusterd_conf_t *conf = NULL; + + GF_VALIDATE_OR_GOTO("glusterd", svc, out); + conf = THIS->private; + GF_VALIDATE_OR_GOTO("glusterd", conf, out); + + /* Get volinfo->shd from svc object */ + shd = cds_list_entry(svc, glusterd_shdsvc_t, svc); + if (!shd) { + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL, + "Failed to get shd object " + "from shd service"); + return -1; + } + + /* Get volinfo from shd */ + volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd); + if (!volinfo) { + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, + "Failed to get volinfo from " + "from shd"); + return -1; + } + + if (volinfo->status != GLUSTERD_STATUS_STARTED) + return -1; + + glusterd_volinfo_ref(volinfo); + if (!svc->inited) { + ret = glusterd_shd_svc_mux_init(volinfo, svc); + if (ret) + goto out; + } + + if (shd->attached) { + ret = glusterd_attach_svc(svc, volinfo, flags); + if (ret) { + gf_msg("glusterd", 
GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, + "Failed to attach shd svc(volume=%s) to pid=%d. Starting" + "a new process", + volinfo->volname, glusterd_proc_get_pid(&svc->proc)); + ret = glusterd_recover_shd_attach_failure(volinfo, svc, flags); + } + goto out; + } + ret = glusterd_new_shd_svc_start(svc, flags); + if (!ret) { + shd->attached = _gf_true; + } +out: + if (volinfo) + glusterd_volinfo_unref(volinfo); gf_msg_debug(THIS->name, 0, "Returning %d", ret); return ret; } int -glusterd_shdsvc_reconfigure() +glusterd_shdsvc_reconfigure(glusterd_volinfo_t *volinfo) { int ret = -1; xlator_t *this = NULL; - glusterd_conf_t *priv = NULL; gf_boolean_t identical = _gf_false; + dict_t *mod_dict = NULL; + glusterd_svc_t *svc = NULL; this = THIS; GF_VALIDATE_OR_GOTO("glusterd", this, out); - priv = this->private; - GF_VALIDATE_OR_GOTO(this->name, priv, out); + if (!volinfo) { + /* reconfigure will be called separately*/ + ret = 0; + goto out; + } - if (glusterd_all_shd_compatible_volumes_stopped()) + glusterd_volinfo_ref(volinfo); + svc = &(volinfo->shd.svc); + if (glusterd_svcs_shd_compatible_volumes_stopped(svc)) goto manager; /* @@ -220,8 +500,42 @@ glusterd_shdsvc_reconfigure() * and cksum i.e. "character-by-character". If YES, then * NOTHING has been changed, just return. 
*/ - ret = glusterd_svc_check_volfile_identical(priv->shd_svc.name, - build_shd_graph, &identical); + + if (!glusterd_is_shd_compatible_volume(volinfo)) { + if (svc->inited) + goto manager; + + /* Nothing to do if not shd compatible */ + ret = 0; + goto out; + } + mod_dict = dict_new(); + if (!mod_dict) + goto out; + + ret = dict_set_uint32(mod_dict, "cluster.background-self-heal-count", 0); + if (ret) + goto out; + + ret = dict_set_str(mod_dict, "cluster.data-self-heal", "on"); + if (ret) + goto out; + + ret = dict_set_str(mod_dict, "cluster.metadata-self-heal", "on"); + if (ret) + goto out; + + ret = dict_set_int32(mod_dict, "graph-check", 1); + if (ret) + goto out; + + ret = dict_set_str(mod_dict, "cluster.entry-self-heal", "on"); + if (ret) + goto out; + + ret = glusterd_volume_svc_check_volfile_identical( + "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile, + &identical); if (ret) goto out; @@ -236,8 +550,9 @@ glusterd_shdsvc_reconfigure() * changed, then inform the xlator to reconfigure the options. */ identical = _gf_false; /* RESET the FLAG */ - ret = glusterd_svc_check_topology_identical(priv->shd_svc.name, - build_shd_graph, &identical); + ret = glusterd_volume_svc_check_topology_identical( + "glustershd", mod_dict, volinfo, glusterd_shdsvc_generate_volfile, + &identical); if (ret) goto out; @@ -245,7 +560,7 @@ glusterd_shdsvc_reconfigure() * options to shd volfile, so that shd will be reconfigured. */ if (identical) { - ret = glusterd_shdsvc_create_volfile(); + ret = glusterd_shdsvc_create_volfile(volinfo); if (ret == 0) { /* Only if above PASSES */ ret = glusterd_fetchspec_notify(THIS); } @@ -253,12 +568,129 @@ glusterd_shdsvc_reconfigure() } manager: /* - * shd volfile's topology has been changed. shd server needs - * to be RESTARTED to ACT on the changed volfile. + * shd volfile's topology has been changed. volfile needs + * to be RECONFIGURED to ACT on the changed volfile. 
*/ - ret = priv->shd_svc.manager(&(priv->shd_svc), NULL, PROC_START_NO_WAIT); + ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT); out: + if (volinfo) + glusterd_volinfo_unref(volinfo); + if (mod_dict) + dict_unref(mod_dict); gf_msg_debug(this ? this->name : "glusterd", 0, "Returning %d", ret); return ret; } + +int +glusterd_shdsvc_restart() +{ + glusterd_volinfo_t *volinfo = NULL; + glusterd_volinfo_t *tmp = NULL; + int ret = -1; + xlator_t *this = THIS; + glusterd_conf_t *conf = NULL; + glusterd_svc_t *svc = NULL; + + GF_VALIDATE_OR_GOTO("glusterd", this, out); + + conf = this->private; + GF_VALIDATE_OR_GOTO(this->name, conf, out); + + pthread_mutex_lock(&conf->volume_lock); + cds_list_for_each_entry_safe(volinfo, tmp, &conf->volumes, vol_list) + { + glusterd_volinfo_ref(volinfo); + pthread_mutex_unlock(&conf->volume_lock); + /* Start per volume shd svc */ + if (volinfo->status == GLUSTERD_STATUS_STARTED) { + svc = &(volinfo->shd.svc); + ret = svc->manager(svc, volinfo, PROC_START_NO_WAIT); + if (ret) { + gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_SHD_START_FAIL, + "Couldn't start shd for " + "vol: %s on restart", + volinfo->volname); + gf_event(EVENT_SVC_MANAGER_FAILED, "volume=%s;svc_name=%s", + volinfo->volname, svc->name); + glusterd_volinfo_unref(volinfo); + goto out; + } + } + glusterd_volinfo_unref(volinfo); + pthread_mutex_lock(&conf->volume_lock); + } + pthread_mutex_unlock(&conf->volume_lock); +out: + return ret; +} + +int +glusterd_shdsvc_stop(glusterd_svc_t *svc, int sig) +{ + int ret = -1; + glusterd_svc_proc_t *svc_proc = NULL; + glusterd_shdsvc_t *shd = NULL; + glusterd_volinfo_t *volinfo = NULL; + gf_boolean_t empty = _gf_false; + glusterd_conf_t *conf = NULL; + int pid = -1; + + conf = THIS->private; + GF_VALIDATE_OR_GOTO("glusterd", svc, out); + svc_proc = svc->svc_proc; + GF_VALIDATE_OR_GOTO("glusterd", svc_proc, out); + GF_VALIDATE_OR_GOTO("glusterd", conf, out); + + /* Get volinfo->shd from svc object */ + shd = cds_list_entry(svc, 
glusterd_shdsvc_t, svc); + if (!shd) { + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_SHD_OBJ_GET_FAIL, + "Failed to get shd object " + "from shd service"); + return -1; + } + + /* Get volinfo from shd */ + volinfo = cds_list_entry(shd, glusterd_volinfo_t, shd); + if (!volinfo) { + gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_VOLINFO_GET_FAIL, + "Failed to get volinfo from " + "from shd"); + return -1; + } + + glusterd_volinfo_ref(volinfo); + pthread_mutex_lock(&conf->attach_lock); + { + gf_is_service_running(svc->proc.pidfile, &pid); + cds_list_del_init(&svc->mux_svc); + empty = cds_list_empty(&svc_proc->svcs); + } + pthread_mutex_unlock(&conf->attach_lock); + if (empty) { + /* Unref will happen when destroying the connection */ + glusterd_volinfo_ref(volinfo); + svc_proc->data = volinfo; + ret = glusterd_svc_stop(svc, sig); + } + if (!empty && pid != -1) { + ret = glusterd_detach_svc(svc, volinfo, sig); + if (ret) + gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL, + "shd service is failed to detach volume %s from pid %d", + volinfo->volname, glusterd_proc_get_pid(&svc->proc)); + else + gf_msg(THIS->name, GF_LOG_INFO, 0, GD_MSG_SVC_STOP_SUCCESS, + "Shd service is detached for volume %s from pid %d", + volinfo->volname, glusterd_proc_get_pid(&svc->proc)); + } + svc->online = _gf_false; + (void)glusterd_unlink_file((char *)svc->proc.pidfile); + glusterd_shd_svcproc_cleanup(shd); + ret = 0; + glusterd_volinfo_unref(volinfo); +out: + gf_msg_debug(THIS->name, 0, "Returning %d", ret); + return ret; +} |