Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-utils.c')
 xlators/mgmt/glusterd/src/glusterd-utils.c | 137 ++++++++++++++++++-----
 1 file changed, 116 insertions(+), 21 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index e95bf9665c9..3570b018eeb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -61,6 +61,7 @@
#include "glusterd-server-quorum.h"
#include <glusterfs/quota-common-utils.h>
#include <glusterfs/common-utils.h>
+#include "glusterd-shd-svc-helper.h"
#include "xdr-generic.h"
#include <sys/resource.h>
@@ -580,13 +581,17 @@ glusterd_volinfo_t *
glusterd_volinfo_unref(glusterd_volinfo_t *volinfo)
{
int refcnt = -1;
+ glusterd_conf_t *conf = THIS->private;
- pthread_mutex_lock(&volinfo->reflock);
+ pthread_mutex_lock(&conf->volume_lock);
{
- refcnt = --volinfo->refcnt;
+ pthread_mutex_lock(&volinfo->reflock);
+ {
+ refcnt = --volinfo->refcnt;
+ }
+ pthread_mutex_unlock(&volinfo->reflock);
}
- pthread_mutex_unlock(&volinfo->reflock);
-
+ pthread_mutex_unlock(&conf->volume_lock);
if (!refcnt) {
glusterd_volinfo_delete(volinfo);
return NULL;
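
The unref path above nests the per-volume reflock inside the coarser conf->volume_lock, so dropping the last reference cannot race with a concurrent volinfo lookup. A minimal sketch of that nesting with illustrative names (obj_t, g_table_lock; not glusterd's types), assuming the outer lock is always taken first so the order stays deadlock-free:

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct obj {
        int refcnt;
        pthread_mutex_t reflock; /* guards only this object's refcnt */
    } obj_t;

    /* Container-wide lock; always acquired before any per-object lock. */
    static pthread_mutex_t g_table_lock = PTHREAD_MUTEX_INITIALIZER;

    obj_t *
    obj_unref(obj_t *obj)
    {
        int refcnt;

        pthread_mutex_lock(&g_table_lock);
        {
            pthread_mutex_lock(&obj->reflock);
            {
                refcnt = --obj->refcnt;
            }
            pthread_mutex_unlock(&obj->reflock);
        }
        pthread_mutex_unlock(&g_table_lock);

        if (!refcnt) {
            /* Last reference: safe to free, no lookup can find it now. */
            pthread_mutex_destroy(&obj->reflock);
            free(obj);
            return NULL;
        }
        return obj;
    }
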
@@ -658,6 +663,7 @@ glusterd_volinfo_new(glusterd_volinfo_t **volinfo)
glusterd_snapdsvc_build(&new_volinfo->snapd.svc);
glusterd_tierdsvc_build(&new_volinfo->tierd.svc);
glusterd_gfproxydsvc_build(&new_volinfo->gfproxyd.svc);
+ glusterd_shdsvc_build(&new_volinfo->shd.svc);
pthread_mutex_init(&new_volinfo->reflock, NULL);
*volinfo = glusterd_volinfo_ref(new_volinfo);
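
glusterd_shdsvc_build() hooks the self-heal daemon into the same per-volume service framework that snapd, tierd, and gfproxyd already use. Roughly, such a *_build() helper only wires up a function-pointer table; the daemon itself is spawned later through the manager callback. A sketch with hypothetical names (svc_t, my_svc_*), not glusterd's actual definitions:

    typedef struct svc svc_t;

    struct svc {
        const char *name;
        int (*manager)(svc_t *svc, void *data, int flags);
        int (*start)(svc_t *svc, int flags);
        int (*stop)(svc_t *svc, int sig);
    };

    /* Stub callbacks standing in for the real daemon management code. */
    static int my_svc_manager(svc_t *svc, void *data, int flags) { return 0; }
    static int my_svc_start(svc_t *svc, int flags) { return 0; }
    static int my_svc_stop(svc_t *svc, int sig) { return 0; }

    /* build() just fills in the vtable; no process is started here. */
    static void
    my_svc_build(svc_t *svc)
    {
        svc->name = "my-daemon";
        svc->manager = my_svc_manager;
        svc->start = my_svc_start;
        svc->stop = my_svc_stop;
    }
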
@@ -1023,11 +1029,11 @@ glusterd_volinfo_delete(glusterd_volinfo_t *volinfo)
gf_store_handle_destroy(volinfo->snapd.handle);
glusterd_auth_cleanup(volinfo);
+ glusterd_shd_svcproc_cleanup(&volinfo->shd);
pthread_mutex_destroy(&volinfo->reflock);
GF_FREE(volinfo);
ret = 0;
-
out:
gf_msg_debug(THIS->name, 0, "Returning %d", ret);
return ret;
@@ -3553,6 +3559,7 @@ glusterd_spawn_daemons(void *opaque)
ret = glusterd_snapdsvc_restart();
ret = glusterd_tierdsvc_restart();
ret = glusterd_gfproxydsvc_restart();
+ ret = glusterd_shdsvc_restart();
return ret;
}
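
Note that every *_restart() call in glusterd_spawn_daemons() overwrites ret, so only the status of the last restart (now the shd one) reaches the caller. If aggregation were desired, a sketch like the following would preserve the first failure instead; restart_a/b/c are stand-ins, not the real helpers:

    static int restart_a(void) { return 0; }
    static int restart_b(void) { return -1; } /* simulate one failure */
    static int restart_c(void) { return 0; }

    static int
    spawn_daemons_aggregated(void)
    {
        int ret = 0;
        int err;

        /* Remember the first error rather than clobbering it. */
        err = restart_a(); if (err && !ret) ret = err;
        err = restart_b(); if (err && !ret) ret = err;
        err = restart_c(); if (err && !ret) ret = err;
        return ret;
    }
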
@@ -4503,6 +4510,9 @@ glusterd_delete_stale_volume(glusterd_volinfo_t *stale_volinfo,
svc = &(stale_volinfo->snapd.svc);
(void)svc->manager(svc, stale_volinfo, PROC_START_NO_WAIT);
}
+ svc = &(stale_volinfo->shd.svc);
+ (void)svc->manager(svc, stale_volinfo, PROC_START_NO_WAIT);
+
(void)glusterd_volinfo_remove(stale_volinfo);
return 0;
@@ -4617,6 +4627,15 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
glusterd_volinfo_unref(old_volinfo);
}
+ ret = glusterd_store_volinfo(new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE);
+ if (ret) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL,
+ "Failed to store "
+ "volinfo for volume %s",
+ new_volinfo->volname);
+ goto out;
+ }
+
if (glusterd_is_volume_started(new_volinfo)) {
(void)glusterd_start_bricks(new_volinfo);
if (glusterd_is_snapd_enabled(new_volinfo)) {
@@ -4625,15 +4644,10 @@ glusterd_import_friend_volume(dict_t *peer_data, int count)
gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
}
}
- }
-
- ret = glusterd_store_volinfo(new_volinfo, GLUSTERD_VOLINFO_VER_AC_NONE);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_VOLINFO_STORE_FAIL,
- "Failed to store "
- "volinfo for volume %s",
- new_volinfo->volname);
- goto out;
+ svc = &(new_volinfo->shd.svc);
+ if (svc->manager(svc, new_volinfo, PROC_START_NO_WAIT)) {
+ gf_event(EVENT_SVC_MANAGER_FAILED, "svc_name=%s", svc->name);
+ }
}
ret = glusterd_create_volfiles_and_notify_services(new_volinfo);
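
The import path now persists the volinfo first and then starts bricks, snapd, and the multiplexed shd best-effort: a manager failure raises an event but does not abort the import. A compact sketch of that pattern, reusing the hypothetical svc_t shape from the earlier sketch (PROC_START_NO_WAIT_SKETCH is a stand-in flag):

    #include <stdio.h>

    typedef struct svc svc_t;
    struct svc {
        const char *name;
        int (*manager)(svc_t *svc, void *data, int flags);
    };

    enum { PROC_START_NO_WAIT_SKETCH = 1 };

    static void
    start_best_effort(svc_t *svc, void *volinfo)
    {
        /* Report the failure, but let the import continue. */
        if (svc->manager(svc, volinfo, PROC_START_NO_WAIT_SKETCH))
            fprintf(stderr, "svc manager failed: svc_name=%s\n", svc->name);
    }
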
@@ -5108,9 +5122,7 @@ glusterd_add_node_to_dict(char *server, dict_t *dict, int count,
glusterd_svc_build_pidfile_path(server, priv->rundir, pidfile,
sizeof(pidfile));
- if (strcmp(server, priv->shd_svc.name) == 0)
- svc = &(priv->shd_svc);
- else if (strcmp(server, priv->nfs_svc.name) == 0)
+ if (strcmp(server, priv->nfs_svc.name) == 0)
svc = &(priv->nfs_svc);
else if (strcmp(server, priv->quotad_svc.name) == 0)
svc = &(priv->quotad_svc);
@@ -5141,9 +5153,6 @@ glusterd_add_node_to_dict(char *server, dict_t *dict, int count,
if (!strcmp(server, priv->nfs_svc.name))
ret = dict_set_nstrn(dict, key, keylen, "NFS Server",
SLEN("NFS Server"));
- else if (!strcmp(server, priv->shd_svc.name))
- ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon",
- SLEN("Self-heal Daemon"));
else if (!strcmp(server, priv->quotad_svc.name))
ret = dict_set_nstrn(dict, key, keylen, "Quota Daemon",
SLEN("Quota Daemon"));
@@ -8709,6 +8718,21 @@ glusterd_friend_remove_cleanup_vols(uuid_t uuid)
"to stop snapd daemon service");
}
}
+
+ if (glusterd_is_shd_compatible_volume(volinfo)) {
+ /*
+ * We send a stop request for all volumes here, so it is fine
+ * to send a stop to the multiplexed shd as well.
+ */
+ svc = &(volinfo->shd.svc);
+ ret = svc->stop(svc, SIGTERM);
+ if (ret) {
+ gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
+ "Failed "
+ "to stop shd daemon service");
+ }
+ }
+
if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
svc = &(volinfo->tierd.svc);
ret = svc->stop(svc, SIGTERM);
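
During peer-detach cleanup, each shd-compatible volume now receives a stop request via svc->stop(svc, SIGTERM). At its core such a stop is a pidfile lookup followed by a signal; a simplified, self-contained sketch (illustrative names, error handling trimmed):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/types.h>

    static int
    service_stop(const char *pidfile, int sig)
    {
        FILE *fp = fopen(pidfile, "r");
        int pid = -1;

        if (!fp)
            return 0; /* no pidfile: treat as already stopped */
        if (fscanf(fp, "%d", &pid) != 1 || pid <= 0) {
            fclose(fp);
            return -1; /* unreadable or bogus pidfile */
        }
        fclose(fp);
        return kill((pid_t)pid, sig); /* e.g. SIGTERM for a graceful stop */
    }
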
@@ -8734,7 +8758,7 @@ glusterd_friend_remove_cleanup_vols(uuid_t uuid)
}
/* Reconfigure all daemon services upon peer detach */
- ret = glusterd_svcs_reconfigure();
+ ret = glusterd_svcs_reconfigure(NULL);
if (ret) {
gf_msg(THIS->name, GF_LOG_ERROR, 0, GD_MSG_SVC_STOP_FAIL,
"Failed to reconfigure all daemon services.");
@@ -14286,3 +14310,74 @@ glusterd_is_profile_on(glusterd_volinfo_t *volinfo)
return _gf_true;
return _gf_false;
}
+
+int32_t
+glusterd_add_shd_to_dict(glusterd_volinfo_t *volinfo, dict_t *dict,
+ int32_t count)
+{
+ int ret = -1;
+ int32_t pid = -1;
+ int32_t brick_online = -1;
+ char key[64] = {0};
+ int keylen;
+ char *pidfile = NULL;
+ xlator_t *this = NULL;
+ char *uuid_str = NULL;
+
+ this = THIS;
+ GF_VALIDATE_OR_GOTO(THIS->name, this, out);
+
+ GF_VALIDATE_OR_GOTO(this->name, volinfo, out);
+ GF_VALIDATE_OR_GOTO(this->name, dict, out);
+
+ keylen = snprintf(key, sizeof(key), "brick%d.hostname", count);
+ ret = dict_set_nstrn(dict, key, keylen, "Self-heal Daemon",
+ SLEN("Self-heal Daemon"));
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "brick%d.path", count);
+ uuid_str = gf_strdup(uuid_utoa(MY_UUID));
+ if (!uuid_str) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstrn(dict, key, keylen, uuid_str);
+ if (ret)
+ goto out;
+ uuid_str = NULL;
+
+ /* shd doesn't have a port, but the CLI needs a port key with
+ * a zero value to parse.
+ */
+
+ keylen = snprintf(key, sizeof(key), "brick%d.port", count);
+ ret = dict_set_int32n(dict, key, keylen, 0);
+ if (ret)
+ goto out;
+
+ pidfile = volinfo->shd.svc.proc.pidfile;
+
+ brick_online = gf_is_service_running(pidfile, &pid);
+
+ /* If shd is not running, then don't print the pid */
+ if (!brick_online)
+ pid = -1;
+ keylen = snprintf(key, sizeof(key), "brick%d.pid", count);
+ ret = dict_set_int32n(dict, key, keylen, pid);
+ if (ret)
+ goto out;
+
+ keylen = snprintf(key, sizeof(key), "brick%d.status", count);
+ ret = dict_set_int32n(dict, key, keylen, brick_online);
+
+out:
+ if (uuid_str)
+ GF_FREE(uuid_str);
+ if (ret)
+ gf_msg(this ? this->name : "glusterd", GF_LOG_ERROR, 0,
+ GD_MSG_DICT_SET_FAILED,
+ "Returning %d. adding values to dict failed", ret);
+
+ return ret;
+}
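
gf_is_service_running() above derives brick_online from the shd pidfile. One common shape for such a check, sketched here under that assumption (the real helper is more careful about stale pidfiles), is to read the recorded pid and probe it with signal 0:

    #include <signal.h>
    #include <stdio.h>
    #include <sys/types.h>

    /* Returns 1 if the pid recorded in pidfile appears alive, else 0.
     * On return *pid holds the live pid, or -1, mirroring the
     * "don't print a stale pid" rule used above. */
    static int
    service_running(const char *pidfile, int *pid)
    {
        FILE *fp = fopen(pidfile, "r");
        int running = 0;

        *pid = -1;
        if (!fp)
            return 0;
        if (fscanf(fp, "%d", pid) == 1 && *pid > 0)
            running = (kill((pid_t)*pid, 0) == 0); /* sig 0: probe only */
        if (!running)
            *pid = -1;
        fclose(fp);
        return running;
    }
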