Diffstat (limited to 'libglusterfs/src/graph.c')
-rw-r--r--  libglusterfs/src/graph.c  704
1 file changed, 663 insertions, 41 deletions
diff --git a/libglusterfs/src/graph.c b/libglusterfs/src/graph.c
index 2a213d2c48e..13f298eb3bd 100644
--- a/libglusterfs/src/graph.c
+++ b/libglusterfs/src/graph.c
@@ -8,16 +8,32 @@
   cases as published by the Free Software Foundation.
 */

-#include "xlator.h"
-#include <dlfcn.h>
-#include <netdb.h>
-#include <fnmatch.h>
-#include <stdlib.h>
-#include "defaults.h"
-#include <unistd.h>
-#include "syscall.h"
-#include <regex.h>
-#include "libglusterfs-messages.h"
+#include <stdint.h>        // for uint32_t
+#include <sys/time.h>      // for timeval
+#include <errno.h>         // for EIO, errno, EINVAL, ENOMEM
+#include <fnmatch.h>       // for fnmatch, FNM_NOESCAPE
+#include <openssl/sha.h>   // for SHA256_DIGEST_LENGTH
+#include <regex.h>         // for regmatch_t, regcomp
+#include <stdio.h>         // for fclose, fopen, snprintf
+#include <stdlib.h>        // for NULL, atoi, mkstemp
+#include <string.h>        // for strcmp, strerror, memcpy
+#include <strings.h>       // for rindex
+#include <sys/stat.h>      // for stat
+#include <sys/time.h>      // for gettimeofday
+#include <unistd.h>        // for gethostname, getpid
+#include "glusterfs/common-utils.h"    // for gf_strncpy, gf_time_fmt
+#include "glusterfs/defaults.h"
+#include "glusterfs/dict.h"            // for dict_foreach, dict_set_...
+#include "glusterfs/globals.h"         // for xlator_t, xlator_list_t
+#include "glusterfs/glusterfs.h"       // for glusterfs_graph_t, glus...
+#include "glusterfs/glusterfs-fops.h"  // for GF_EVENT_GRAPH_NEW, GF_...
+#include "glusterfs/libglusterfs-messages.h"  // for LG_MSG_GRAPH_ERROR, LG_...
+#include "glusterfs/list.h"            // for list_add, list_del_init
+#include "glusterfs/logging.h"         // for gf_msg, GF_LOG_ERROR
+#include "glusterfs/mem-pool.h"        // for GF_FREE, gf_strdup, GF_...
+#include "glusterfs/mem-types.h"       // for gf_common_mt_xlator_list_t
+#include "glusterfs/options.h"         // for xlator_tree_reconfigure
+#include "glusterfs/syscall.h"         // for sys_close, sys_stat

 #if 0
 static void
@@ -25,7 +41,7 @@ _gf_dump_details (int argc, char **argv)
 {
     extern FILE *gf_log_logfile;
     int i = 0;
-    char timestr[64];
+    char timestr[GF_TIMESTR_SIZE];
     time_t utime = 0;
     pid_t mypid = 0;
     struct utsname uname_buf = {{0, }, };
@@ -114,6 +130,53 @@ out:
     return cert_depth;
 }

+xlator_t *
+glusterfs_get_last_xlator(glusterfs_graph_t *graph)
+{
+    xlator_t *trav = graph->first;
+    if (!trav)
+        return NULL;
+
+    while (trav->next)
+        trav = trav->next;
+
+    return trav;
+}
+
+xlator_t *
+glusterfs_mux_xlator_unlink(xlator_t *pxl, xlator_t *cxl)
+{
+    xlator_list_t *unlink = NULL;
+    xlator_list_t *prev = NULL;
+    xlator_list_t **tmp = NULL;
+    xlator_t *next_child = NULL;
+    xlator_t *xl = NULL;
+
+    for (tmp = &pxl->children; *tmp; tmp = &(*tmp)->next) {
+        if ((*tmp)->xlator == cxl) {
+            unlink = *tmp;
+            *tmp = (*tmp)->next;
+            if (*tmp)
+                next_child = (*tmp)->xlator;
+            break;
+        }
+        prev = *tmp;
+    }
+
+    if (!prev)
+        xl = pxl;
+    else if (prev->xlator)
+        xl = prev->xlator->graph->last_xl;
+
+    if (xl)
+        xl->next = next_child;
+    if (next_child)
+        next_child->prev = xl;
+
+    GF_FREE(unlink);
+    return next_child;
+}
+
 int
 glusterfs_xlator_link(xlator_t *pxl, xlator_t *cxl)
 {
@@ -181,7 +244,7 @@ glusterfs_graph_insert(glusterfs_graph_t *graph, glusterfs_ctx_t *ctx,
     ixl->ctx = ctx;
     ixl->graph = graph;
-    ixl->options = get_new_dict();
+    ixl->options = dict_new();
     if (!ixl->options)
         goto err;
@@ -393,7 +456,7 @@ _log_if_unknown_option(dict_t *dict, char *key, data_t *value, void *data)
     found = xlator_volume_option_get(xl, key);
     if (!found) {
-        gf_msg(xl->name, GF_LOG_WARNING, 0, LG_MSG_XLATOR_OPTION_INVALID,
+        gf_msg(xl->name, GF_LOG_DEBUG, 0, LG_MSG_XLATOR_OPTION_INVALID,
                "option '%s' is not recognized", key);
     }
@@ -406,39 +469,29 @@ _xlator_check_unknown_options(xlator_t *xl, void *data)
     dict_foreach(xl->options, _log_if_unknown_option, xl);
 }

-int
+static int
 glusterfs_graph_unknown_options(glusterfs_graph_t *graph)
 {
     xlator_foreach(graph->first, _xlator_check_unknown_options, NULL);
     return 0;
 }

-void
-fill_uuid(char *uuid, int size)
+static void
+fill_uuid(char *uuid, int size, struct timeval tv)
 {
-    char hostname[256] = {
-        0,
-    };
-    struct timeval tv = {
+    char hostname[50] = {
         0,
     };
-    char now_str[64];
-
-    if (gettimeofday(&tv, NULL) == -1) {
-        gf_msg("graph", GF_LOG_ERROR, errno, LG_MSG_GETTIMEOFDAY_FAILED,
-               "gettimeofday: "
-               "failed");
-    }
+    char now_str[GF_TIMESTR_SIZE];

-    if (gethostname(hostname, 256) == -1) {
+    if (gethostname(hostname, sizeof(hostname) - 1) != 0) {
         gf_msg("graph", GF_LOG_ERROR, errno, LG_MSG_GETHOSTNAME_FAILED,
-               "gethostname: "
-               "failed");
+               "gethostname failed");
+        hostname[sizeof(hostname) - 1] = '\0';
     }

-    gf_time_fmt(now_str, sizeof now_str, tv.tv_sec, gf_timefmt_dirent);
-    snprintf(uuid, size, "%s-%d-%s:%" GF_PRI_SUSECONDS, hostname, getpid(),
-             now_str, tv.tv_usec);
+    gf_time_fmt_tv(now_str, sizeof now_str, &tv, gf_timefmt_dirent);
+    snprintf(uuid, size, "%s-%d-%s", hostname, getpid(), now_str);
     return;
 }
@@ -514,14 +567,13 @@ glusterfs_graph_prepare(glusterfs_graph_t *graph, glusterfs_ctx_t *ctx,
     } else {
         ret = glusterfs_graph_settop(graph, volume_name, _gf_false);
     }
-    if (!ret) {
-        goto ok;
-    }
-    gf_msg("graph", GF_LOG_ERROR, 0, LG_MSG_GRAPH_ERROR,
-           "glusterfs graph settop failed");
-    return -1;
-ok:
+    if (ret) {
+        gf_msg("graph", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_ERROR,
+               "glusterfs graph settop failed");
+        errno = EINVAL;
+        return -1;
+    }

     /* XXX: WORM VOLUME */
     ret = glusterfs_graph_worm(graph, ctx);
@@ -569,7 +621,7 @@ ok:

     /* XXX: DOB setting */
     gettimeofday(&graph->dob, NULL);
-    fill_uuid(graph->graph_uuid, 128);
+    fill_uuid(graph->graph_uuid, sizeof(graph->graph_uuid), graph->dob);

     graph->id = ctx->graph_id++;
@@ -1092,6 +1144,8 @@ glusterfs_graph_destroy_residual(glusterfs_graph_t *graph)
     ret = xlator_tree_free_memacct(graph->first);

     list_del_init(&graph->list);
+    pthread_mutex_destroy(&graph->mutex);
+    pthread_cond_destroy(&graph->child_down_cond);
     GF_FREE(graph);

     return ret;
@@ -1134,6 +1188,33 @@ out:
 }

 int
+glusterfs_graph_fini(glusterfs_graph_t *graph)
+{
+    xlator_t *trav = NULL;
+
+    trav = graph->first;
+
+    while (trav) {
+        if (trav->init_succeeded) {
+            trav->cleanup_starting = 1;
+            trav->fini(trav);
+            if (trav->local_pool) {
+                mem_pool_destroy(trav->local_pool);
+                trav->local_pool = NULL;
+            }
+            if (trav->itable) {
+                inode_table_destroy(trav->itable);
+                trav->itable = NULL;
+            }
+            trav->init_succeeded = 0;
+        }
+        trav = trav->next;
+    }
+
+    return 0;
+}
+
+int
 glusterfs_graph_attach(glusterfs_graph_t *orig_graph, char *path,
                        glusterfs_graph_t **newgraph)
 {
@@ -1256,3 +1337,544 @@ glusterfs_graph_attach(glusterfs_graph_t *orig_graph, char *path,
     return 0;
 }
+
+int
+glusterfs_muxsvc_cleanup_parent(glusterfs_ctx_t *ctx,
+                                glusterfs_graph_t *parent_graph)
+{
+    if (parent_graph) {
+        if (parent_graph->first) {
+            xlator_destroy(parent_graph->first);
+        }
+        ctx->active = NULL;
+        GF_FREE(parent_graph);
+        parent_graph = NULL;
+    }
+    return 0;
+}
+
+void *
+glusterfs_graph_cleanup(void *arg)
+{
+    glusterfs_graph_t *graph = NULL;
+    glusterfs_ctx_t *ctx = THIS->ctx;
+    int ret = -1;
+    graph = arg;
+
+    if (!graph)
+        return NULL;
+
+    /* To destroy the graph, first send a GF_EVENT_PARENT_DOWN,
+     * then wait for GF_EVENT_CHILD_DOWN to arrive on the top
+     * xl. Once we have the GF_EVENT_CHILD_DOWN event, proceed
+     * to fini.
+     *
+     * During the fini call, this will take a last unref on rpc and
+     * rpc_transport_object.
+     */
+    if (graph->first)
+        default_notify(graph->first, GF_EVENT_PARENT_DOWN, graph->first);
+
+    ret = pthread_mutex_lock(&graph->mutex);
+    if (ret != 0) {
+        gf_msg("glusterfs", GF_LOG_ERROR, EAGAIN, LG_MSG_GRAPH_CLEANUP_FAILED,
+               "Failed to acquire a lock");
+        goto out;
+    }
+    /* check and wait for CHILD_DOWN on the top xlator */
+    while (graph->used) {
+        ret = pthread_cond_wait(&graph->child_down_cond, &graph->mutex);
+        if (ret != 0)
+            gf_msg("glusterfs", GF_LOG_INFO, 0, LG_MSG_GRAPH_CLEANUP_FAILED,
+                   "cond wait failed");
+    }
+
+    ret = pthread_mutex_unlock(&graph->mutex);
+    if (ret != 0) {
+        gf_msg("glusterfs", GF_LOG_ERROR, EAGAIN, LG_MSG_GRAPH_CLEANUP_FAILED,
+               "Failed to release a lock");
+    }
+
+    /* Though we got a child down on the top xlator, we have to wait
+     * until all the notifiers exit, because no thread should access
+     * the xl variables after this point.
+     */
+    pthread_mutex_lock(&ctx->notify_lock);
+    {
+        while (ctx->notifying)
+            pthread_cond_wait(&ctx->notify_cond, &ctx->notify_lock);
+    }
+    pthread_mutex_unlock(&ctx->notify_lock);
+
+    pthread_mutex_lock(&ctx->cleanup_lock);
+    {
+        glusterfs_graph_fini(graph);
+        glusterfs_graph_destroy(graph);
+    }
+    pthread_mutex_unlock(&ctx->cleanup_lock);
+out:
+    return NULL;
+}
+
+glusterfs_graph_t *
+glusterfs_muxsvc_setup_parent_graph(glusterfs_ctx_t *ctx, char *name,
+                                    char *type)
+{
+    glusterfs_graph_t *parent_graph = NULL;
+    xlator_t *ixl = NULL;
+    int ret = -1;
+    parent_graph = GF_CALLOC(1, sizeof(*parent_graph),
+                             gf_common_mt_glusterfs_graph_t);
+    if (!parent_graph)
+        goto out;
+
+    INIT_LIST_HEAD(&parent_graph->list);
+
+    ctx->active = parent_graph;
+    ixl = GF_CALLOC(1, sizeof(*ixl), gf_common_mt_xlator_t);
+    if (!ixl)
+        goto out;
+
+    ixl->ctx = ctx;
+    ixl->graph = parent_graph;
+    ixl->options = dict_new();
+    if (!ixl->options)
+        goto out;
+
+    ixl->name = gf_strdup(name);
+    if (!ixl->name)
+        goto out;
+
+    ixl->is_autoloaded = 1;
+
+    if (xlator_set_type(ixl, type) == -1) {
+        gf_msg("glusterfs", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED,
+               "%s (%s) set type failed", name, type);
+        goto out;
+    }
+
+    glusterfs_graph_set_first(parent_graph, ixl);
+    parent_graph->top = ixl;
+    ixl = NULL;
+
+    gettimeofday(&parent_graph->dob, NULL);
+    fill_uuid(parent_graph->graph_uuid, 128, parent_graph->dob);
+    parent_graph->id = ctx->graph_id++;
+    ret = 0;
+out:
+    if (ixl)
+        xlator_destroy(ixl);
+
+    if (ret) {
+        glusterfs_muxsvc_cleanup_parent(ctx, parent_graph);
+        parent_graph = NULL;
+    }
+    return parent_graph;
+}
+
+int
+glusterfs_svc_mux_pidfile_cleanup(gf_volfile_t *volfile_obj)
+{
+    if (!volfile_obj || !volfile_obj->pidfp)
+        return 0;
+
+    gf_msg_trace("glusterfsd", 0, "pidfile %s cleanup", volfile_obj->vol_id);
+
+    lockf(fileno(volfile_obj->pidfp), F_ULOCK, 0);
+    fclose(volfile_obj->pidfp);
+    volfile_obj->pidfp = NULL;
+
+    return 0;
+}
+
+int
+glusterfs_process_svc_detach(glusterfs_ctx_t *ctx, gf_volfile_t *volfile_obj)
+{
+    xlator_t *last_xl = NULL;
+    glusterfs_graph_t *graph = NULL;
+    glusterfs_graph_t *parent_graph = NULL;
+    pthread_t clean_graph = {
+        0,
+    };
+    int ret = -1;
+    xlator_t *xl = NULL;
+
+    if (!ctx || !ctx->active || !volfile_obj)
+        goto out;
+
+    pthread_mutex_lock(&ctx->cleanup_lock);
+    {
+        parent_graph = ctx->active;
+        graph = volfile_obj->graph;
+        if (!graph)
+            goto unlock;
+        if (graph->first)
+            xl = graph->first;
+
+        last_xl = graph->last_xl;
+        if (last_xl)
+            last_xl->next = NULL;
+        if (!xl || xl->cleanup_starting)
+            goto unlock;
+
+        xl->cleanup_starting = 1;
+        gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_DETACH_STARTED,
+               "detaching child %s", volfile_obj->vol_id);
+
+        list_del_init(&volfile_obj->volfile_list);
+        glusterfs_mux_xlator_unlink(parent_graph->top, xl);
+        glusterfs_svc_mux_pidfile_cleanup(volfile_obj);
+        parent_graph->last_xl = glusterfs_get_last_xlator(parent_graph);
+        parent_graph->xl_count -= graph->xl_count;
+        parent_graph->leaf_count -= graph->leaf_count;
+        parent_graph->id++;
+        ret = 0;
+    }
+unlock:
+    pthread_mutex_unlock(&ctx->cleanup_lock);
+out:
+    if (!ret) {
+        list_del_init(&volfile_obj->volfile_list);
+        if (graph) {
+            ret = gf_thread_create_detached(
+                &clean_graph, glusterfs_graph_cleanup, graph, "graph_clean");
+            if (ret) {
+                gf_msg("glusterfs", GF_LOG_ERROR, EINVAL,
+                       LG_MSG_GRAPH_CLEANUP_FAILED,
+                       "%s failed to create cleanup thread",
+                       volfile_obj->vol_id);
+                ret = 0;
+            }
+        }
+        GF_FREE(volfile_obj);
+    }
+    return ret;
+}
+
+int
+glusterfs_svc_mux_pidfile_setup(gf_volfile_t *volfile_obj, const char *pid_file)
+{
+    int ret = -1;
+    FILE *pidfp = NULL;
+
+    if (!pid_file || !volfile_obj)
+        goto out;
+
+    if (volfile_obj->pidfp) {
+        ret = 0;
+        goto out;
+    }
+    pidfp = fopen(pid_file, "a+");
+    if (!pidfp) {
+        goto out;
+    }
+    volfile_obj->pidfp = pidfp;
+
+    ret = lockf(fileno(pidfp), F_TLOCK, 0);
+    if (ret) {
+        ret = 0;
+        goto out;
+    }
+out:
+    return ret;
+}
+
+int
+glusterfs_svc_mux_pidfile_update(gf_volfile_t *volfile_obj,
+                                 const char *pid_file, pid_t pid)
+{
+    int ret = 0;
+    FILE *pidfp = NULL;
+    int old_pid;
+
+    if (!volfile_obj->pidfp) {
+        ret = glusterfs_svc_mux_pidfile_setup(volfile_obj, pid_file);
+        if (ret == -1)
+            goto out;
+    }
+    pidfp = volfile_obj->pidfp;
+    ret = fscanf(pidfp, "%d", &old_pid);
+    if (ret <= 0) {
+        goto update;
+    }
+    if (old_pid == pid) {
+        ret = 0;
+        goto out;
+    } else {
+        gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED,
+               "Old pid=%d found in pidfile %s. Cleaning up the old pid and "
+               "updating the new pid=%d",
+               old_pid, pid_file, pid);
+    }
+update:
+    ret = sys_ftruncate(fileno(pidfp), 0);
+    if (ret) {
+        gf_msg("glusterfsd", GF_LOG_ERROR, errno,
+               LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED,
+               "pidfile %s truncation failed", pid_file);
+        goto out;
+    }
+
+    ret = fprintf(pidfp, "%d\n", pid);
+    if (ret <= 0) {
+        gf_msg("glusterfsd", GF_LOG_ERROR, errno,
+               LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, "pidfile %s write failed",
+               pid_file);
+        goto out;
+    }
+
+    ret = fflush(pidfp);
+    if (ret) {
+        gf_msg("glusterfsd", GF_LOG_ERROR, errno,
+               LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED, "pidfile %s write failed",
+               pid_file);
+        goto out;
+    }
+out:
+    return ret;
+}
+
+int
+glusterfs_update_mux_pid(dict_t *dict, gf_volfile_t *volfile_obj)
+{
+    char *file = NULL;
+    int ret = -1;
+
+    GF_VALIDATE_OR_GOTO("graph", dict, out);
+    GF_VALIDATE_OR_GOTO("graph", volfile_obj, out);
+
+    ret = dict_get_str(dict, "pidfile", &file);
+    if (ret < 0) {
+        gf_msg("mgmt", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED,
+               "Failed to get pidfile from dict for volfile_id=%s",
+               volfile_obj->vol_id);
+    }
+
+    ret = glusterfs_svc_mux_pidfile_update(volfile_obj, file, getpid());
+    if (ret < 0) {
+        ret = -1;
+        gf_msg("mgmt", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_SETUP_FAILED,
+               "Failed to update the pidfile for volfile_id=%s",
+               volfile_obj->vol_id);
+
+        goto out;
+    }
+
+    if (ret == 1)
+        gf_msg("mgmt", GF_LOG_INFO, 0, LG_MSG_GRAPH_ATTACH_PID_FILE_UPDATED,
+               "PID %d updated in pidfile=%s", getpid(), file);
+    ret = 0;
+out:
+    return ret;
+}
+
+int
+glusterfs_process_svc_attach_volfp(glusterfs_ctx_t *ctx, FILE *fp,
+                                   char *volfile_id, char *checksum,
+                                   dict_t *dict)
+{
+    glusterfs_graph_t *graph = NULL;
+    glusterfs_graph_t *parent_graph = NULL;
+    glusterfs_graph_t *clean_graph = NULL;
+    int ret = -1;
+    xlator_t *xl = NULL;
+    xlator_t *last_xl = NULL;
+    gf_volfile_t *volfile_obj = NULL;
+    pthread_t thread_id = {
+        0,
+    };
+
+    if (!ctx)
+        goto out;
+    parent_graph = ctx->active;
+    graph = glusterfs_graph_construct(fp);
+    if (!graph) {
+        gf_msg("glusterfsd", GF_LOG_ERROR, EINVAL, LG_MSG_GRAPH_ATTACH_FAILED,
+               "failed to construct the graph");
+        goto out;
+    }
+    graph->parent_down = 0;
+    graph->last_xl = glusterfs_get_last_xlator(graph);
+
+    for (xl = graph->first; xl; xl = xl->next) {
+        if (strcmp(xl->type, "mount/fuse") == 0) {
+            gf_msg("glusterfsd", GF_LOG_ERROR, EINVAL,
+                   LG_MSG_GRAPH_ATTACH_FAILED,
+                   "fuse xlator cannot be specified in volume file");
+            goto out;
+        }
+    }
+
+    graph->leaf_count = glusterfs_count_leaves(glusterfs_root(graph));
+    xl = graph->first;
+    /* TODO: memory leaks everywhere; need to free graph in case of error */
+    if (glusterfs_graph_prepare(graph, ctx, xl->name)) {
+        gf_msg("glusterfsd", GF_LOG_WARNING, EINVAL, LG_MSG_GRAPH_ATTACH_FAILED,
+               "failed to prepare graph for xlator %s", xl->name);
+        ret = -1;
+        goto out;
+    } else if (glusterfs_graph_init(graph)) {
+        gf_msg("glusterfsd", GF_LOG_WARNING, EINVAL, LG_MSG_GRAPH_ATTACH_FAILED,
+               "failed to initialize graph for xlator %s", xl->name);
+        ret = -1;
+        goto out;
+    } else if (glusterfs_graph_parent_up(graph)) {
+        gf_msg("glusterfsd", GF_LOG_WARNING, EINVAL, LG_MSG_GRAPH_ATTACH_FAILED,
+               "failed to link the graphs for xlator %s", xl->name);
+        ret = -1;
+        goto out;
+    }
+
+    if (!parent_graph) {
+        parent_graph = glusterfs_muxsvc_setup_parent_graph(ctx, "glustershd",
+                                                           "debug/io-stats");
+        if (!parent_graph)
+            goto out;
+        ((xlator_t *)parent_graph->top)->next = xl;
+        clean_graph = parent_graph;
+    } else {
+        last_xl = parent_graph->last_xl;
+        if (last_xl)
+            last_xl->next = xl;
+        xl->prev = last_xl;
+    }
+    parent_graph->last_xl = graph->last_xl;
+
+    ret = glusterfs_xlator_link(parent_graph->top, xl);
+    if (ret) {
+        gf_msg("graph", GF_LOG_ERROR, 0, LG_MSG_EVENT_NOTIFY_FAILED,
+               "parent up notification failed");
+        goto out;
+    }
+    parent_graph->xl_count += graph->xl_count;
+    parent_graph->leaf_count += graph->leaf_count;
+    parent_graph->id++;
+
+    volfile_obj = GF_CALLOC(1, sizeof(gf_volfile_t), gf_common_volfile_t);
+    if (!volfile_obj) {
+        ret = -1;
+        goto out;
+    }
+    volfile_obj->pidfp = NULL;
+    snprintf(volfile_obj->vol_id, sizeof(volfile_obj->vol_id), "%s",
+             volfile_id);
+
+    if (strcmp(ctx->cmd_args.process_name, "glustershd") == 0) {
+        ret = glusterfs_update_mux_pid(dict, volfile_obj);
+        if (ret == -1) {
+            GF_FREE(volfile_obj);
+            goto out;
+        }
+    }
+
+    graph->used = 1;
+    parent_graph->id++;
+    list_add(&graph->list, &ctx->graphs);
+    INIT_LIST_HEAD(&volfile_obj->volfile_list);
+    volfile_obj->graph = graph;
+    memcpy(volfile_obj->volfile_checksum, checksum,
+           sizeof(volfile_obj->volfile_checksum));
+    list_add_tail(&volfile_obj->volfile_list, &ctx->volfile_list);
+    gf_log_dump_graph(fp, graph);
+    graph = NULL;
+
+    ret = 0;
+out:
+    if (ret) {
+        if (graph) {
+            gluster_graph_take_reference(graph->first);
+            ret = gf_thread_create_detached(&thread_id, glusterfs_graph_cleanup,
+                                            graph, "graph_clean");
+            if (ret) {
+                gf_msg("glusterfs", GF_LOG_ERROR, EINVAL,
+                       LG_MSG_GRAPH_CLEANUP_FAILED,
+                       "%s failed to create cleanup thread", volfile_id);
+                ret = 0;
+            }
+        }
+        if (clean_graph)
+            glusterfs_muxsvc_cleanup_parent(ctx, clean_graph);
+    }
+    return ret;
+}
+
+int
+glusterfs_mux_volfile_reconfigure(FILE *newvolfile_fp, glusterfs_ctx_t *ctx,
+                                  gf_volfile_t *volfile_obj, char *checksum,
+                                  dict_t *dict)
+{
+    glusterfs_graph_t *oldvolfile_graph = NULL;
+    glusterfs_graph_t *newvolfile_graph = NULL;
+    char vol_id[NAME_MAX + 1];
+
+    int ret = -1;
+
+    if (!ctx) {
+        gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, 0, LG_MSG_CTX_NULL,
+               "ctx is NULL");
+        goto out;
+    }
+
+    /* Change the message id */
+    if (!volfile_obj) {
+        gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, 0, LG_MSG_CTX_NULL,
+               "failed to get volfile object");
+        goto out;
+    }
+
+    oldvolfile_graph = volfile_obj->graph;
+    if (!oldvolfile_graph) {
+        goto out;
+    }
+
+    newvolfile_graph = glusterfs_graph_construct(newvolfile_fp);
+
+    if (!newvolfile_graph) {
+        goto out;
+    }
+    newvolfile_graph->last_xl = glusterfs_get_last_xlator(newvolfile_graph);
+
+    glusterfs_graph_prepare(newvolfile_graph, ctx, newvolfile_graph->first);
+
+    if (!is_graph_topology_equal(oldvolfile_graph, newvolfile_graph)) {
+        ret = snprintf(vol_id, sizeof(vol_id), "%s", volfile_obj->vol_id);
+        if (ret < 0)
+            goto out;
+        ret = glusterfs_process_svc_detach(ctx, volfile_obj);
+        if (ret) {
+            gf_msg("glusterfsd-mgmt", GF_LOG_ERROR, EINVAL,
+                   LG_MSG_GRAPH_CLEANUP_FAILED,
+                   "Could not detach old graph; aborting the "
+                   "reconfiguration operation");
+            goto out;
+        }
+        volfile_obj = NULL;
+        ret = glusterfs_process_svc_attach_volfp(ctx, newvolfile_fp, vol_id,
+                                                 checksum, dict);
+        goto out;
+    }
+
+    gf_msg_debug("glusterfsd-mgmt", 0,
+                 "Only options have changed in the new graph");
+
+    ret = glusterfs_graph_reconfigure(oldvolfile_graph, newvolfile_graph);
+    if (ret) {
+        gf_msg_debug("glusterfsd-mgmt", 0,
+                     "Could not reconfigure new options in old graph");
+        goto out;
+    }
+    memcpy(volfile_obj->volfile_checksum, checksum,
+           sizeof(volfile_obj->volfile_checksum));
+
+    ret = 0;
+out:
+
+    if (newvolfile_graph)
+        glusterfs_graph_destroy(newvolfile_graph);
+
+    return ret;
+}
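
Note on the shutdown handshake introduced above: glusterfs_graph_cleanup() sends GF_EVENT_PARENT_DOWN, then blocks on graph->child_down_cond until the notify path marks the top xlator down (graph->used cleared) before it calls fini on each xlator. The following is a minimal, self-contained sketch of that condition-variable pattern; the names demo_graph_t and cleanup_thread are illustrative only and not part of the GlusterFS API (build with gcc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t child_down_cond;
    int used; /* 1 while the top xlator is still up */
} demo_graph_t;

static void *
cleanup_thread(void *arg)
{
    demo_graph_t *g = arg;

    pthread_mutex_lock(&g->mutex);
    /* A while loop, not a plain if: pthread_cond_wait() may wake
     * spuriously, so the predicate decides when the wait is over. */
    while (g->used)
        pthread_cond_wait(&g->child_down_cond, &g->mutex);
    pthread_mutex_unlock(&g->mutex);

    printf("CHILD_DOWN seen; safe to fini and destroy the graph\n");
    return NULL;
}

int
main(void)
{
    demo_graph_t g;
    pthread_t t;

    pthread_mutex_init(&g.mutex, NULL);
    pthread_cond_init(&g.child_down_cond, NULL);
    g.used = 1;

    pthread_create(&t, NULL, cleanup_thread, &g);

    sleep(1); /* stand-in for PARENT_DOWN propagating through the graph */

    /* What the CHILD_DOWN notify handler does: clear the flag under
     * the mutex, then signal the waiting cleanup thread. */
    pthread_mutex_lock(&g.mutex);
    g.used = 0;
    pthread_cond_signal(&g.child_down_cond);
    pthread_mutex_unlock(&g.mutex);

    pthread_join(t, NULL);
    pthread_cond_destroy(&g.child_down_cond);
    pthread_mutex_destroy(&g.mutex);
    return 0;
}

This is also why glusterfs_graph_destroy_residual() now calls pthread_mutex_destroy() and pthread_cond_destroy(): the mutex/condvar pair lives in the graph object and must be torn down with it.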
