summaryrefslogtreecommitdiffstats
path: root/cli/src/cli-cmd-snapshot.c
diff options
context:
space:
mode:
Diffstat (limited to 'cli/src/cli-cmd-snapshot.c')
0 files changed, 0 insertions, 0 deletions
/tr> -rw-r--r--xlators/mgmt/glusterd/src/glusterd-mem-types.h2
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c224
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt.c985
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-mgmt.h12
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-op-sm.c148
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-op-sm.h5
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-rpc-ops.c96
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-snapshot.c6421
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-store.c1922
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-store.h103
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-syncop.c114
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-syncop.h6
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.c1123
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-utils.h62
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volgen.c277
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volgen.h10
-rw-r--r--xlators/mgmt/glusterd/src/glusterd-volume-ops.c74
-rw-r--r--xlators/mgmt/glusterd/src/glusterd.c134
-rw-r--r--xlators/mgmt/glusterd/src/glusterd.h209
-rw-r--r--xlators/protocol/server/src/server-helpers.c58
-rw-r--r--xlators/protocol/server/src/server.c4
-rw-r--r--xlators/protocol/server/src/server.h4
26 files changed, 8318 insertions, 4620 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index 259b82a81..71d076624 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -338,7 +338,9 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
char *volume_id_str = NULL;
struct args_pack pack = {0,};
xlator_t *this = NULL;
+#ifdef HAVE_BD_XLATOR
int caps = 0;
+#endif
GF_ASSERT (volinfo);
GF_ASSERT (volumes);
@@ -611,7 +613,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
uuid_generate (*txn_id);
ret = dict_set_bin (dict, "transaction_id",
- txn_id, sizeof (uuid_t));
+ txn_id, sizeof(*txn_id));
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Failed to set transaction id.");
@@ -640,7 +642,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
goto out;
}
- /* Based on the op_version, acquire a cluster or volume lock */
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
if (priv->op_version < 3) {
ret = glusterd_lock (MY_UUID);
if (ret) {
@@ -669,7 +671,7 @@ glusterd_op_txn_begin (rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
goto out;
}
- ret = glusterd_volume_lock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_lock (volname, MY_UUID, "vol");
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire lock for %s", volname);
@@ -695,14 +697,14 @@ local_locking_done:
}
/* Save opinfo for this transaction with the transaction id */
- txn_op_info.op = op;
- txn_op_info.op_ctx = ctx;
- txn_op_info.req = req;
+ glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, ctx, req);
ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to set transaction's opinfo");
+ if (ctx)
+ dict_unref (ctx);
goto out;
}
@@ -716,11 +718,12 @@ local_locking_done:
out:
if (locked && ret) {
/* Based on the op-version, we release the
- * cluster or volume lock */
+ * cluster or mgmt_v3 lock */
if (priv->op_version < 3)
glusterd_unlock (MY_UUID);
else {
- ret = glusterd_volume_unlock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
+ "vol");
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release lock for %s",
@@ -739,13 +742,15 @@ out:
int
__glusterd_handle_cluster_lock (rpcsvc_request_t *req)
{
- gd1_mgmt_cluster_lock_req lock_req = {{0},};
- int32_t ret = -1;
- glusterd_op_lock_ctx_t *ctx = NULL;
- glusterd_peerinfo_t *peerinfo = NULL;
- xlator_t *this = NULL;
- uuid_t *txn_id = &global_txn_id;
+ dict_t *op_ctx = NULL;
+ int32_t ret = -1;
+ gd1_mgmt_cluster_lock_req lock_req = {{0},};
+ glusterd_op_lock_ctx_t *ctx = NULL;
+ glusterd_op_t op = GD_OP_EVENT_LOCK;
+ glusterd_peerinfo_t *peerinfo = NULL;
glusterd_op_info_t txn_op_info = {{0},};
+ uuid_t *txn_id = &global_txn_id;
+ xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
@@ -782,20 +787,20 @@ __glusterd_handle_cluster_lock (rpcsvc_request_t *req)
ctx->req = req;
ctx->dict = NULL;
- txn_op_info.op = GD_OP_EVENT_LOCK;
- txn_op_info.op_ctx = dict_new ();
- if (!txn_op_info.op_ctx) {
+ op_ctx = dict_new ();
+ if (!op_ctx) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to set new dict");
goto out;
}
- txn_op_info.req = req;
+
+ glusterd_txn_opinfo_init (&txn_op_info, NULL, &op, op_ctx, req);
ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to set transaction's opinfo");
- dict_destroy (txn_op_info.op_ctx);
+ dict_unref (txn_op_info.op_ctx);
goto out;
}
@@ -880,6 +885,7 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
xlator_t *this = NULL;
uuid_t *txn_id = &global_txn_id;
glusterd_op_info_t txn_op_info = {{0},};
+ glusterd_op_sm_state_info_t state;
this = THIS;
GF_ASSERT (this);
@@ -917,19 +923,18 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
* phase where the transaction opinfos are created, won't be called. */
ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to get transaction's opinfo");
+ gf_log (this->name, GF_LOG_DEBUG,
+ "No transaction's opinfo set");
- txn_op_info.op = op_req.op;
- txn_op_info.state.state = GD_OP_STATE_LOCKED;
- txn_op_info.op_ctx = req_ctx->dict;
- txn_op_info.req = req;
+ state.state = GD_OP_STATE_LOCKED;
+ glusterd_txn_opinfo_init (&txn_op_info, &state,
+ &op_req.op, req_ctx->dict, req);
ret = glusterd_set_txn_opinfo (txn_id, &txn_op_info);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to set transaction's opinfo");
- dict_destroy (req_ctx->dict);
+ dict_unref (req_ctx->dict);
goto out;
}
}
@@ -2049,11 +2054,11 @@ glusterd_op_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
}
int
-glusterd_op_volume_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
- int32_t status)
+glusterd_op_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+ int32_t status)
{
- gd1_mgmt_v3_vol_lock_rsp rsp = {{0},};
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
int ret = -1;
GF_ASSERT (req);
@@ -2065,20 +2070,20 @@ glusterd_op_volume_lock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
uuid_copy (rsp.txn_id, *txn_id);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
- gf_log (THIS->name, GF_LOG_DEBUG, "Responded to volume lock, ret: %d",
+ gf_log (THIS->name, GF_LOG_DEBUG, "Responded to mgmt_v3 lock, ret: %d",
ret);
return ret;
}
int
-glusterd_op_volume_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
- int32_t status)
+glusterd_op_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
+ int32_t status)
{
- gd1_mgmt_v3_vol_unlock_rsp rsp = {{0},};
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
int ret = -1;
GF_ASSERT (req);
@@ -2090,9 +2095,10 @@ glusterd_op_volume_unlock_send_resp (rpcsvc_request_t *req, uuid_t *txn_id,
uuid_copy (rsp.txn_id, *txn_id);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
- gf_log (THIS->name, GF_LOG_DEBUG, "Responded to volume unlock, ret: %d",
+ gf_log (THIS->name, GF_LOG_DEBUG,
+ "Responded to mgmt_v3 unlock, ret: %d",
ret);
return ret;
@@ -3859,8 +3865,12 @@ get_brickinfo_from_brickid (char *brickid, glusterd_brickinfo_t **brickinfo)
uuid_parse (volid_str, volid);
ret = glusterd_volinfo_find_by_volume_id (volid, &volinfo);
- if (ret)
- goto out;
+ if (ret) {
+ /* Check if it a snapshot volume */
+ ret = glusterd_snap_volinfo_find_by_volume_id (volid, &volinfo);
+ if (ret)
+ goto out;
+ }
ret = glusterd_volume_brickinfo_get_by_brick (brick, volinfo,
brickinfo);
@@ -4034,6 +4044,7 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_peerctx_t *peerctx = NULL;
gf_boolean_t quorum_action = _gf_false;
+ glusterd_volinfo_t *volinfo = NULL;
peerctx = mydata;
if (!peerctx)
@@ -4061,6 +4072,20 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
gf_log (this->name, GF_LOG_DEBUG, "got RPC_CLNT_DISCONNECT %d",
peerinfo->state.state);
+ if (peerinfo->connected) {
+ list_for_each_entry (volinfo, &conf->volumes, vol_list) {
+ ret = glusterd_mgmt_v3_unlock (volinfo->volname,
+ peerinfo->uuid,
+ "vol");
+ if (ret)
+ gf_log (this->name, GF_LOG_TRACE,
+ "Lock not released for %s",
+ volinfo->volname);
+ }
+
+ ret = 0;
+ }
+
if ((peerinfo->quorum_contrib != QUORUM_DOWN) &&
(peerinfo->state.state == GD_FRIEND_STATE_BEFRIENDED)) {
peerinfo->quorum_contrib = QUORUM_DOWN;
@@ -4176,8 +4201,6 @@ rpcsvc_actor_t gd_svc_cli_actors[] = {
[GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME", GLUSTER_CLI_STATEDUMP_VOLUME, glusterd_handle_cli_statedump_volume, NULL, 0, DRC_NA},
[GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME, glusterd_handle_cli_list_volume, NULL, 0, DRC_NA},
[GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME", GLUSTER_CLI_CLRLOCKS_VOLUME, glusterd_handle_cli_clearlocks_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE, glusterd_handle_copy_file, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC, glusterd_handle_sys_exec, NULL, 0, DRC_NA},
[GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE, glusterd_handle_copy_file, NULL, 0, DRC_NA},
[GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC, glusterd_handle_sys_exec, NULL, 0, DRC_NA},
[GLUSTER_CLI_SNAP] = {"SNAP", GLUSTER_CLI_SNAP, glusterd_handle_snapshot, NULL, 0, DRC_NA},
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index 714695bc1..0f0357c4c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -38,25 +38,139 @@ extern struct rpc_clnt_program gd_mgmt_v3_prog;
typedef ssize_t (*gfs_serialize_t) (struct iovec outmsg, void *data);
+static int
+get_snap_volname_and_volinfo (const char *volpath, char **volname,
+ glusterd_volinfo_t **volinfo)
+{
+ int ret = -1;
+ char *save_ptr = NULL;
+ char *str_token = NULL;
+ char *snapname = NULL;
+ char *volname_token = NULL;
+ char *vol = NULL;
+ glusterd_snap_t *snap = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (volpath);
+ GF_ASSERT (volinfo);
+
+ str_token = gf_strdup (volpath);
+ if (NULL == str_token) {
+ goto out;
+ }
+
+ /* Input volname will have below formats:
+ * /snaps/<snapname>/<volname>.<hostname>
+ * or
+ * /snaps/<snapname>/<parent-volname>
+ * We need to extract snapname and parent_volname */
+
+ /*split string by "/" */
+ strtok_r (str_token, "/", &save_ptr);
+ snapname = strtok_r(NULL, "/", &save_ptr);
+ if (!snapname) {
+ gf_log(this->name, GF_LOG_ERROR, "Invalid path: %s", volpath);
+ goto out;
+ }
+
+ volname_token = strtok_r(NULL, "/", &save_ptr);
+ if (!volname_token) {
+ gf_log(this->name, GF_LOG_ERROR, "Invalid path: %s", volpath);
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to "
+ "fetch snap %s", snapname);
+ goto out;
+ }
+
+ /* Find if its a parent volume name or snap volume
+ * name. This function will succeed if volname_token
+ * is a parent volname
+ */
+ ret = glusterd_volinfo_find (volname_token, volinfo);
+ if (ret) {
+ *volname = gf_strdup (volname_token);
+ if (NULL == *volname) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_snap_volinfo_find (volname_token, snap,
+ volinfo);
+ if (ret) {
+ /* Split the volume name */
+ vol = strtok_r (volname_token, ".", &save_ptr);
+ if (!vol) {
+ gf_log(this->name, GF_LOG_ERROR, "Invalid "
+ "volname (%s)", volname_token);
+ goto out;
+ }
+
+ ret = glusterd_snap_volinfo_find (vol, snap, volinfo);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to "
+ "fetch snap volume from volname (%s)",
+ vol);
+ goto out;
+ }
+ }
+ } else {
+ /*volname_token is parent volname*/
+ ret = glusterd_snap_volinfo_find_from_parent_volname (
+ volname_token, snap, volinfo);
+ if (ret) {
+ gf_log(this->name, GF_LOG_ERROR, "Failed to "
+ "fetch snap volume from parent "
+ "volname (%s)", volname_token);
+ goto out;
+ }
+
+ /* Since volname_token is a parent volname we should
+ * get the snap volname here*/
+ *volname = gf_strdup ((*volinfo)->volname);
+ if (NULL == *volname) {
+ ret = -1;
+ goto out;
+ }
+ }
+
+out:
+ if (ret && NULL != *volname) {
+ GF_FREE (*volname);
+ *volname = NULL;
+ }
+ return ret;
+}
+
static size_t
build_volfile_path (const char *volname, char *path,
size_t path_len, char *trusted_str)
{
- struct stat stbuf = {0,};
- int32_t ret = -1;
- glusterd_conf_t *priv = NULL;
- char *vol = NULL;
- char *dup_volname = NULL;
- char *free_ptr = NULL;
- char *tmp = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- char *server = NULL;
- gf_boolean_t snap_volume = _gf_false;
- char *str = NULL;
- char *tmp_volname = NULL;
- char *input_vol = NULL;
-
- priv = THIS->private;
+ struct stat stbuf = {0,};
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ char *vol = NULL;
+ char *dup_volname = NULL;
+ char *free_ptr = NULL;
+ char *save_ptr = NULL;
+ char *str_token = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *server = NULL;
+ const char *volname_ptr = NULL;
+ char path_prefix [PATH_MAX] = {0,};
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (volname);
+ GF_ASSERT (path);
if (strstr (volname, "gluster/")) {
server = strchr (volname, '/') + 1;
@@ -64,17 +178,22 @@ build_volfile_path (const char *volname, char *path,
path, path_len);
ret = 1;
goto out;
- } else if (strstr (volname, "/snaps/")) {
- input_vol = gf_strdup (volname);
- snap_volume = _gf_true;
- str = strrchr (volname, '/');
- if (str)
- str++;
- dup_volname = gf_strdup (str);
- str = NULL;
- tmp_volname = strtok_r (input_vol, "/", &str);
- if (!tmp_volname)
+ } else if ((str_token = strstr (volname, "/snaps/"))) {
+ ret = get_snap_volname_and_volinfo (str_token, &dup_volname,
+ &volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get snap"
+ " volinfo from path (%s)", volname);
+ ret = -1;
goto out;
+ }
+
+ snprintf (path_prefix, sizeof (path_prefix), "%s/snaps/%s",
+ priv->workdir, volinfo->snapshot->snapname);
+
+ free_ptr = dup_volname;
+ volname_ptr = dup_volname;
+ goto gotvolinfo;
} else if (volname[0] != '/') {
/* Normal behavior */
dup_volname = gf_strdup (volname);
@@ -85,64 +204,58 @@ build_volfile_path (const char *volname, char *path,
dup_volname = gf_strdup (&volname[1]);
}
+ if (!dup_volname) {
+ gf_log(THIS->name, GF_LOG_ERROR, "strdup failed");
+ ret = -1;
+ goto out;
+ }
free_ptr = dup_volname;
+ volname_ptr = volname;
+
+ snprintf (path_prefix, sizeof (path_prefix), "%s/vols",
+ priv->workdir);
ret = glusterd_volinfo_find (dup_volname, &volinfo);
+
if (ret) {
/* Split the volume name */
- vol = strtok_r (dup_volname, ".", &tmp);
+ vol = strtok_r (dup_volname, ".", &save_ptr);
if (!vol)
goto out;
+
ret = glusterd_volinfo_find (vol, &volinfo);
if (ret)
goto out;
}
+gotvolinfo:
if (!glusterd_auth_get_username (volinfo))
trusted_str = NULL;
- if (!snap_volume)
- ret = snprintf (path, path_len, "%s/vols/%s/%s.vol",
- priv->workdir, volinfo->volname, volname);
- else
- ret = snprintf (path, path_len, "%s/vols/%s/snaps/%s/%s.vol",
- priv->workdir, tmp_volname, volinfo->volname,
- volname);
+ ret = snprintf (path, path_len, "%s/%s/%s.vol", path_prefix,
+ volinfo->volname, volname_ptr);
if (ret == -1)
goto out;
ret = stat (path, &stbuf);
if ((ret == -1) && (errno == ENOENT)) {
- if (!snap_volume)
- snprintf (path, path_len, "%s/vols/%s/%s%s-fuse.vol",
- priv->workdir, volinfo->volname,
- (trusted_str ? trusted_str : ""),
- dup_volname);
- else
- snprintf (path, path_len,
- "%s/vols/%s/snaps/%s/%s%s-fuse.vol",
- priv->workdir, tmp_volname, volinfo->volname,
- (trusted_str ? trusted_str:""), dup_volname);
+ snprintf (path, path_len, "%s/%s/%s%s-fuse.vol",
+ path_prefix, volinfo->volname,
+ (trusted_str ? trusted_str : ""),
+ dup_volname);
ret = stat (path, &stbuf);
}
if ((ret == -1) && (errno == ENOENT)) {
- if (!snap_volume)
- snprintf (path, path_len, "%s/vols/%s/%s-tcp.vol",
- priv->workdir, volinfo->volname, volname);
- else
- snprintf (path, path_len,
- "%s/vols/%s/snaps/%s/%s-tcp.vol",
- priv->workdir, tmp_volname, volinfo->volname,
- volname);
+ snprintf (path, path_len, "%s/%s/%s-tcp.vol",
+ path_prefix, volinfo->volname, volname_ptr);
}
ret = 1;
out:
GF_FREE (free_ptr);
- GF_FREE (input_vol);
return ret;
}
@@ -1054,12 +1167,6 @@ glusterd_set_clnt_mgmt_program (glusterd_peerinfo_t *peerinfo,
ret = 0;
}
- if ((gd_mgmt_v3_prog.prognum == trav->prognum) &&
- (gd_mgmt_v3_prog.progver == trav->progver)) {
- peerinfo->mgmt_v3 = &gd_mgmt_v3_prog;
- ret = 0;
- }
-
if (ret) {
gf_log ("", GF_LOG_DEBUG,
"%s (%"PRId64":%"PRId64") not supported",
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.c b/xlators/mgmt/glusterd/src/glusterd-locks.c
index f0658da3a..0af2a186f 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.c
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.c
@@ -26,17 +26,47 @@
#include <signal.h>
-static dict_t *vol_lock;
+#define GF_MAX_LOCKING_ENTITIES 2
+
+/* Valid entities that the mgmt_v3 lock can hold locks upon *
+ * To add newer entities to be locked, we can just add more *
+ * entries to this table along with the type and default value */
+valid_entities valid_types[] = {
+ { "vol", _gf_true },
+ { "snap", _gf_false },
+ { NULL },
+};
+
+static dict_t *mgmt_v3_lock;
+
+/* Checks if the lock request is for a valid entity */
+gf_boolean_t
+glusterd_mgmt_v3_is_type_valid (char *type)
+{
+ int32_t i = 0;
+ gf_boolean_t ret = _gf_false;
+
+ GF_ASSERT (type);
+
+ for (i = 0; valid_types[i].type; i++) {
+ if (!strcmp (type, valid_types[i].type)) {
+ ret = _gf_true;
+ break;
+ }
+ }
+
+ return ret;
+}
-/* Initialize the global vol-lock list(dict) when
+/* Initialize the global mgmt_v3 lock list(dict) when
* glusterd is spawned */
int32_t
-glusterd_vol_lock_init ()
+glusterd_mgmt_v3_lock_init ()
{
int32_t ret = -1;
- vol_lock = dict_new ();
- if (!vol_lock) {
+ mgmt_v3_lock = dict_new ();
+ if (!mgmt_v3_lock) {
ret = -1;
goto out;
}
@@ -46,29 +76,33 @@ out:
return ret;
}
-/* Destroy the global vol-lock list(dict) when
+/* Destroy the global mgmt_v3 lock list(dict) when
* glusterd cleanup is performed */
void
-glusterd_vol_lock_fini ()
+glusterd_mgmt_v3_lock_fini ()
{
- if (vol_lock)
- dict_destroy (vol_lock);
+ if (mgmt_v3_lock)
+ dict_destroy (mgmt_v3_lock);
}
int32_t
-glusterd_get_vol_lock_owner (char *volname, uuid_t *uuid)
+glusterd_get_mgmt_v3_lock_owner (char *key, uuid_t *uuid)
{
- int32_t ret = -1;
- vol_lock_obj *lock_obj = NULL;
- uuid_t no_owner = {"\0"};
+ int32_t ret = -1;
+ mgmt_v3_lock_obj *lock_obj = NULL;
+ uuid_t no_owner = {"\0"};
+ xlator_t *this = NULL;
- if (!volname || !uuid) {
- gf_log ("", GF_LOG_ERROR, "volname or uuid is null.");
+ GF_ASSERT(THIS);
+ this = THIS;
+
+ if (!key || !uuid) {
+ gf_log (this->name, GF_LOG_ERROR, "key or uuid is null.");
ret = -1;
goto out;
}
- ret = dict_get_bin(vol_lock, volname, (void **) &lock_obj);
+ ret = dict_get_bin(mgmt_v3_lock, key, (void **) &lock_obj);
if (!ret)
uuid_copy (*uuid, lock_obj->lock_owner);
else
@@ -76,167 +110,430 @@ glusterd_get_vol_lock_owner (char *volname, uuid_t *uuid)
ret = 0;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
-int32_t
-glusterd_multiple_volumes_unlock (dict_t *dict, uuid_t uuid)
+/* This function is called with the locked_count and type, to *
+ * release all the acquired locks. */
+static int32_t
+glusterd_release_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
+ int32_t locked_count,
+ char *type)
{
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t i = -1;
+ int32_t op_ret = 0;
int32_t ret = -1;
- int32_t op_ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT (dict);
+ GF_ASSERT (type);
+
+ if (locked_count == 0) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "No %s locked as part of this transaction",
+ type);
+ goto out;
+ }
+
+ /* Release all the locks held */
+ for (i = 0; i < locked_count; i++) {
+ snprintf (name_buf, sizeof(name_buf),
+ "%sname%d", type, i+1);
+
+ /* Looking for volname1, volname2 or snapname1, *
+ * as key in the dict snapname2 */
+ ret = dict_get_str (dict, name_buf, &name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to get %s locked_count = %d",
+ name_buf, locked_count);
+ op_ret = ret;
+ continue;
+ }
+
+ ret = glusterd_mgmt_v3_unlock (name, uuid, type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release lock for %s.",
+ name);
+ op_ret = ret;
+ }
+ }
+
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", op_ret);
+ return op_ret;
+}
+
+/* Given the count and type of the entity this function acquires *
+ * locks on multiple elements of the same entity. For example: *
+ * If type is "vol" this function tries to acquire locks on multiple *
+ * volumes */
+static int32_t
+glusterd_acquire_multiple_locks_per_entity (dict_t *dict, uuid_t uuid,
+ int32_t count, char *type)
+{
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
int32_t i = -1;
- int32_t volcount = -1;
- char volname_buf[PATH_MAX] = "";
- char *volname = NULL;
+ int32_t ret = -1;
+ int32_t locked_count = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT (dict);
+ GF_ASSERT (type);
+
+ /* Locking one element after other */
+ for (i = 0; i < count; i++) {
+ snprintf (name_buf, sizeof(name_buf),
+ "%sname%d", type, i+1);
+
+ /* Looking for volname1, volname2 or snapname1, *
+ * as key in the dict snapname2 */
+ ret = dict_get_str (dict, name_buf, &name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to get %s count = %d",
+ name_buf, count);
+ break;
+ }
- if (!dict) {
- gf_log ("", GF_LOG_ERROR, "dict is null.");
- ret = -1;
+ ret = glusterd_mgmt_v3_lock (name, uuid, type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire lock for %s %s "
+ "on behalf of %s. Reversing "
+ "this transaction", type, name,
+ uuid_utoa(uuid));
+ break;
+ }
+ locked_count++;
+ }
+
+ if (count == locked_count) {
+ /* If all locking ops went successfuly, return as success */
+ ret = 0;
goto out;
}
- ret = dict_get_int32 (dict, "volcount", &volcount);
+ /* If we failed to lock one element, unlock others and return failure */
+ ret = glusterd_release_multiple_locks_per_entity (dict, uuid,
+ locked_count,
+ type);
if (ret) {
- gf_log ("", GF_LOG_DEBUG, "Failed to get volcount"
- "name");
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release multiple %s locks",
+ type);
+ }
+ ret = -1;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+/* Given the type of entity, this function figures out if it should unlock a *
+ * single element of multiple elements of the said entity. For example: *
+ * if the type is "vol", this function will accordingly unlock a single volume *
+ * or multiple volumes */
+static int32_t
+glusterd_mgmt_v3_unlock_entity (dict_t *dict, uuid_t uuid, char *type,
+ gf_boolean_t default_value)
+{
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t count = -1;
+ int32_t ret = -1;
+ gf_boolean_t hold_locks = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT (dict);
+ GF_ASSERT (type);
+
+ snprintf (name_buf, sizeof(name_buf), "hold_%s_locks", type);
+ hold_locks = dict_get_str_boolean (dict, name_buf, default_value);
+
+ if (hold_locks == _gf_false) {
+ /* Locks were not held for this particular entity *
+ * Hence nothing to release */
+ ret = 0;
goto out;
}
- /* Unlocking one volume after other */
- for (i = 1; i <= volcount; i++) {
- ret = snprintf (volname_buf, sizeof(volname_buf) - 1,
- "volname%d", i);
- volname_buf[ret] = '\0';
+ /* Looking for volcount or snapcount in the dict */
+ snprintf (name_buf, sizeof(name_buf), "%scount", type);
+ ret = dict_get_int32 (dict, name_buf, &count);
+ if (ret) {
+ /* count is not present. Only one *
+ * element name needs to be unlocked */
+ snprintf (name_buf, sizeof(name_buf), "%sname",
+ type);
+ ret = dict_get_str (dict, name_buf, &name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch %sname", type);
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_unlock (name, uuid, type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release lock for %s %s "
+ "on behalf of %s.", type, name,
+ uuid_utoa(uuid));
+ goto out;
+ }
+ } else {
+ /* Unlocking one element name after another */
+ ret = glusterd_release_multiple_locks_per_entity (dict,
+ uuid,
+ count,
+ type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release all %s locks", type);
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+/* Given the type of entity, this function figures out if it should lock a *
+ * single element or multiple elements of the said entity. For example: *
+ * if the type is "vol", this function will accordingly lock a single volume *
+ * or multiple volumes */
+static int32_t
+glusterd_mgmt_v3_lock_entity (dict_t *dict, uuid_t uuid, char *type,
+ gf_boolean_t default_value)
+{
+ char name_buf[PATH_MAX] = "";
+ char *name = NULL;
+ int32_t count = -1;
+ int32_t ret = -1;
+ gf_boolean_t hold_locks = _gf_false;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT (dict);
+ GF_ASSERT (type);
+
+ snprintf (name_buf, sizeof(name_buf), "hold_%s_locks", type);
+ hold_locks = dict_get_str_boolean (dict, name_buf, default_value);
- ret = dict_get_str (dict, volname_buf, &volname);
+ if (hold_locks == _gf_false) {
+ /* Not holding locks for this particular entity */
+ ret = 0;
+ goto out;
+ }
+
+ /* Looking for volcount or snapcount in the dict */
+ snprintf (name_buf, sizeof(name_buf), "%scount", type);
+ ret = dict_get_int32 (dict, name_buf, &count);
+ if (ret) {
+ /* count is not present. Only one *
+ * element name needs to be locked */
+ snprintf (name_buf, sizeof(name_buf), "%sname",
+ type);
+ ret = dict_get_str (dict, name_buf, &name);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get %s Volcount = %d",
- volname_buf, volcount);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch %sname", type);
goto out;
}
- ret = glusterd_volume_unlock (volname, uuid);
+ ret = glusterd_mgmt_v3_lock (name, uuid, type);
if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to release lock for %s. ", volname);
- op_ret = ret;
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire lock for %s %s "
+ "on behalf of %s.", type, name,
+ uuid_utoa(uuid));
+ goto out;
+ }
+ } else {
+ /* Locking one element name after another */
+ ret = glusterd_acquire_multiple_locks_per_entity (dict,
+ uuid,
+ count,
+ type);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire all %s locks", type);
+ goto out;
}
}
- ret = op_ret;
+ ret = 0;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
+/* Try to release locks of multiple entities like *
+ * volume, snaps etc. */
int32_t
-glusterd_multiple_volumes_lock (dict_t *dict, uuid_t uuid)
+glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid)
{
- int32_t ret = -1;
int32_t i = -1;
- int32_t volcount = -1;
- char volname_buf[PATH_MAX] = "";
- char *volname = NULL;
- int32_t locked_volcount = 0;
+ int32_t ret = -1;
+ int32_t op_ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
if (!dict) {
- gf_log ("", GF_LOG_ERROR, "dict is null.");
+ gf_log (this->name, GF_LOG_ERROR, "dict is null.");
ret = -1;
goto out;
}
- ret = dict_get_int32 (dict, "volcount", &volcount);
- if (ret) {
- gf_log ("", GF_LOG_DEBUG, "Failed to get volcount"
- "name");
- goto out;
+ for (i = 0; valid_types[i].type; i++) {
+ ret = glusterd_mgmt_v3_unlock_entity
+ (dict, uuid,
+ valid_types[i].type,
+ valid_types[i].default_value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to unlock all %s",
+ valid_types[i].type);
+ op_ret = ret;
+ }
}
- /* Locking one volume after other */
- for (i = 1; i <= volcount; i++) {
- ret = snprintf (volname_buf, sizeof(volname_buf) - 1,
- "volname%d", i);
- volname_buf[ret] = '\0';
+ ret = op_ret;
+out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ return ret;
+}
- ret = dict_get_str (dict, volname_buf, &volname);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to get %s Volcount = %d",
- volname_buf, volcount);
- goto out;
- }
+/* Try to acquire locks on multiple entities like *
+ * volume, snaps etc. */
+int32_t
+glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid)
+{
+ int32_t i = -1;
+ int32_t ret = -1;
+ int32_t locked_count = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
- ret = glusterd_volume_lock (volname, uuid);
+ if (!dict) {
+ gf_log (this->name, GF_LOG_ERROR, "dict is null.");
+ ret = -1;
+ goto out;
+ }
+
+ /* Locking one entity after other */
+ for (i = 0; valid_types[i].type; i++) {
+ ret = glusterd_mgmt_v3_lock_entity
+ (dict, uuid,
+ valid_types[i].type,
+ valid_types[i].default_value);
if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to acquire lock for %s. "
- "Unlocking other volumes locked "
- "by this transaction", volname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to lock all %s",
+ valid_types[i].type);
break;
}
- locked_volcount ++;
+ locked_count++;
}
- /* If we failed to lock one volume, unlock others and return failure */
- if (volcount != locked_volcount) {
- for (i = 1; i <= locked_volcount; i++) {
- ret = snprintf (volname_buf, sizeof(volname_buf) - 1,
- "volname%d", i);
- volname_buf[ret] = '\0';
-
- ret = dict_get_str (dict, volname_buf, &volname);
- if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Unable to get %s lockd_volcount = %d",
- volname_buf, volcount);
- goto out;
- }
-
- ret = glusterd_volume_unlock (volname, uuid);
- if (ret)
- gf_log ("", GF_LOG_ERROR,
- "Failed to release lock for %s.",
- volname);
- }
- ret = -1;
+ if (locked_count == GF_MAX_LOCKING_ENTITIES) {
+ /* If all locking ops went successfuly, return as success */
+ ret = 0;
+ goto out;
}
+ /* If we failed to lock one entity, unlock others and return failure */
+ for (i = 0; i < locked_count; i++) {
+ ret = glusterd_mgmt_v3_unlock_entity
+ (dict, uuid,
+ valid_types[i].type,
+ valid_types[i].default_value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to unlock all %s",
+ valid_types[i].type);
+ }
+ }
+ ret = -1;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int32_t
-glusterd_volume_lock (char *volname, uuid_t uuid)
+glusterd_mgmt_v3_lock (const char *name, uuid_t uuid, char *type)
{
- int32_t ret = -1;
- vol_lock_obj *lock_obj = NULL;
- uuid_t owner = {0};
+ char key[PATH_MAX] = "";
+ int32_t ret = -1;
+ mgmt_v3_lock_obj *lock_obj = NULL;
+ gf_boolean_t is_valid = _gf_true;
+ uuid_t owner = {0};
+ xlator_t *this = NULL;
+
+ GF_ASSERT(THIS);
+ this = THIS;
+
+ if (!name || !type) {
+ gf_log (this->name, GF_LOG_ERROR, "name or type is null.");
+ ret = -1;
+ goto out;
+ }
+
+ is_valid = glusterd_mgmt_v3_is_type_valid (type);
+ if (is_valid != _gf_true) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Invalid entity. Cannot perform locking "
+ "operation on %s types", type);
+ ret = -1;
+ goto out;
+ }
- if (!volname) {
- gf_log ("", GF_LOG_ERROR, "volname is null.");
+ ret = snprintf (key, sizeof(key), "%s_%s", name, type);
+ if (ret != strlen(name) + 1 + strlen(type)) {
ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Unable to create key");
goto out;
}
- ret = glusterd_get_vol_lock_owner (volname, &owner);
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Trying to acquire lock of %s %s for %s as %s",
+ type, name, uuid_utoa (uuid), key);
+
+ ret = glusterd_get_mgmt_v3_lock_owner (key, &owner);
if (ret) {
- gf_log ("", GF_LOG_DEBUG, "Unable to get volume lock owner");
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Unable to get mgmt_v3 lock owner");
goto out;
}
/* If the lock has already been held for the given volume
* we fail */
if (!uuid_is_null (owner)) {
- gf_log ("", GF_LOG_ERROR, "Unable to acquire lock. "
- "Lock for %s held by %s", volname,
- uuid_utoa (owner));
+ gf_log (this->name, GF_LOG_ERROR, "Lock for %s held by %s",
+ name, uuid_utoa (owner));
ret = -1;
goto out;
}
- lock_obj = GF_CALLOC (1, sizeof(vol_lock_obj),
- gf_common_mt_vol_lock_obj_t);
+ lock_obj = GF_CALLOC (1, sizeof(mgmt_v3_lock_obj),
+ gf_common_mt_mgmt_v3_lock_obj_t);
if (!lock_obj) {
ret = -1;
goto out;
@@ -244,62 +541,97 @@ glusterd_volume_lock (char *volname, uuid_t uuid)
uuid_copy (lock_obj->lock_owner, uuid);
- ret = dict_set_bin (vol_lock, volname, lock_obj, sizeof(vol_lock_obj));
+ ret = dict_set_bin (mgmt_v3_lock, key, lock_obj,
+ sizeof(*lock_obj));
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to set lock owner "
- "in volume lock");
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set lock owner in mgmt_v3 lock");
if (lock_obj)
GF_FREE (lock_obj);
goto out;
}
- gf_log ("", GF_LOG_DEBUG, "Lock for %s successfully held by %s",
- volname, uuid_utoa (uuid));
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Lock for %s %s successfully held by %s",
+ type, name, uuid_utoa (uuid));
ret = 0;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
int32_t
-glusterd_volume_unlock (char *volname, uuid_t uuid)
+glusterd_mgmt_v3_unlock (const char *name, uuid_t uuid, char *type)
{
- int32_t ret = -1;
- uuid_t owner = {0};
+ char key[PATH_MAX] = "";
+ int32_t ret = -1;
+ gf_boolean_t is_valid = _gf_true;
+ uuid_t owner = {0};
+ xlator_t *this = NULL;
+
+ GF_ASSERT(THIS);
+ this = THIS;
- if (!volname) {
- gf_log ("", GF_LOG_ERROR, "volname is null.");
+ if (!name || !type) {
+                gf_log (this->name, GF_LOG_ERROR, "name or type is null.");
ret = -1;
goto out;
}
- ret = glusterd_get_vol_lock_owner (volname, &owner);
- if (ret)
+ is_valid = glusterd_mgmt_v3_is_type_valid (type);
+ if (is_valid != _gf_true) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Invalid entity. Cannot perform unlocking "
+ "operation on %s types", type);
+ ret = -1;
goto out;
+ }
+
+ ret = snprintf (key, sizeof(key), "%s_%s",
+ name, type);
+        if (ret < 0 || (size_t)ret >= sizeof (key)) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to create key");
+ ret = -1;
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Trying to release lock of %s %s for %s as %s",
+ type, name, uuid_utoa (uuid), key);
+
+ ret = glusterd_get_mgmt_v3_lock_owner (key, &owner);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Unable to get mgmt_v3 lock owner");
+ goto out;
+ }
if (uuid_is_null (owner)) {
- gf_log ("", GF_LOG_ERROR, "Lock for %s not held", volname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Lock for %s %s not held", type, name);
ret = -1;
goto out;
}
ret = uuid_compare (uuid, owner);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Lock for %s held by %s. "
- "Unlock req received from %s", volname,
- uuid_utoa (owner), uuid_utoa (uuid));
+
+ gf_log (this->name, GF_LOG_ERROR, "Lock owner mismatch. "
+ "Lock for %s %s held by %s",
+ type, name, uuid_utoa (owner));
goto out;
}
- /* Removing the volume lock from the global list */
- dict_del (vol_lock, volname);
+ /* Removing the mgmt_v3 lock from the global list */
+ dict_del (mgmt_v3_lock, key);
- gf_log ("", GF_LOG_DEBUG, "Lock for %s successfully released",
- volname);
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Lock for %s %s successfully released",
+ type, name);
ret = 0;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-locks.h b/xlators/mgmt/glusterd/src/glusterd-locks.h
index 956ae7565..83eb8c997 100644
--- a/xlators/mgmt/glusterd/src/glusterd-locks.h
+++ b/xlators/mgmt/glusterd/src/glusterd-locks.h
@@ -15,30 +15,37 @@
#include "config.h"
#endif
-struct volume_lock_object_ {
+typedef struct mgmt_v3_lock_object_ {
uuid_t lock_owner;
-};
-typedef struct volume_lock_object_ vol_lock_obj;
+} mgmt_v3_lock_obj;
+
+typedef struct mgmt_v3_lock_valid_entities {
+ char *type; /* Entity type like vol, snap */
+ gf_boolean_t default_value; /* The default value that *
+ * determines if the locks *
+ * should be held for that *
+ * entity */
+} valid_entities;
int32_t
-glusterd_vol_lock_init ();
+glusterd_mgmt_v3_lock_init ();
void
-glusterd_vol_lock_fini ();
+glusterd_mgmt_v3_lock_fini ();
int32_t
-glusterd_get_vol_lock_owner (char *volname, uuid_t *uuid);
+glusterd_get_mgmt_v3_lock_owner (char *volname, uuid_t *uuid);
int32_t
-glusterd_volume_lock (char *volname, uuid_t uuid);
+glusterd_mgmt_v3_lock (const char *key, uuid_t uuid, char *type);
int32_t
-glusterd_volume_unlock (char *volname, uuid_t uuid);
+glusterd_mgmt_v3_unlock (const char *key, uuid_t uuid, char *type);
int32_t
-glusterd_multiple_volumes_lock (dict_t *dict, uuid_t uuid);
+glusterd_multiple_mgmt_v3_lock (dict_t *dict, uuid_t uuid);
int32_t
-glusterd_multiple_volumes_unlock (dict_t *dict, uuid_t uuid);
+glusterd_multiple_mgmt_v3_unlock (dict_t *dict, uuid_t uuid);
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-mem-types.h b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
index c1009d66f..e6f6a0333 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mem-types.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mem-types.h
@@ -68,7 +68,7 @@ typedef enum gf_gld_mem_types_ {
gf_gld_mt_mop_commit_req_t = gf_common_mt_end + 52,
gf_gld_mt_int = gf_common_mt_end + 53,
gf_gld_mt_snap_t = gf_common_mt_end + 54,
- gf_gld_mt_snap_cg_t = gf_common_mt_end + 55,
+ gf_gld_mt_missed_snapinfo_t = gf_common_mt_end + 55,
gf_gld_mt_end = gf_common_mt_end + 56,
} gf_gld_mem_types_t;
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
index 63bbc6687..a5f38ce9c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt-handler.c
@@ -27,12 +27,15 @@ glusterd_mgmt_v3_null (rpcsvc_request_t *req)
}
static int
-glusterd_mgmt_v3_vol_lock_send_resp (rpcsvc_request_t *req, int32_t status)
+glusterd_mgmt_v3_lock_send_resp (rpcsvc_request_t *req, int32_t status)
{
- gd1_mgmt_v3_vol_lock_rsp rsp = {{0},};
- int ret = -1;
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
rsp.op_ret = status;
@@ -42,60 +45,45 @@ glusterd_mgmt_v3_vol_lock_send_resp (rpcsvc_request_t *req, int32_t status)
glusterd_get_uuid (&rsp.uuid);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
- gf_log (THIS->name, GF_LOG_DEBUG,
- "Responded to volume lock, ret: %d", ret);
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Responded to mgmt_v3 lock, ret: %d", ret);
return ret;
}
static int
-glusterd_syctasked_volume_lock (rpcsvc_request_t *req,
- gd1_mgmt_v3_vol_lock_req *lock_req,
- glusterd_op_lock_ctx_t *ctx)
+glusterd_synctasked_mgmt_v3_lock (rpcsvc_request_t *req,
+ gd1_mgmt_v3_lock_req *lock_req,
+ glusterd_op_lock_ctx_t *ctx)
{
int32_t ret = -1;
- int32_t volcount = -1;
xlator_t *this = NULL;
- char *volname = NULL;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (req);
+ GF_ASSERT (ctx);
+ GF_ASSERT (ctx->dict);
- ret = dict_get_int32 (ctx->dict, "volcount", &volcount);
- if (ret) {
- ret = dict_get_str (ctx->dict, "volname", &volname);
- if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to get volname");
- goto out;
- }
- ret = glusterd_volume_lock (volname, MY_UUID);
-
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire local lock for %s", volname);
- } else {
- /* Trying to acquire volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_lock (ctx->dict, MY_UUID);
- if (ret)
- gf_log ("", GF_LOG_ERROR,
- "Failed to acquire volume locks on localhost");
- }
+ /* Trying to acquire multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_lock (ctx->dict, ctx->uuid);
+ if (ret)
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire mgmt_v3 locks for %s",
+ uuid_utoa (ctx->uuid));
-out:
- glusterd_mgmt_v3_vol_lock_send_resp (req, ret);
+ ret = glusterd_mgmt_v3_lock_send_resp (req, ret);
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
static int
-glusterd_op_state_machine_volume_lock (rpcsvc_request_t *req,
- gd1_mgmt_v3_vol_lock_req *lock_req,
- glusterd_op_lock_ctx_t *ctx)
+glusterd_op_state_machine_mgmt_v3_lock (rpcsvc_request_t *req,
+ gd1_mgmt_v3_lock_req *lock_req,
+ glusterd_op_lock_ctx_t *ctx)
{
int32_t ret = -1;
xlator_t *this = NULL;
@@ -105,9 +93,8 @@ glusterd_op_state_machine_volume_lock (rpcsvc_request_t *req,
GF_ASSERT (this);
GF_ASSERT (req);
- txn_op_info.op = lock_req->op;
- txn_op_info.op_ctx = ctx->dict;
- txn_op_info.req = req;
+ glusterd_txn_opinfo_init (&txn_op_info, NULL, &lock_req->op,
+ ctx->dict, req);
ret = glusterd_set_txn_opinfo (&lock_req->txn_id, &txn_op_info);
if (ret) {
@@ -126,14 +113,14 @@ out:
glusterd_friend_sm ();
glusterd_op_sm ();
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
static int
-glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_lock_fn (rpcsvc_request_t *req)
{
- gd1_mgmt_v3_vol_lock_req lock_req = {{0},};
+ gd1_mgmt_v3_lock_req lock_req = {{0},};
int32_t ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_op_lock_ctx_t *ctx = NULL;
@@ -145,7 +132,7 @@ glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
GF_ASSERT (req);
ret = xdr_to_generic (req->msg[0], &lock_req,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_req);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
if (ret < 0) {
gf_log (this->name, GF_LOG_ERROR, "Failed to decode lock "
"request received from peer");
@@ -153,7 +140,7 @@ glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_DEBUG, "Received volume lock req "
+ gf_log (this->name, GF_LOG_DEBUG, "Received mgmt_v3 lock req "
"from uuid: %s", uuid_utoa (lock_req.uuid));
if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
@@ -187,22 +174,26 @@ glusterd_handle_volume_lock_fn (rpcsvc_request_t *req)
goto out;
}
- is_synctasked = dict_get_str_boolean (ctx->dict, "is_synctasked", _gf_false);
+ is_synctasked = dict_get_str_boolean (ctx->dict,
+ "is_synctasked", _gf_false);
if (is_synctasked)
- ret = glusterd_syctasked_volume_lock (req, &lock_req, ctx);
+ ret = glusterd_synctasked_mgmt_v3_lock (req, &lock_req, ctx);
else
- ret = glusterd_op_state_machine_volume_lock (req, &lock_req, ctx);
+ ret = glusterd_op_state_machine_mgmt_v3_lock (req, &lock_req,
+ ctx);
out:
if (ret) {
if (ctx->dict)
- dict_destroy (ctx->dict);
+ dict_unref (ctx->dict);
if (ctx)
GF_FREE (ctx);
}
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ free (lock_req.dict.dict_val);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
@@ -263,7 +254,8 @@ glusterd_handle_pre_validate_fn (rpcsvc_request_t *req)
ret = xdr_to_generic (req->msg[0], &op_req,
(xdrproc_t)xdr_gd1_mgmt_v3_pre_val_req);
if (ret < 0) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to decode pre validation "
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to decode pre validation "
"request received from peer");
req->rpc_err = GARBAGE_ARGS;
goto out;
@@ -303,7 +295,6 @@ glusterd_handle_pre_validate_fn (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_ERROR,
"Pre Validation failed on operation %s",
gd_op_list[op_req.op]);
- goto out;
}
ret = glusterd_mgmt_v3_pre_validate_send_resp (req, op_req.op,
@@ -323,10 +314,13 @@ out:
free (op_req.dict.dict_val);
+ if (dict)
+ dict_unref (dict);
+
if (rsp_dict)
dict_unref (rsp_dict);
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
@@ -427,7 +421,6 @@ glusterd_handle_brick_op_fn (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_ERROR,
"Brick Op failed on operation %s",
gd_op_list[op_req.op]);
- goto out;
}
ret = glusterd_mgmt_v3_brick_op_send_resp (req, op_req.op,
@@ -447,10 +440,13 @@ out:
free (op_req.dict.dict_val);
+ if (dict)
+ dict_unref (dict);
+
if (rsp_dict)
dict_unref (rsp_dict);
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
@@ -550,7 +546,6 @@ glusterd_handle_commit_fn (rpcsvc_request_t *req)
gf_log (this->name, GF_LOG_ERROR,
"commit failed on operation %s",
gd_op_list[op_req.op]);
- goto out;
}
ret = glusterd_mgmt_v3_commit_send_resp (req, op_req.op,
@@ -570,10 +565,13 @@ out:
free (op_req.dict.dict_val);
+ if (dict)
+ dict_unref (dict);
+
if (rsp_dict)
dict_unref (rsp_dict);
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
@@ -668,14 +666,13 @@ glusterd_handle_post_validate_fn (rpcsvc_request_t *req)
return -1;
}
- ret = gd_mgmt_v3_post_validate_fn (op_req.op, dict, &op_errstr,
- rsp_dict);
+ ret = gd_mgmt_v3_post_validate_fn (op_req.op, op_req.op_ret, dict,
+ &op_errstr, rsp_dict);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Post Validation failed on operation %s",
gd_op_list[op_req.op]);
- goto out;
}
ret = glusterd_mgmt_v3_post_validate_send_resp (req, op_req.op,
@@ -695,20 +692,26 @@ out:
free (op_req.dict.dict_val);
+ if (dict)
+ dict_unref (dict);
+
if (rsp_dict)
dict_unref (rsp_dict);
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
static int
-glusterd_mgmt_v3_vol_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
+glusterd_mgmt_v3_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
{
- gd1_mgmt_v3_vol_unlock_rsp rsp = {{0},};
- int ret = -1;
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
+ int ret = -1;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
GF_ASSERT (req);
rsp.op_ret = status;
@@ -718,61 +721,46 @@ glusterd_mgmt_v3_vol_unlock_send_resp (rpcsvc_request_t *req, int32_t status)
glusterd_get_uuid (&rsp.uuid);
ret = glusterd_submit_reply (req, &rsp, NULL, 0, NULL,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
- gf_log (THIS->name, GF_LOG_DEBUG,
- "Responded to volume unlock, ret: %d", ret);
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Responded to mgmt_v3 unlock, ret: %d", ret);
return ret;
}
static int
-glusterd_syctasked_volume_unlock (rpcsvc_request_t *req,
- gd1_mgmt_v3_vol_unlock_req *unlock_req,
- glusterd_op_lock_ctx_t *ctx)
+glusterd_synctasked_mgmt_v3_unlock (rpcsvc_request_t *req,
+ gd1_mgmt_v3_unlock_req *unlock_req,
+ glusterd_op_lock_ctx_t *ctx)
{
int32_t ret = -1;
- int32_t volcount = -1;
xlator_t *this = NULL;
- char *volname = NULL;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (req);
GF_ASSERT (ctx);
- ret = dict_get_int32 (ctx->dict, "volcount", &volcount);
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (ctx->dict, ctx->uuid);
if (ret) {
- ret = dict_get_str (ctx->dict, "volname", &volname);
- if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to get volname");
- goto out;
- }
- ret = glusterd_volume_unlock (volname, MY_UUID);
- if (ret)
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire local lock for %s", volname);
- } else {
- /* Trying to release volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_unlock (ctx->dict, MY_UUID);
- if (ret)
- gf_log ("", GF_LOG_ERROR,
- "Failed to release volume locks on localhost");
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to release mgmt_v3 locks for %s",
+ uuid_utoa(ctx->uuid));
}
-out:
- glusterd_mgmt_v3_vol_unlock_send_resp (req, ret);
+ ret = glusterd_mgmt_v3_unlock_send_resp (req, ret);
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
static int
-glusterd_op_state_machine_volume_unlock (rpcsvc_request_t *req,
- gd1_mgmt_v3_vol_unlock_req *lock_req,
- glusterd_op_lock_ctx_t *ctx)
+glusterd_op_state_machine_mgmt_v3_unlock (rpcsvc_request_t *req,
+ gd1_mgmt_v3_unlock_req *lock_req,
+ glusterd_op_lock_ctx_t *ctx)
{
int32_t ret = -1;
xlator_t *this = NULL;
@@ -790,14 +778,14 @@ glusterd_op_state_machine_volume_unlock (rpcsvc_request_t *req,
glusterd_friend_sm ();
glusterd_op_sm ();
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
static int
-glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_unlock_fn (rpcsvc_request_t *req)
{
- gd1_mgmt_v3_vol_unlock_req lock_req = {{0},};
+ gd1_mgmt_v3_unlock_req lock_req = {{0},};
int32_t ret = -1;
glusterd_op_lock_ctx_t *ctx = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
@@ -809,7 +797,7 @@ glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
GF_ASSERT (req);
ret = xdr_to_generic (req->msg[0], &lock_req,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_req);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
if (ret < 0) {
gf_log (this->name, GF_LOG_ERROR, "Failed to decode unlock "
"request received from peer");
@@ -817,7 +805,7 @@ glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
goto out;
}
- gf_log (this->name, GF_LOG_DEBUG, "Received volume unlock req "
+ gf_log (this->name, GF_LOG_DEBUG, "Received mgmt_v3 unlock req "
"from uuid: %s", uuid_utoa (lock_req.uuid));
if (glusterd_friend_find_by_uuid (lock_req.uuid, &peerinfo)) {
@@ -851,30 +839,34 @@ glusterd_handle_volume_unlock_fn (rpcsvc_request_t *req)
goto out;
}
- is_synctasked = dict_get_str_boolean (ctx->dict, "is_synctasked", _gf_false);
+ is_synctasked = dict_get_str_boolean (ctx->dict,
+ "is_synctasked", _gf_false);
if (is_synctasked)
- ret = glusterd_syctasked_volume_unlock (req, &lock_req, ctx);
+ ret = glusterd_synctasked_mgmt_v3_unlock (req, &lock_req, ctx);
else
- ret = glusterd_op_state_machine_volume_unlock (req, &lock_req, ctx);
+ ret = glusterd_op_state_machine_mgmt_v3_unlock (req, &lock_req,
+ ctx);
out:
if (ret) {
if (ctx->dict)
- dict_destroy (ctx->dict);
+ dict_unref (ctx->dict);
if (ctx)
GF_FREE (ctx);
}
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
+ free (lock_req.dict.dict_val);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
int
-glusterd_handle_volume_lock (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_lock (rpcsvc_request_t *req)
{
return glusterd_big_locked_handler (req,
- glusterd_handle_volume_lock_fn);
+ glusterd_handle_mgmt_v3_lock_fn);
}
static int
@@ -906,20 +898,20 @@ glusterd_handle_post_validate (rpcsvc_request_t *req)
}
int
-glusterd_handle_volume_unlock (rpcsvc_request_t *req)
+glusterd_handle_mgmt_v3_unlock (rpcsvc_request_t *req)
{
return glusterd_big_locked_handler (req,
- glusterd_handle_volume_unlock_fn);
+ glusterd_handle_mgmt_v3_unlock_fn);
}
rpcsvc_actor_t gd_svc_mgmt_v3_actors[] = {
- [GLUSTERD_MGMT_V3_NULL] = { "NULL", GLUSTERD_MGMT_V3_NULL, glusterd_mgmt_v3_null, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_VOLUME_LOCK] = { "VOL_LOCK", GLUSTERD_MGMT_V3_VOLUME_LOCK, glusterd_handle_volume_lock, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_PRE_VALIDATE] = { "PRE_VAL", GLUSTERD_MGMT_V3_PRE_VALIDATE, glusterd_handle_pre_validate, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_BRICK_OP] = { "BRCK_OP", GLUSTERD_MGMT_V3_BRICK_OP, glusterd_handle_brick_op, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_COMMIT] = { "COMMIT", GLUSTERD_MGMT_V3_COMMIT, glusterd_handle_commit, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_POST_VALIDATE] = { "POST_VAL", GLUSTERD_MGMT_V3_POST_VALIDATE, glusterd_handle_post_validate, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_V3_VOLUME_UNLOCK] = { "VOL_UNLOCK", GLUSTERD_MGMT_V3_VOLUME_UNLOCK, glusterd_handle_volume_unlock, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_NULL] = { "NULL", GLUSTERD_MGMT_V3_NULL, glusterd_mgmt_v3_null, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_LOCK] = { "MGMT_V3_LOCK", GLUSTERD_MGMT_V3_LOCK, glusterd_handle_mgmt_v3_lock, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_PRE_VALIDATE] = { "PRE_VAL", GLUSTERD_MGMT_V3_PRE_VALIDATE, glusterd_handle_pre_validate, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_BRICK_OP] = { "BRCK_OP", GLUSTERD_MGMT_V3_BRICK_OP, glusterd_handle_brick_op, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_COMMIT] = { "COMMIT", GLUSTERD_MGMT_V3_COMMIT, glusterd_handle_commit, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_POST_VALIDATE] = { "POST_VAL", GLUSTERD_MGMT_V3_POST_VALIDATE, glusterd_handle_post_validate, NULL, 0, DRC_NA},
+ [GLUSTERD_MGMT_V3_UNLOCK] = { "MGMT_V3_UNLOCK", GLUSTERD_MGMT_V3_UNLOCK, glusterd_handle_mgmt_v3_unlock, NULL, 0, DRC_NA},
};
struct rpcsvc_program gd_svc_mgmt_v3_prog = {
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 8e2c0c689..d52532e54 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -25,13 +25,19 @@ extern struct rpc_clnt_program gd_mgmt_v3_prog;
static void
gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
- char *op_errstr, int op_code,
- glusterd_peerinfo_t *peerinfo, u_char *uuid)
+ char *op_errstr, int op_code,
+ glusterd_peerinfo_t *peerinfo, u_char *uuid)
{
- char err_str[PATH_MAX] = "Please check log file for details.";
- char op_err[PATH_MAX] = "";
- int len = -1;
- char *peer_str = NULL;
+ char *peer_str = NULL;
+ char err_str[PATH_MAX] = "Please check log file for details.";
+ char op_err[PATH_MAX] = "";
+ int32_t len = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (args);
+ GF_ASSERT (uuid);
if (op_ret) {
args->op_ret = op_ret;
@@ -49,10 +55,10 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
}
switch (op_code){
- case GLUSTERD_MGMT_V3_VOLUME_LOCK:
+ case GLUSTERD_MGMT_V3_LOCK:
{
len = snprintf (op_err, sizeof(op_err) - 1,
- "Locking volume failed "
+ "Locking failed "
"on %s. %s", peer_str, err_str);
break;
}
@@ -84,10 +90,10 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
"on %s. %s", peer_str, err_str);
break;
}
- case GLUSTERD_MGMT_V3_VOLUME_UNLOCK:
+ case GLUSTERD_MGMT_V3_UNLOCK:
{
len = snprintf (op_err, sizeof(op_err) - 1,
- "Unlocking volume failed "
+ "Unlocking failed "
"on %s. %s", peer_str, err_str);
break;
}
@@ -105,7 +111,7 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
"%s", op_err);
err_str[len] = '\0';
- gf_log ("", GF_LOG_ERROR, "%s", op_err);
+ gf_log (this->name, GF_LOG_ERROR, "%s", op_err);
args->errstr = gf_strdup (err_str);
}
@@ -114,128 +120,171 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
int32_t
gd_mgmt_v3_pre_validate_fn (glusterd_op_t op, dict_t *dict,
- char **op_errstr, dict_t *rsp_dict)
+ char **op_errstr, dict_t *rsp_dict)
{
- int ret = -1;
- xlator_t *this = THIS;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
switch (op) {
case GD_OP_SNAP:
{
ret = glusterd_snapshot_prevalidate (dict, op_errstr,
rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Snapshot Prevalidate Failed");
+ goto out;
+ }
+
break;
}
default:
break;
}
+ ret = 0;
+out:
gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
return ret;
}
int32_t
gd_mgmt_v3_brick_op_fn (glusterd_op_t op, dict_t *dict,
- char **op_errstr, dict_t *rsp_dict)
+ char **op_errstr, dict_t *rsp_dict)
{
- int ret = -1;
- xlator_t *this = THIS;
- int64_t vol_count = 0;
- int64_t count = 1;
- char key[1024] = {0,};
- char *volname = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
switch (op) {
case GD_OP_SNAP:
{
-
- ret = dict_get_int64 (dict, "volcount", &vol_count);
- if (ret)
+ ret = glusterd_snapshot_brickop (dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "snapshot brickop "
+ "failed");
goto out;
- while (count <= vol_count) {
- snprintf (key, 1024, "volname%"PRId64, count);
- ret = dict_get_str (dict, key, &volname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Unable to get"
- " volname");
- goto out;
- }
- ret = dict_set_str (dict, "volname", volname);
- if (ret)
- goto out;
-
- ret = gd_brick_op_phase (op, NULL, dict, op_errstr);
- if (ret)
- goto out;
- volname = NULL;
- count++;
}
-
- dict_del (dict, "volname");
-
break;
}
default:
break;
}
+
+ ret = 0;
out:
- gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
+ gf_log (this->name, GF_LOG_TRACE, "OP = %d. Returning %d", op, ret);
return ret;
}
int32_t
gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
- char **op_errstr, dict_t *rsp_dict)
+ char **op_errstr, dict_t *rsp_dict)
{
- int ret = -1;
- xlator_t *this = THIS;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
switch (op) {
case GD_OP_SNAP:
{
ret = glusterd_snapshot (dict, op_errstr, rsp_dict);
- if (ret)
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Snapshot Commit Failed");
goto out;
+ }
break;
}
default:
break;
}
- ret = 0;
+ ret = 0;
out:
gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
return ret;
}
int32_t
-gd_mgmt_v3_post_validate_fn (glusterd_op_t op, dict_t *dict,
- char **op_errstr, dict_t *rsp_dict)
+gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
+ char **op_errstr, dict_t *rsp_dict)
{
- int ret = -1;
- xlator_t *this = THIS;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
+
+ switch (op) {
+ case GD_OP_SNAP:
+ {
+ ret = glusterd_snapshot_postvalidate (dict, op_ret,
+ op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "postvalidate operation failed");
+ goto out;
+ }
+ break;
+ }
+ default:
+ break;
+ }
ret = 0;
- gf_log (this->name, GF_LOG_DEBUG, "OP = %d. Returning %d", op, ret);
+
+out:
+ gf_log (this->name, GF_LOG_TRACE, "OP = %d. Returning %d", op, ret);
return ret;
}
int32_t
-gd_mgmt_v3_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- int ret = -1;
+ int32_t ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
- gd1_mgmt_v3_vol_lock_rsp rsp = {{0},};
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
call_frame_t *frame = NULL;
- int op_ret = -1;
- int op_errno = -1;
-
- GF_ASSERT(req);
- GF_ASSERT(iov);
- GF_ASSERT(myframe);
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
+
+ /* Even though the lock command has failed, while collating the errors
+ (gd_mgmt_v3_collate_errors), args->op_ret and args->op_errno will be
+ used. @args is obtained from frame->local. So before checking the
+ status of the request and going out if its a failure, args should be
+ set to frame->local. Otherwise, while collating args will be NULL.
+ This applies to other phases such as prevalidate, brickop, commit and
+ postvalidate also.
+ */
frame = myframe;
args = frame->local;
peerinfo = frame->cookie;
@@ -247,8 +296,14 @@ gd_mgmt_v3_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
goto out;
}
+ if (!iov) {
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
+ op_errno = EINVAL;
+ goto out;
+ }
+
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
if (ret < 0)
goto out;
@@ -256,36 +311,41 @@ gd_mgmt_v3_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
+
out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_VOLUME_LOCK,
- peerinfo, rsp.uuid);
+ GLUSTERD_MGMT_V3_LOCK,
+ peerinfo, rsp.uuid);
+ free (rsp.dict.dict_val);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
}
int32_t
-gd_mgmt_v3_vol_lock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_lock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- gd_mgmt_v3_vol_lock_cbk_fn);
+ gd_mgmt_v3_lock_cbk_fn);
}
int
-gd_mgmt_v3_vol_lock (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_lock (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
{
- int ret = -1;
- gd1_mgmt_v3_vol_lock_req req = {{0},};
+ gd1_mgmt_v3_lock_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
- GF_ASSERT(op_ctx);
- GF_ASSERT(peerinfo);
- GF_ASSERT(args);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
ret = dict_allocate_and_serialize (op_ctx,
&req.dict.dict_val,
@@ -298,49 +358,45 @@ gd_mgmt_v3_vol_lock (glusterd_op_t op, dict_t *op_ctx,
synclock_unlock (&conf->big_lock);
ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
&gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_VOLUME_LOCK,
- gd_mgmt_v3_vol_lock_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_vol_lock_req);
+ GLUSTERD_MGMT_V3_LOCK,
+ gd_mgmt_v3_lock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_lock_req);
synclock_lock (&conf->big_lock);
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
int
glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *dict, char **op_errstr, int npeers,
- gf_boolean_t *is_acquired)
+ dict_t *dict, char **op_errstr, int npeers,
+ gf_boolean_t *is_acquired)
{
- int ret = -1;
- int peer_cnt = 0;
char *volname = NULL;
- uuid_t peer_uuid = {0};
- xlator_t *this = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
struct syncargs args = {0};
struct list_head *peers = NULL;
+ uuid_t peer_uuid = {0};
+ xlator_t *this = NULL;
this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (is_acquired);
+
peers = &conf->xaction_peers;
- /* Volume(s) lock on local node */
- ret = dict_get_str (dict, "volname", &volname);
+ /* Trying to acquire multiple mgmt_v3 locks on local node */
+ ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID);
if (ret) {
- /* Trying to acquire volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_lock (dict, MY_UUID);
- if (ret) {
- gf_log ("", GF_LOG_ERROR,
- "Failed to acquire volume locks on localhost");
- goto out;
- }
- } else {
- ret = glusterd_volume_lock (volname, MY_UUID);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Unable to acquire local lock for %s", volname);
- goto out;
- }
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to acquire mgmt_v3 locks on localhost");
+ goto out;
}
*is_acquired = _gf_true;
@@ -350,18 +406,19 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
goto out;
}
- /* Sending Volume lock req to other nodes in the cluster */
+ /* Sending mgmt_v3 lock req to other nodes in the cluster */
+ gd_syncargs_init (&args, NULL);
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_vol_lock (op, dict, peerinfo, &args,
- MY_UUID, peer_uuid);
+ gd_mgmt_v3_lock (op, dict, peerinfo, &args,
+ MY_UUID, peer_uuid);
peer_cnt++;
}
gd_synctask_barrier_wait((&args), peer_cnt);
if (args.errstr)
- *op_errstr = gf_strdup (args.errstr);
+ *op_errstr = gf_strdup (args.errstr);
ret = args.op_ret;
@@ -376,11 +433,11 @@ out:
if (volname)
ret = gf_asprintf (op_errstr,
"Another transaction is in progress "
- "for %s. Please try again after sometime.",
- volname);
+ "for %s. Please try again after "
+ "sometime.", volname);
else
ret = gf_asprintf (op_errstr,
- "Another transaction is in progress. "
+ "Another transaction is in progress "
"Please try again after sometime.");
if (ret == -1)
@@ -392,21 +449,55 @@ out:
return ret;
}
+int
+glusterd_pre_validate_aggr_rsp_dict (glusterd_op_t op, dict_t *aggr, dict_t *rsp)
+{
+ int32_t ret = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (aggr);
+ GF_ASSERT (rsp);
+
+ switch (op) {
+ case GD_OP_SNAP:
+ ret = glusterd_snap_pre_validate_use_rsp_dict (aggr, rsp);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to aggregate prevalidate "
+ "response dictionaries.");
+ goto out;
+ }
+ break;
+ default:
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Invalid op (%s)", gd_op_list[op]);
+
+ break;
+ }
+out:
+ return ret;
+}
+
int32_t
gd_mgmt_v3_pre_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
- int ret = -1;
+ int32_t ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
gd1_mgmt_v3_pre_val_rsp rsp = {{0},};
call_frame_t *frame = NULL;
- int op_ret = -1;
- int op_errno = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
- GF_ASSERT(req);
- GF_ASSERT(iov);
- GF_ASSERT(myframe);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
frame = myframe;
args = frame->local;
@@ -419,19 +510,65 @@ gd_mgmt_v3_pre_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
goto out;
}
+ if (!iov) {
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
+ op_errno = EINVAL;
+ }
+
ret = xdr_to_generic (*iov, &rsp,
(xdrproc_t)xdr_gd1_mgmt_v3_pre_val_rsp);
if (ret < 0)
goto out;
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ rsp_dict = dict_new ();
+
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len,
+ &rsp_dict);
+ if (ret < 0) {
+ GF_FREE (rsp.dict.dict_val);
+ goto out;
+ } else {
+ rsp_dict->extra_stdfree = rsp.dict.dict_val;
+ }
+ }
+
uuid_copy (args->uuid, rsp.uuid);
+ pthread_mutex_lock (&args->lock_dict);
+ {
+ ret = glusterd_pre_validate_aggr_rsp_dict (rsp.op, args->dict,
+ rsp_dict);
+ }
+ pthread_mutex_unlock (&args->lock_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ if (!rsp.op_ret)
+ op_ret = ret;
+ else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+ } else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
- op_ret = rsp.op_ret;
- op_errno = rsp.op_errno;
out:
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
GLUSTERD_MGMT_V3_PRE_VALIDATE,
peerinfo, rsp.uuid);
+
+ if (rsp.op_errstr)
+ free (rsp.op_errstr);
+
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
@@ -439,25 +576,28 @@ out:
int32_t
gd_mgmt_v3_pre_validate_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
gd_mgmt_v3_pre_validate_cbk_fn);
}
int
-gd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *op_ctx,
+gd_mgmt_v3_pre_validate_req (glusterd_op_t op, dict_t *op_ctx,
glusterd_peerinfo_t *peerinfo,
struct syncargs *args, uuid_t my_uuid,
uuid_t recv_uuid)
{
- int ret = -1;
- gd1_mgmt_v3_pre_val_req req = {{0},};
- glusterd_conf_t *conf = THIS->private;
+ int32_t ret = -1;
+ gd1_mgmt_v3_pre_val_req req = {{0},};
+ glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
- GF_ASSERT(op_ctx);
- GF_ASSERT(peerinfo);
- GF_ASSERT(args);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
ret = dict_allocate_and_serialize (op_ctx,
&req.dict.dict_val,
@@ -475,16 +615,17 @@ gd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *op_ctx,
(xdrproc_t) xdr_gd1_mgmt_v3_pre_val_req);
synclock_lock (&conf->big_lock);
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
int
glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *req_dict, char **op_errstr, int npeers)
+ dict_t *req_dict, char **op_errstr, int npeers)
{
- int ret = -1;
- int peer_cnt = 0;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
dict_t *rsp_dict = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
struct syncargs args = {0};
@@ -493,6 +634,11 @@ glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
xlator_t *this = NULL;
this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (req_dict);
+ GF_ASSERT (op_errstr);
+
peers = &conf->xaction_peers;
rsp_dict = dict_new ();
@@ -504,7 +650,7 @@ glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
/* Pre Validation on local node */
ret = gd_mgmt_v3_pre_validate_fn (op, req_dict, op_errstr,
- rsp_dict);
+ rsp_dict);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
@@ -515,7 +661,8 @@ glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
if (*op_errstr == NULL) {
ret = gf_asprintf (op_errstr,
"Pre-validation failed "
- "on localhost");
+ "on localhost. Please "
+ "check log file for details");
if (ret == -1)
*op_errstr = NULL;
@@ -524,6 +671,15 @@ glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
goto out;
}
+ ret = glusterd_pre_validate_aggr_rsp_dict (op, req_dict,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ goto out;
+ }
+
dict_unref (rsp_dict);
rsp_dict = NULL;
@@ -533,10 +689,11 @@ glusterd_mgmt_v3_pre_validate (glusterd_conf_t *conf, glusterd_op_t op,
}
/* Sending Pre Validation req to other nodes in the cluster */
+ gd_syncargs_init (&args, req_dict);
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_pre_validate (op, req_dict, peerinfo, &args,
+ gd_mgmt_v3_pre_validate_req (op, req_dict, peerinfo, &args,
MY_UUID, peer_uuid);
peer_cnt++;
}
@@ -560,22 +717,22 @@ out:
int
glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
- glusterd_op_t op)
+ glusterd_op_t op)
{
- int ret = -1;
+ int32_t ret = -1;
dict_t *req_dict = NULL;
xlator_t *this = NULL;
- GF_ASSERT (req);
-
this = THIS;
GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (dict);
req_dict = dict_new ();
if (!req_dict)
goto out;
-
switch (op) {
case GD_OP_SNAP:
dict_copy (dict, req_dict);
@@ -586,26 +743,27 @@ glusterd_mgmt_v3_build_payload (dict_t **req, char **op_errstr, dict_t *dict,
*req = req_dict;
ret = 0;
-
out:
return ret;
}
int32_t
gd_mgmt_v3_brick_op_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
- int ret = -1;
+ int32_t ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
gd1_mgmt_v3_brick_op_rsp rsp = {{0},};
call_frame_t *frame = NULL;
- int op_ret = -1;
- int op_errno = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
- GF_ASSERT(req);
- GF_ASSERT(iov);
- GF_ASSERT(myframe);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
frame = myframe;
args = frame->local;
@@ -613,11 +771,21 @@ gd_mgmt_v3_brick_op_cbk_fn (struct rpc_req *req, struct iovec *iov,
frame->local = NULL;
frame->cookie = NULL;
+ /* If the operation failed, then iov can be NULL. So better check the
+ status of the operation and then worry about iov (if the status of
+ the command is success)
+ */
if (-1 == req->rpc_status) {
op_errno = ENOTCONN;
goto out;
}
+ if (!iov) {
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
+ op_errno = EINVAL;
+ goto out;
+ }
+
ret = xdr_to_generic (*iov, &rsp,
(xdrproc_t)xdr_gd1_mgmt_v3_brick_op_rsp);
if (ret < 0)
@@ -627,10 +795,17 @@ gd_mgmt_v3_brick_op_cbk_fn (struct rpc_req *req, struct iovec *iov,
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
+
out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_BRICK_OP,
- peerinfo, rsp.uuid);
+ GLUSTERD_MGMT_V3_BRICK_OP,
+ peerinfo, rsp.uuid);
+
+ if (rsp.op_errstr)
+ free (rsp.op_errstr);
+
+ free (rsp.dict.dict_val);
+
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
@@ -638,25 +813,28 @@ out:
int32_t
gd_mgmt_v3_brick_op_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
gd_mgmt_v3_brick_op_cbk_fn);
}
int
-gd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *op_ctx,
+gd_mgmt_v3_brick_op_req (glusterd_op_t op, dict_t *op_ctx,
glusterd_peerinfo_t *peerinfo,
struct syncargs *args, uuid_t my_uuid,
uuid_t recv_uuid)
{
- int ret = -1;
+ int32_t ret = -1;
gd1_mgmt_v3_brick_op_req req = {{0},};
- glusterd_conf_t *conf = THIS->private;
+ glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
- GF_ASSERT(op_ctx);
- GF_ASSERT(peerinfo);
- GF_ASSERT(args);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
ret = dict_allocate_and_serialize (op_ctx,
&req.dict.dict_val,
@@ -674,16 +852,17 @@ gd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *op_ctx,
(xdrproc_t) xdr_gd1_mgmt_v3_brick_op_req);
synclock_lock (&conf->big_lock);
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
int
glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *req_dict, char **op_errstr, int npeers)
+ dict_t *req_dict, char **op_errstr, int npeers)
{
- int ret = -1;
- int peer_cnt = 0;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
dict_t *rsp_dict = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
struct syncargs args = {0};
@@ -692,6 +871,11 @@ glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
xlator_t *this = NULL;
this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (req_dict);
+ GF_ASSERT (op_errstr);
+
peers = &conf->xaction_peers;
rsp_dict = dict_new ();
@@ -714,7 +898,8 @@ glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
if (*op_errstr == NULL) {
ret = gf_asprintf (op_errstr,
"Brick ops failed "
- "on localhost");
+ "on localhost. Please "
+ "check log file for details");
if (ret == -1)
*op_errstr = NULL;
@@ -732,10 +917,11 @@ glusterd_mgmt_v3_brick_op (glusterd_conf_t *conf, glusterd_op_t op,
}
/* Sending brick op req to other nodes in the cluster */
+ gd_syncargs_init (&args, NULL);
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_brick_op (op, req_dict, peerinfo, &args,
+ gd_mgmt_v3_brick_op_req (op, req_dict, peerinfo, &args,
MY_UUID, peer_uuid);
peer_cnt++;
}
@@ -759,19 +945,22 @@ out:
int32_t
gd_mgmt_v3_commit_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
- int ret = -1;
+ int32_t ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
gd1_mgmt_v3_commit_rsp rsp = {{0},};
call_frame_t *frame = NULL;
- int op_ret = -1;
- int op_errno = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ dict_t *rsp_dict = NULL;
+ xlator_t *this = NULL;
- GF_ASSERT(req);
- GF_ASSERT(iov);
- GF_ASSERT(myframe);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
frame = myframe;
args = frame->local;
@@ -784,19 +973,63 @@ gd_mgmt_v3_commit_cbk_fn (struct rpc_req *req, struct iovec *iov,
goto out;
}
+ if (!iov) {
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
+ op_errno = EINVAL;
+ goto out;
+ }
+
ret = xdr_to_generic (*iov, &rsp,
(xdrproc_t)xdr_gd1_mgmt_v3_commit_rsp);
if (ret < 0)
goto out;
+ if (rsp.dict.dict_len) {
+ /* Unserialize the dictionary */
+ rsp_dict = dict_new ();
+
+ ret = dict_unserialize (rsp.dict.dict_val,
+ rsp.dict.dict_len,
+ &rsp_dict);
+ if (ret < 0) {
+ GF_FREE (rsp.dict.dict_val);
+ goto out;
+ } else {
+ rsp_dict->extra_stdfree = rsp.dict.dict_val;
+ }
+ }
+
uuid_copy (args->uuid, rsp.uuid);
+ pthread_mutex_lock (&args->lock_dict);
+ {
+ ret = glusterd_syncop_aggr_rsp_dict (rsp.op, args->dict,
+ rsp_dict);
+ }
+ pthread_mutex_unlock (&args->lock_dict);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ if (!rsp.op_ret)
+ op_ret = ret;
+ else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
+ } else {
+ op_ret = rsp.op_ret;
+ op_errno = rsp.op_errno;
+ }
- op_ret = rsp.op_ret;
- op_errno = rsp.op_errno;
out:
+ if (rsp_dict)
+ dict_unref (rsp_dict);
+
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
GLUSTERD_MGMT_V3_COMMIT,
peerinfo, rsp.uuid);
+
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
@@ -804,25 +1037,28 @@ out:
int32_t
gd_mgmt_v3_commit_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
gd_mgmt_v3_commit_cbk_fn);
}
int
-gd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx,
+gd_mgmt_v3_commit_req (glusterd_op_t op, dict_t *op_ctx,
glusterd_peerinfo_t *peerinfo,
struct syncargs *args, uuid_t my_uuid,
uuid_t recv_uuid)
{
- int ret = -1;
- gd1_mgmt_v3_commit_req req = {{0},};
+ int32_t ret = -1;
+ gd1_mgmt_v3_commit_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
- GF_ASSERT(op_ctx);
- GF_ASSERT(peerinfo);
- GF_ASSERT(args);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
ret = dict_allocate_and_serialize (op_ctx,
&req.dict.dict_val,
@@ -840,16 +1076,18 @@ gd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx,
(xdrproc_t) xdr_gd1_mgmt_v3_commit_req);
synclock_lock (&conf->big_lock);
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
int
glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *req_dict, char **op_errstr, int npeers)
+ dict_t *op_ctx, dict_t *req_dict,
+ char **op_errstr, int npeers)
{
- int ret = -1;
- int peer_cnt = 0;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
dict_t *rsp_dict = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
struct syncargs args = {0};
@@ -858,6 +1096,12 @@ glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
xlator_t *this = NULL;
this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (req_dict);
+ GF_ASSERT (op_errstr);
+
peers = &conf->xaction_peers;
rsp_dict = dict_new ();
@@ -880,7 +1124,8 @@ glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
if (*op_errstr == NULL) {
ret = gf_asprintf (op_errstr,
"Commit failed "
- "on localhost");
+ "on localhost. Please "
+ "check log file for details.");
if (ret == -1)
*op_errstr = NULL;
@@ -889,6 +1134,15 @@ glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
goto out;
}
+ ret = glusterd_syncop_aggr_rsp_dict (op, op_ctx,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ "Failed to aggregate response from "
+ " node/brick");
+ goto out;
+ }
+
dict_unref (rsp_dict);
rsp_dict = NULL;
@@ -898,10 +1152,11 @@ glusterd_mgmt_v3_commit (glusterd_conf_t *conf, glusterd_op_t op,
}
/* Sending commit req to other nodes in the cluster */
+ gd_syncargs_init (&args, op_ctx);
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_commit (op, req_dict, peerinfo, &args,
+ gd_mgmt_v3_commit_req (op, req_dict, peerinfo, &args,
MY_UUID, peer_uuid);
peer_cnt++;
}
@@ -925,19 +1180,21 @@ out:
int32_t
gd_mgmt_v3_post_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
- int ret = -1;
+ int32_t ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
gd1_mgmt_v3_post_val_rsp rsp = {{0},};
call_frame_t *frame = NULL;
- int op_ret = -1;
- int op_errno = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
- GF_ASSERT(req);
- GF_ASSERT(iov);
- GF_ASSERT(myframe);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
frame = myframe;
args = frame->local;
@@ -950,6 +1207,12 @@ gd_mgmt_v3_post_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
goto out;
}
+ if (!iov) {
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
+ op_errno = EINVAL;
+ goto out;
+ }
+
ret = xdr_to_generic (*iov, &rsp,
(xdrproc_t)xdr_gd1_mgmt_v3_post_val_rsp);
if (ret < 0)
@@ -959,10 +1222,15 @@ gd_mgmt_v3_post_validate_cbk_fn (struct rpc_req *req, struct iovec *iov,
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
+
out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
GLUSTERD_MGMT_V3_POST_VALIDATE,
peerinfo, rsp.uuid);
+ if (rsp.op_errstr)
+ free (rsp.op_errstr);
+
+ free (rsp.dict.dict_val);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
@@ -970,25 +1238,28 @@ out:
int32_t
gd_mgmt_v3_post_validate_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
gd_mgmt_v3_post_validate_cbk_fn);
}
int
-gd_mgmt_v3_post_validate (glusterd_op_t op, dict_t *op_ctx,
+gd_mgmt_v3_post_validate_req (glusterd_op_t op, int32_t op_ret, dict_t *op_ctx,
glusterd_peerinfo_t *peerinfo,
struct syncargs *args, uuid_t my_uuid,
uuid_t recv_uuid)
{
- int ret = -1;
+ int32_t ret = -1;
gd1_mgmt_v3_post_val_req req = {{0},};
- glusterd_conf_t *conf = THIS->private;
+ glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
- GF_ASSERT(op_ctx);
- GF_ASSERT(peerinfo);
- GF_ASSERT(args);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
ret = dict_allocate_and_serialize (op_ctx,
&req.dict.dict_val,
@@ -998,6 +1269,7 @@ gd_mgmt_v3_post_validate (glusterd_op_t op, dict_t *op_ctx,
uuid_copy (req.uuid, my_uuid);
req.op = op;
+ req.op_ret = op_ret;
synclock_unlock (&conf->big_lock);
ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
&gd_mgmt_v3_prog,
@@ -1006,16 +1278,18 @@ gd_mgmt_v3_post_validate (glusterd_op_t op, dict_t *op_ctx,
(xdrproc_t) xdr_gd1_mgmt_v3_post_val_req);
synclock_lock (&conf->big_lock);
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
int
glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *req_dict, char **op_errstr, int npeers)
+ int32_t op_ret, dict_t *dict, dict_t *req_dict,
+ char **op_errstr, int npeers)
{
- int ret = -1;
- int peer_cnt = 0;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
dict_t *rsp_dict = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
struct syncargs args = {0};
@@ -1024,7 +1298,14 @@ glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
xlator_t *this = NULL;
this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (dict);
+ GF_ASSERT (req_dict);
+ GF_ASSERT (op_errstr);
+
peers = &conf->xaction_peers;
+ GF_ASSERT (peers);
rsp_dict = dict_new ();
if (!rsp_dict) {
@@ -1033,9 +1314,12 @@ glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
goto out;
}
+ /* Copy the contents of dict like missed snaps info to req_dict */
+ dict_copy (dict, req_dict);
+
/* Post Validation on local node */
- ret = gd_mgmt_v3_post_validate_fn (op, req_dict, op_errstr,
- rsp_dict);
+ ret = gd_mgmt_v3_post_validate_fn (op, op_ret, req_dict, op_errstr,
+ rsp_dict);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
@@ -1046,7 +1330,8 @@ glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
if (*op_errstr == NULL) {
ret = gf_asprintf (op_errstr,
"Post-validation failed "
- "on localhost");
+ "on localhost. Please check "
+ "log file for details");
if (ret == -1)
*op_errstr = NULL;
@@ -1064,11 +1349,12 @@ glusterd_mgmt_v3_post_validate (glusterd_conf_t *conf, glusterd_op_t op,
}
/* Sending Post Validation req to other nodes in the cluster */
+ gd_syncargs_init (&args, req_dict);
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_post_validate (op, req_dict, peerinfo, &args,
- MY_UUID, peer_uuid);
+ gd_mgmt_v3_post_validate_req (op, op_ret, req_dict, peerinfo,
+ &args, MY_UUID, peer_uuid);
peer_cnt++;
}
gd_synctask_barrier_wait((&args), peer_cnt);
@@ -1090,20 +1376,22 @@ out:
}
int32_t
-gd_mgmt_v3_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- int ret = -1;
+ int32_t ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
- gd1_mgmt_v3_vol_unlock_rsp rsp = {{0},};
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
call_frame_t *frame = NULL;
- int op_ret = -1;
- int op_errno = -1;
+ int32_t op_ret = -1;
+ int32_t op_errno = -1;
+ xlator_t *this = NULL;
- GF_ASSERT(req);
- GF_ASSERT(iov);
- GF_ASSERT(myframe);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (myframe);
frame = myframe;
args = frame->local;
@@ -1116,8 +1404,14 @@ gd_mgmt_v3_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
goto out;
}
+ if (!iov) {
+ gf_log (this->name, GF_LOG_ERROR, "iov is NULL");
+ op_errno = EINVAL;
+ goto out;
+ }
+
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
if (ret < 0)
goto out;
@@ -1125,36 +1419,41 @@ gd_mgmt_v3_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
+
out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_VOLUME_UNLOCK,
- peerinfo, rsp.uuid);
+ GLUSTERD_MGMT_V3_UNLOCK,
+ peerinfo, rsp.uuid);
+ free (rsp.dict.dict_val);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
}
int32_t
-gd_mgmt_v3_vol_unlock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_unlock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- gd_mgmt_v3_vol_unlock_cbk_fn);
+ gd_mgmt_v3_unlock_cbk_fn);
}
int
-gd_mgmt_v3_vol_unlock (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_unlock (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
{
- int ret = -1;
- gd1_mgmt_v3_vol_unlock_req req = {{0},};
+ int32_t ret = -1;
+ gd1_mgmt_v3_unlock_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
+ xlator_t *this = NULL;
- GF_ASSERT(op_ctx);
- GF_ASSERT(peerinfo);
- GF_ASSERT(args);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_ctx);
+ GF_ASSERT (peerinfo);
+ GF_ASSERT (args);
ret = dict_allocate_and_serialize (op_ctx,
&req.dict.dict_val,
@@ -1167,22 +1466,24 @@ gd_mgmt_v3_vol_unlock (glusterd_op_t op, dict_t *op_ctx,
synclock_unlock (&conf->big_lock);
ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
&gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_VOLUME_UNLOCK,
- gd_mgmt_v3_vol_unlock_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_vol_unlock_req);
+ GLUSTERD_MGMT_V3_UNLOCK,
+ gd_mgmt_v3_unlock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_unlock_req);
synclock_lock (&conf->big_lock);
out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
+ GF_FREE (req.dict.dict_val);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
int
glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
- dict_t *dict, char **op_errstr, int npeers,
- gf_boolean_t is_acquired)
+ dict_t *dict, int32_t op_ret,
+ char **op_errstr, int npeers,
+ gf_boolean_t is_acquired)
{
- int ret = -1;
- int peer_cnt = 0;
+ int32_t ret = -1;
+ int32_t peer_cnt = 0;
uuid_t peer_uuid = {0};
xlator_t *this = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
@@ -1190,6 +1491,11 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
struct list_head *peers = NULL;
this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (conf);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+
peers = &conf->xaction_peers;
/* If the lock has not been held during this
@@ -1202,12 +1508,13 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
goto out;
}
- /* Sending Volume unlock req to other nodes in the cluster */
+ /* Sending mgmt_v3 unlock req to other nodes in the cluster */
+ gd_syncargs_init (&args, NULL);
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_vol_unlock (op, dict, peerinfo, &args,
- MY_UUID, peer_uuid);
+ gd_mgmt_v3_unlock (op, dict, peerinfo, &args,
+ MY_UUID, peer_uuid);
peer_cnt++;
}
gd_synctask_barrier_wait((&args), peer_cnt);
@@ -1216,7 +1523,7 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
gf_log (this->name, GF_LOG_ERROR,
"Unlock failed on peers");
- if (args.errstr)
+ if (!op_ret && args.errstr)
*op_errstr = gf_strdup (args.errstr);
}
@@ -1233,19 +1540,21 @@ int32_t
glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
dict_t *dict)
{
- int ret = -1;
- int npeers = 0;
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ int32_t npeers = 0;
dict_t *req_dict = NULL;
dict_t *tmp_dict = NULL;
glusterd_conf_t *conf = NULL;
char *op_errstr = NULL;
- char *volname = NULL;
xlator_t *this = NULL;
gf_boolean_t is_acquired = _gf_false;
uuid_t *originator_uuid = NULL;
this = THIS;
GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
conf = this->private;
GF_ASSERT (conf);
@@ -1280,7 +1589,7 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
* the unlock and the volname in the dict might be removed */
tmp_dict = dict_new();
if (!tmp_dict) {
- gf_log ("", GF_LOG_ERROR, "Unable to create dict");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to create dict");
goto out;
}
dict_copy (dict, tmp_dict);
@@ -1289,12 +1598,11 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
INIT_LIST_HEAD (&conf->xaction_peers);
npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
- /* LOCKDOWN PHASE - Based on the number of volumes either single
- * or multiple volume locks is acquired */
+ /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
- npeers, &is_acquired);
+ npeers, &is_acquired);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Volume lockdown failed.");
+ gf_log (this->name, GF_LOG_ERROR, "mgmt_v3 lockdown failed.");
goto out;
}
@@ -1310,56 +1618,54 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
/* PRE-COMMIT VALIDATE PHASE */
ret = glusterd_mgmt_v3_pre_validate (conf, op, req_dict,
- &op_errstr, npeers);
+ &op_errstr, npeers);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Pre Validation Failed");
+ gf_log (this->name, GF_LOG_ERROR, "Pre Validation Failed");
goto out;
}
/* COMMIT OP PHASE */
- ret = glusterd_mgmt_v3_commit (conf, op, req_dict,
- &op_errstr, npeers);
+ ret = glusterd_mgmt_v3_commit (conf, op, dict, req_dict,
+ &op_errstr, npeers);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Commit Op Failed");
+ gf_log (this->name, GF_LOG_ERROR, "Commit Op Failed");
goto out;
}
/* POST-COMMIT VALIDATE PHASE */
- ret = glusterd_mgmt_v3_post_validate (conf, op, req_dict,
- &op_errstr, npeers);
+ /* As of now, post_validate is not handling any other
+ commands other than snapshot. So as of now, I am
+ sending 0 (op_ret as 0).
+ */
+ ret = glusterd_mgmt_v3_post_validate (conf, op, 0, dict, req_dict,
+ &op_errstr, npeers);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Post Validation Failed");
+ gf_log (this->name, GF_LOG_ERROR, "Post Validation Failed");
goto out;
}
ret = 0;
out:
+ op_ret = ret;
/* UNLOCK PHASE FOR PEERS*/
- (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict, &op_errstr,
- npeers, is_acquired);
-
- /* SEND CLI RESPONSE */
- glusterd_op_send_cli_response (op, ret, 0, req, dict, op_errstr);
+ (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict,
+ op_ret, &op_errstr,
+ npeers, is_acquired);
/* LOCAL VOLUME(S) UNLOCK */
- if (!is_acquired)
- goto cleanup;
-
- ret = dict_get_str (tmp_dict, "volname", &volname);
- if (ret) {
- /* Trying to release volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_unlock (tmp_dict, MY_UUID);
- if (ret)
- gf_log ("", GF_LOG_ERROR,
- "Failed to release volume locks on localhost");
- } else {
- ret = glusterd_volume_unlock (volname, MY_UUID);
- if (ret)
+ if (is_acquired) {
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
+ if (ret) {
gf_log (this->name, GF_LOG_ERROR,
- "Unable to release local lock for %s", volname);
+ "Failed to release mgmt_v3 locks on localhost");
+ op_ret = ret;
+ }
}
-cleanup:
+ /* SEND CLI RESPONSE */
+ glusterd_op_send_cli_response (op, op_ret, 0, req, dict, op_errstr);
+
if (req_dict)
dict_unref (req_dict);
@@ -1376,15 +1682,15 @@ cleanup:
int32_t
glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
- dict_t *dict)
+ dict_t *dict)
{
- int ret = -1;
- int npeers = 0;
+ int32_t ret = -1;
+ int32_t op_ret = -1;
+ int32_t npeers = 0;
dict_t *req_dict = NULL;
dict_t *tmp_dict = NULL;
glusterd_conf_t *conf = NULL;
char *op_errstr = NULL;
- char *volname = NULL;
xlator_t *this = NULL;
gf_boolean_t is_acquired = _gf_false;
uuid_t *originator_uuid = NULL;
@@ -1393,6 +1699,8 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
this = THIS;
GF_ASSERT (this);
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
conf = this->private;
GF_ASSERT (conf);
@@ -1415,11 +1723,19 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
goto out;
}
+ /* Marking the operation as complete synctasked */
+ ret = dict_set_int32 (dict, "is_synctasked", _gf_true);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set synctasked flag.");
+ goto out;
+ }
+
/* Use a copy at local unlock as cli response will be sent before
* the unlock and the volname in the dict might be removed */
tmp_dict = dict_new();
if (!tmp_dict) {
- gf_log ("", GF_LOG_ERROR, "Unable to create dict");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to create dict");
goto out;
}
dict_copy (dict, tmp_dict);
@@ -1428,12 +1744,11 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
INIT_LIST_HEAD (&conf->xaction_peers);
npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
- /* LOCKDOWN PHASE - Based on the number of volumes either single
- * or multiple volume locks is acquired */
+ /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
- npeers, &is_acquired);
+ npeers, &is_acquired);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Volume lockdown failed.");
+ gf_log (this->name, GF_LOG_ERROR, "mgmt_v3 lockdown failed.");
goto out;
}
@@ -1451,7 +1766,7 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
ret = glusterd_mgmt_v3_pre_validate (conf, op, req_dict,
&op_errstr, npeers);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Pre Validation Failed");
+ gf_log (this->name, GF_LOG_ERROR, "Pre Validation Failed");
goto out;
}
@@ -1462,15 +1777,32 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
ret = glusterd_mgmt_v3_brick_op (conf, op, req_dict,
&op_errstr, npeers);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Brick Ops Failed");
+ gf_log (this->name, GF_LOG_ERROR, "Brick Ops Failed");
goto unbarrier;
}
/* COMMIT OP PHASE */
- ret = glusterd_mgmt_v3_commit (conf, op, req_dict,
- &op_errstr, npeers);
+ /* TODO: As of now, the plan is to do quorum check before sending the
+ commit fop and if the quorum succeeds, then commit is sent to all
+ the other glusterds.
+ snap create functionality now creates the in memory and on disk
+ objects for the snapshot (marking them as incomplete), takes the lvm
+ snapshot and then updates the status of the in memory and on disk
+ snap objects as complete. Suppose one of the glusterds goes down
+ after taking the lvm snapshot, but before updating the snap object,
+ then treat it as a snapshot create failure and trigger cleanup.
+ i.e the number of commit responses received by the originator
+ glusterd shold be the same as the number of peers it has sent the
+ request to (i.e npeers variable). If not, then originator glusterd
+ will initiate cleanup in post-validate fop.
+ Question: What if one of the other glusterds goes down as explained
+ above and along with it the originator glusterd also goes down?
+ Who will initiate the cleanup?
+ */
+ ret = glusterd_mgmt_v3_commit (conf, op, dict, req_dict,
+ &op_errstr, npeers);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Commit Op Failed");
+ gf_log (this->name, GF_LOG_ERROR, "Commit Op Failed");
/* If the main op fails, we should save the error string.
Because, op_errstr will be used for unbarrier and
unlock ops also. We might lose the actual error that
@@ -1488,26 +1820,33 @@ unbarrier:
if (ret)
goto out;
ret = glusterd_mgmt_v3_brick_op (conf, op, req_dict,
- &op_errstr, npeers);
- if (ret || (success == _gf_false)) {
- gf_log ("", GF_LOG_ERROR, "Brick Ops Failed");
- ret = -1;
- goto out;
- }
- /* POST-COMMIT VALIDATE PHASE */
- ret = glusterd_mgmt_v3_post_validate (conf, op, req_dict,
- &op_errstr, npeers);
+ &op_errstr, npeers);
+
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Post Validation Failed");
+ gf_log (this->name, GF_LOG_ERROR, "Brick Ops Failed");
goto out;
}
ret = 0;
+
out:
+ op_ret = ret;
+
+ if (success == _gf_false)
+ op_ret = -1;
+
+ /* POST-COMMIT VALIDATE PHASE */
+ ret = glusterd_mgmt_v3_post_validate (conf, op, op_ret, dict, req_dict,
+ &op_errstr, npeers);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Post Validation Failed");
+ op_ret = -1;
+ }
/* UNLOCK PHASE FOR PEERS*/
- (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict, &op_errstr,
- npeers, is_acquired);
+ (void) glusterd_mgmt_v3_release_peer_locks (conf, op, dict,
+ op_ret, &op_errstr,
+ npeers, is_acquired);
/* If the commit op (snapshot taking) failed, then the error is stored
in tmp_errstr and unbarrier is called. Suppose, if unbarrier also
@@ -1516,7 +1855,7 @@ out:
is sent to cli.
*/
if (tmp_errstr) {
- if (ret && op_errstr) {
+ if (op_errstr) {
gf_log (this->name, GF_LOG_ERROR, "unbarrier brick op"
"failed with the error %s", op_errstr);
GF_FREE (op_errstr);
@@ -1525,28 +1864,20 @@ out:
op_errstr = tmp_errstr;
}
- /* SEND CLI RESPONSE */
- glusterd_op_send_cli_response (op, ret, 0, req, dict, op_errstr);
-
/* LOCAL VOLUME(S) UNLOCK */
- if (!is_acquired)
- goto cleanup;
-
- ret = dict_get_str (tmp_dict, "volname", &volname);
- if (ret) {
- /* Trying to release volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_unlock (tmp_dict, MY_UUID);
- if (ret)
- gf_log ("", GF_LOG_ERROR,
- "Failed to release volume locks on localhost");
- } else {
- ret = glusterd_volume_unlock (volname, MY_UUID);
- if (ret)
+ if (is_acquired) {
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
+ if (ret) {
gf_log (this->name, GF_LOG_ERROR,
- "Unable to release local lock for %s", volname);
+ "Failed to release mgmt_v3 locks on localhost");
+ op_ret = ret;
+ }
}
-cleanup:
+ /* SEND CLI RESPONSE */
+ glusterd_op_send_cli_response (op, op_ret, 0, req, dict, op_errstr);
+
if (req_dict)
dict_unref (req_dict);
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.h b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
index 8c085d18c..b185a9bec 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.h
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.h
@@ -7,8 +7,8 @@
later), or the GNU General Public License, version 2 (GPLv2), in all
cases as published by the Free Software Foundation.
*/
-#ifndef _GLUSTERD_JARVIS_H_
-#define _GLUSTERD_JARVIS_H_
+#ifndef _GLUSTERD_MGMT_H_
+#define _GLUSTERD_MGMT_H_
#ifndef _CONFIG_H
#define _CONFIG_H
@@ -28,7 +28,7 @@ gd_mgmt_v3_commit_fn (glusterd_op_t op, dict_t *dict,
char **op_errstr, dict_t *rsp_dict);
int32_t
-gd_mgmt_v3_post_validate_fn (glusterd_op_t op, dict_t *dict,
+gd_mgmt_v3_post_validate_fn (glusterd_op_t op, int32_t op_ret, dict_t *dict,
char **op_errstr, dict_t *rsp_dict);
int32_t
@@ -38,4 +38,8 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
int32_t
glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
dict_t *dict);
-#endif
+
+int
+glusterd_snap_pre_validate_use_rsp_dict (dict_t *dst, dict_t *src);
+
+#endif /* _GLUSTERD_MGMT_H_ */
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 0162fd23e..1666f5e4d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -81,7 +81,7 @@ struct txn_opinfo_object_ {
typedef struct txn_opinfo_object_ txn_opinfo_obj;
int32_t
-glusterd_txn_opinfo_init ()
+glusterd_txn_opinfo_dict_init ()
{
int32_t ret = -1;
@@ -97,12 +97,35 @@ out:
}
void
-glusterd_txn_opinfo_fini ()
+glusterd_txn_opinfo_dict_fini ()
{
if (txn_opinfo)
dict_destroy (txn_opinfo);
}
+void
+glusterd_txn_opinfo_init (glusterd_op_info_t *opinfo,
+ glusterd_op_sm_state_info_t *state,
+ glusterd_op_t *op,
+ dict_t *op_ctx,
+ rpcsvc_request_t *req)
+{
+ GF_ASSERT (opinfo);
+
+ if (state)
+ opinfo->state = *state;
+
+ if (op)
+ opinfo->op = *op;
+
+ opinfo->op_ctx = dict_ref(op_ctx);
+
+ if (req)
+ opinfo->req = req;
+
+ return;
+}
+
int32_t
glusterd_get_txn_opinfo (uuid_t *txn_id, glusterd_op_info_t *opinfo)
{
@@ -179,7 +202,8 @@ out:
int32_t
glusterd_clear_txn_opinfo (uuid_t *txn_id)
{
- int32_t ret = -1;
+ int32_t ret = -1;
+ glusterd_op_info_t txn_op_info = {{0},};
if (!txn_id) {
gf_log ("", GF_LOG_ERROR, "Empty transaction id received.");
@@ -187,6 +211,14 @@ glusterd_clear_txn_opinfo (uuid_t *txn_id)
goto out;
}
+ ret = glusterd_get_txn_opinfo (txn_id, &txn_op_info);
+ if (ret) {
+ gf_log ("", GF_LOG_ERROR, "Transaction opinfo not found");
+ goto out;
+ }
+
+ dict_unref (txn_op_info.op_ctx);
+
dict_del(txn_opinfo, uuid_utoa (*txn_id));
ret = 0;
@@ -1510,14 +1542,25 @@ glusterd_stop_bricks (glusterd_volinfo_t *volinfo)
int
glusterd_start_bricks (glusterd_volinfo_t *volinfo)
{
- glusterd_brickinfo_t *brickinfo = NULL;
+ int ret = -1;
+ glusterd_brickinfo_t *brickinfo = NULL;
+
+ GF_ASSERT (volinfo);
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (glusterd_brick_start (volinfo, brickinfo, _gf_false))
- return -1;
+ ret = glusterd_brick_start (volinfo, brickinfo, _gf_false);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Failed to start %s:%s for %s",
+ brickinfo->hostname, brickinfo->path,
+ volinfo->volname);
+ goto out;
+ }
}
- return 0;
+ ret = 0;
+out:
+ return ret;
}
static int
@@ -2515,7 +2558,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
continue;
- /* Based on the op_version, acquire a cluster or volume lock */
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
if (priv->op_version < 3) {
proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_LOCK];
if (proc->fn) {
@@ -2535,7 +2578,8 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
dict = glusterd_op_get_ctx ();
dict_ref (dict);
- proc = &peerinfo->mgmt_v3->proctable[GLUSTERD_MGMT_V3_VOLUME_LOCK];
+ proc = &peerinfo->mgmt_v3->proctable
+ [GLUSTERD_MGMT_V3_LOCK];
if (proc->fn) {
ret = dict_set_static_ptr (dict, "peerinfo",
peerinfo);
@@ -2549,7 +2593,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
ret = proc->fn (NULL, this, dict);
if (ret) {
gf_log (this->name, GF_LOG_WARNING,
- "Failed to send volume lock "
+ "Failed to send mgmt_v3 lock "
"request for operation "
"'Volume %s' to peer %s",
gd_op_list[opinfo.op],
@@ -2595,7 +2639,8 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
(glusterd_op_get_op() != GD_OP_SYNC_VOLUME))
continue;
- /* Based on the op_version, release the cluster or volume lock */
+ /* Based on the op_version, release the *
+ * cluster or mgmt_v3 lock */
if (priv->op_version < 3) {
proc = &peerinfo->mgmt->proctable[GLUSTERD_MGMT_CLUSTER_UNLOCK];
if (proc->fn) {
@@ -2615,7 +2660,8 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
dict = glusterd_op_get_ctx ();
dict_ref (dict);
- proc = &peerinfo->mgmt_v3->proctable[GLUSTERD_MGMT_V3_VOLUME_UNLOCK];
+ proc = &peerinfo->mgmt_v3->proctable
+ [GLUSTERD_MGMT_V3_UNLOCK];
if (proc->fn) {
ret = dict_set_static_ptr (dict, "peerinfo",
peerinfo);
@@ -2629,7 +2675,7 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
ret = proc->fn (NULL, this, dict);
if (ret) {
gf_log (this->name, GF_LOG_WARNING,
- "Failed to send volume unlock "
+ "Failed to send mgmt_v3 unlock "
"request for operation "
"'Volume %s' to peer %s",
gd_op_list[opinfo.op],
@@ -2693,7 +2739,7 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
/* If the req came from a node running on older op_version
* the dict won't be present. Based on it acquiring a cluster
- * or volume lock */
+ * or mgmt_v3 lock */
if (lock_ctx->dict == NULL) {
ret = glusterd_lock (lock_ctx->uuid);
glusterd_op_lock_send_resp (lock_ctx->req, ret);
@@ -2703,15 +2749,16 @@ glusterd_op_ac_lock (glusterd_op_sm_event_t *event, void *ctx)
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire volname");
else {
- ret = glusterd_volume_lock (volname, lock_ctx->uuid);
+ ret = glusterd_mgmt_v3_lock (volname, lock_ctx->uuid,
+ "vol");
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire lock for %s",
volname);
}
- glusterd_op_volume_lock_send_resp (lock_ctx->req,
- &event->txn_id, ret);
+ glusterd_op_mgmt_v3_lock_send_resp (lock_ctx->req,
+ &event->txn_id, ret);
dict_unref (lock_ctx->dict);
}
@@ -2740,7 +2787,7 @@ glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
/* If the req came from a node running on older op_version
* the dict won't be present. Based on it releasing the cluster
- * or volume lock */
+ * or mgmt_v3 lock */
if (lock_ctx->dict == NULL) {
ret = glusterd_unlock (lock_ctx->uuid);
glusterd_op_unlock_send_resp (lock_ctx->req, ret);
@@ -2750,14 +2797,15 @@ glusterd_op_ac_unlock (glusterd_op_sm_event_t *event, void *ctx)
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire volname");
else {
- ret = glusterd_volume_unlock (volname, lock_ctx->uuid);
+ ret = glusterd_mgmt_v3_unlock (volname, lock_ctx->uuid,
+ "vol");
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release lock for %s", volname);
}
- glusterd_op_volume_unlock_send_resp (lock_ctx->req,
- &event->txn_id, ret);
+ glusterd_op_mgmt_v3_unlock_send_resp (lock_ctx->req,
+ &event->txn_id, ret);
dict_unref (lock_ctx->dict);
}
@@ -3288,7 +3336,7 @@ glusterd_op_start_rb_timer (dict_t *dict, uuid_t *txn_id)
}
ret = dict_set_bin (rb_ctx, "transaction_id",
- txn_id, sizeof (uuid_t));
+ txn_id, sizeof(*txn_id));
if (ret) {
gf_log ("", GF_LOG_ERROR,
"Failed to set transaction id.");
@@ -4020,7 +4068,7 @@ glusterd_op_txn_complete (uuid_t *txn_id)
glusterd_op_reset_ctx ();
glusterd_op_clear_errstr ();
- /* Based on the op-version, we release the cluster or volume lock */
+ /* Based on the op-version, we release the cluster or mgmt_v3 lock */
if (priv->op_version < 3) {
ret = glusterd_unlock (MY_UUID);
/* unlock cant/shouldnt fail here!! */
@@ -4036,7 +4084,8 @@ glusterd_op_txn_complete (uuid_t *txn_id)
"Unable to acquire volname");
if (volname) {
- ret = glusterd_volume_unlock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
+ "vol");
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release lock for %s",
@@ -4129,7 +4178,7 @@ glusterd_op_ac_stage_op (glusterd_op_sm_event_t *event, void *ctx)
gf_log (this->name, GF_LOG_ERROR, "Out of Memory");
ret = dict_set_bin (rsp_dict, "transaction_id",
- txn_id, sizeof(uuid_t *));
+ txn_id, sizeof(*txn_id));
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Failed to set transaction id.");
@@ -4236,7 +4285,7 @@ glusterd_op_ac_commit_op (glusterd_op_sm_event_t *event, void *ctx)
gf_log (this->name, GF_LOG_ERROR, "Out of Memory");
ret = dict_set_bin (rsp_dict, "transaction_id",
- txn_id, sizeof(uuid_t));
+ txn_id, sizeof(*txn_id));
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Failed to set transaction id.");
@@ -4244,7 +4293,6 @@ glusterd_op_ac_commit_op (glusterd_op_sm_event_t *event, void *ctx)
ret = glusterd_op_commit_send_resp (req_ctx->req, req_ctx->op,
status, op_errstr, rsp_dict);
- glusterd_op_fini_ctx ();
if (op_errstr && (strcmp (op_errstr, "")))
GF_FREE (op_errstr);
@@ -4273,7 +4321,6 @@ glusterd_op_ac_send_commit_failed (glusterd_op_sm_event_t *event, void *ctx)
opinfo.op_ret, opinfo.op_errstr,
op_ctx);
- glusterd_op_fini_ctx ();
if (opinfo.op_errstr && (strcmp (opinfo.op_errstr, ""))) {
GF_FREE (opinfo.op_errstr);
opinfo.op_errstr = NULL;
@@ -6141,51 +6188,6 @@ glusterd_op_clear_op (glusterd_op_t op)
}
int32_t
-glusterd_op_init_ctx (glusterd_op_t op, glusterd_op_info_t *op_info)
-{
- int ret = 0;
- dict_t *dict = NULL;
- xlator_t *this = NULL;
-
- this = THIS;
- GF_ASSERT (this);
- GF_ASSERT (GD_OP_NONE < op && op < GD_OP_MAX);
-
- if (_gf_false == glusterd_need_brick_op (op)) {
- gf_log (this->name, GF_LOG_DEBUG, "Received op: %s, returning",
- gd_op_list[op]);
- goto out;
- }
- dict = dict_new ();
- if (dict == NULL) {
- ret = -1;
- goto out;
- }
-
- op_info->op_ctx = dict;
-out:
- gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
-}
-
-
-
-int32_t
-glusterd_op_fini_ctx ()
-{
- dict_t *dict = NULL;
-
- dict = glusterd_op_get_ctx ();
- if (dict)
- dict_unref (dict);
-
- glusterd_op_reset_ctx ();
- return 0;
-}
-
-
-
-int32_t
glusterd_op_free_ctx (glusterd_op_t op, void *ctx)
{
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.h b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
index 09da6b9f7..cf57b78e0 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.h
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.h
@@ -261,10 +261,7 @@ glusterd_op_init_commit_rsp_dict (glusterd_op_t op);
void
glusterd_op_modify_op_ctx (glusterd_op_t op, void *op_ctx);
-int32_t
-glusterd_op_init_ctx (glusterd_op_t op, glusterd_op_info_t *op_info);
-int32_t
-glusterd_op_fini_ctx ();
+
int32_t
glusterd_volume_stats_read_perf (char *brick_path, int32_t blk_size,
int32_t blk_count, double *throughput, double *time);
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 821da33be..d5200a4ae 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -647,10 +647,10 @@ glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
}
static int32_t
-glusterd_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- gd1_mgmt_v3_vol_lock_rsp rsp = {{0},};
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
int ret = -1;
int32_t op_ret = -1;
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
@@ -669,10 +669,10 @@ glusterd_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
if (ret < 0) {
gf_log (this->name, GF_LOG_ERROR,
- "Failed to decode volume lock "
+ "Failed to decode mgmt_v3 lock "
"response received from peer");
rsp.op_ret = -1;
rsp.op_errno = EINVAL;
@@ -685,13 +685,13 @@ out:
txn_id = &rsp.txn_id;
gf_log (this->name, (op_ret) ? GF_LOG_ERROR : GF_LOG_DEBUG,
- "Received volume lock %s from uuid: %s",
+ "Received mgmt_v3 lock %s from uuid: %s",
(op_ret) ? "RJT" : "ACC", uuid_utoa (rsp.uuid));
ret = glusterd_friend_find (rsp.uuid, NULL, &peerinfo);
if (ret) {
gf_log (this->name, GF_LOG_CRITICAL,
- "Volume lock response received "
+ "mgmt_v3 lock response received "
"from unknown peer: %s", uuid_utoa (rsp.uuid));
}
@@ -717,18 +717,18 @@ out:
}
int32_t
-glusterd_vol_lock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+glusterd_mgmt_v3_lock_peers_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- glusterd_vol_lock_cbk_fn);
+ glusterd_mgmt_v3_lock_peers_cbk_fn);
}
static int32_t
-glusterd_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
- gd1_mgmt_v3_vol_unlock_rsp rsp = {{0},};
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
int ret = -1;
int32_t op_ret = -1;
glusterd_op_sm_event_type_t event_type = GD_OP_EVENT_NONE;
@@ -747,10 +747,10 @@ glusterd_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
if (ret < 0) {
gf_log (this->name, GF_LOG_ERROR,
- "Failed to decode volume unlock "
+ "Failed to decode mgmt_v3 unlock "
"response received from peer");
rsp.op_ret = -1;
rsp.op_errno = EINVAL;
@@ -763,7 +763,7 @@ out:
txn_id = &rsp.txn_id;
gf_log (this->name, (op_ret) ? GF_LOG_ERROR : GF_LOG_DEBUG,
- "Received volume unlock %s from uuid: %s",
+ "Received mgmt_v3 unlock %s from uuid: %s",
(op_ret) ? "RJT" : "ACC",
uuid_utoa (rsp.uuid));
@@ -771,7 +771,7 @@ out:
if (ret) {
gf_log (this->name, GF_LOG_CRITICAL,
- "Volume unlock response received "
+ "mgmt_v3 unlock response received "
"from unknown peer: %s", uuid_utoa (rsp.uuid));
}
@@ -797,11 +797,11 @@ out:
}
int32_t
-glusterd_vol_unlock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+glusterd_mgmt_v3_unlock_peers_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- glusterd_vol_unlock_cbk_fn);
+ glusterd_mgmt_v3_unlock_peers_cbk_fn);
}
int32_t
@@ -1395,15 +1395,16 @@ out:
}
int32_t
-glusterd_vol_lock (call_frame_t *frame, xlator_t *this,
- void *data)
+glusterd_mgmt_v3_lock_peers (call_frame_t *frame, xlator_t *this,
+ void *data)
{
- gd1_mgmt_v3_vol_lock_req req = {{0},};
+ gd1_mgmt_v3_lock_req req = {{0},};
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_conf_t *priv = NULL;
call_frame_t *dummy_frame = NULL;
dict_t *dict = NULL;
+ uuid_t *txn_id = NULL;
if (!this)
goto out;
@@ -1430,30 +1431,44 @@ glusterd_vol_lock (call_frame_t *frame, xlator_t *this,
goto out;
}
+ /* Sending valid transaction ID to peers */
+ ret = dict_get_bin (dict, "transaction_id",
+ (void **)&txn_id);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get transaction id.");
+ goto out;
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Transaction_id = %s", uuid_utoa (*txn_id));
+ uuid_copy (req.txn_id, *txn_id);
+ }
+
dummy_frame = create_frame (this, this->ctx->pool);
if (!dummy_frame)
goto out;
ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
peerinfo->mgmt_v3,
- GLUSTERD_MGMT_V3_VOLUME_LOCK, NULL,
- this, glusterd_vol_lock_cbk,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_req);
+ GLUSTERD_MGMT_V3_LOCK, NULL,
+ this, glusterd_mgmt_v3_lock_peers_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_req);
out:
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int32_t
-glusterd_vol_unlock (call_frame_t *frame, xlator_t *this,
- void *data)
+glusterd_mgmt_v3_unlock_peers (call_frame_t *frame, xlator_t *this,
+ void *data)
{
- gd1_mgmt_v3_vol_unlock_req req = {{0},};
+ gd1_mgmt_v3_unlock_req req = {{0},};
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
glusterd_conf_t *priv = NULL;
call_frame_t *dummy_frame = NULL;
dict_t *dict = NULL;
+ uuid_t *txn_id = NULL;
if (!this)
goto out;
@@ -1480,15 +1495,28 @@ glusterd_vol_unlock (call_frame_t *frame, xlator_t *this,
goto out;
}
+ /* Sending valid transaction ID to peers */
+ ret = dict_get_bin (dict, "transaction_id",
+ (void **)&txn_id);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get transaction id.");
+ goto out;
+ } else {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Transaction_id = %s", uuid_utoa (*txn_id));
+ uuid_copy (req.txn_id, *txn_id);
+ }
+
dummy_frame = create_frame (this, this->ctx->pool);
if (!dummy_frame)
goto out;
ret = glusterd_submit_request (peerinfo->rpc, &req, dummy_frame,
peerinfo->mgmt_v3,
- GLUSTERD_MGMT_V3_VOLUME_UNLOCK, NULL,
- this, glusterd_vol_unlock_cbk,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_req);
+ GLUSTERD_MGMT_V3_UNLOCK, NULL,
+ this, glusterd_mgmt_v3_unlock_peers_cbk,
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_req);
out:
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
@@ -1909,8 +1937,8 @@ struct rpc_clnt_procedure gd_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
struct rpc_clnt_procedure gd_mgmt_v3_actors[GLUSTERD_MGMT_V3_MAXVALUE] = {
[GLUSTERD_MGMT_V3_NULL] = {"NULL", NULL },
- [GLUSTERD_MGMT_V3_VOLUME_LOCK] = {"VOLUME_LOCK", glusterd_vol_lock},
- [GLUSTERD_MGMT_V3_VOLUME_UNLOCK] = {"VOLUME_UNLOCK", glusterd_vol_unlock},
+ [GLUSTERD_MGMT_V3_LOCK] = {"MGMT_V3_LOCK", glusterd_mgmt_v3_lock_peers},
+ [GLUSTERD_MGMT_V3_UNLOCK] = {"MGMT_V3_UNLOCK", glusterd_mgmt_v3_unlock_peers},
};
struct rpc_clnt_program gd_mgmt_prog = {
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 0d123732e..9b811cd05 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2013 Red Hat, Inc. <http://www.redhat.com>
+ Copyright (c) 2013-2014 Red Hat, Inc. <http://www.redhat.com>
This file is part of GlusterFS.
This file is licensed to you under your choice of the GNU Lesser
@@ -34,6 +34,7 @@
#include "run.h"
#include "glusterd-volgen.h"
#include "glusterd-mgmt.h"
+#include "glusterd-syncop.h"
#include "syscall.h"
#include "cli1-xdr.h"
@@ -43,192 +44,201 @@
#include <mntent.h>
#endif
+char snap_mount_folder[PATH_MAX];
+static int32_t
+glusterd_find_missed_snap (dict_t *rsp_dict, glusterd_volinfo_t *vol,
+ char *snap_uuid, struct list_head *peers,
+ int32_t op);
-/* This function will do the actual snapshot restore on the brick.
+/* This function will restore snapshot volumes
*
- * @param brickinfo brickinfo structure
- * @param snapname name of the snap which will be restored
- *
- * @return Negative value on Failure and 0 in success
- */
-int
-glusterd_snapshot_restore_brick_snap (glusterd_brickinfo_t *brickinfo,
- char *snapname)
-{
- int ret = -1;
- char *device = NULL;
- xlator_t *this = NULL;
- runner_t runner = {0,};
- glusterd_conf_t *conf = NULL;
- char msg[PATH_MAX] = {0,};
-
- this = THIS;
- GF_ASSERT (this);
- conf = this->private;
-
- GF_ASSERT (conf);
- GF_ASSERT (brickinfo);
- GF_ASSERT (snapname);
-
- /* Using the brickinfo get the actual device name */
- device = glusterd_get_brick_mount_details (brickinfo);
-
- runinit (&runner);
- snprintf (msg, sizeof (msg), "Restoring snapshot of the brick %s:%s "
- "to %s snap", brickinfo->hostname, brickinfo->path, snapname);
-
- /* Command for restoring the snapshot */
- runner_add_args (&runner, "/sbin/lvconvert", "--merge", device, NULL);
- runner_log (&runner, "", GF_LOG_DEBUG, msg);
-
- synclock_unlock (&conf->big_lock);
- /* Run the actual command */
- ret = runner_run (&runner);
- synclock_lock (&conf->big_lock);
-
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "snapshot restore of the "
- "brick (%s:%s) of device %s failed",
- brickinfo->hostname, brickinfo->path, device);
- goto out;
- }
-
-out:
- return ret;
-}
-
-/* This function will restore the snapshot for the entire volume.
- *
- * @param snap snap object which needs to be restored
+ * @param dict dictionary containing snapshot restore request
* @param op_errstr In case of any failure error message will be returned
* in this variable
* @return Negative value on Failure and 0 in success
*/
int
-glusterd_snapshot_restore_snap (glusterd_snap_t *snap, char **op_errstr)
+glusterd_snapshot_restore (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
int ret = -1;
+ char *volname = NULL;
+ char *snapname = NULL;
xlator_t *this = NULL;
+ glusterd_volinfo_t *snap_volinfo = NULL;
glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_conf_t *priv = NULL;
this = THIS;
GF_ASSERT (this);
- GF_ASSERT (snap);
- GF_ASSERT (snap->snap_volume);
+ GF_ASSERT (dict);
GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
- /* For restore always take volinfo stored in snap. Do not use
- * volinfo of the original volume*/
- volinfo = snap->snap_volume;
-
- list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- /* This code is executed on each node of the volume. We need
- * to run the restore only on those bricks which are present
- * in this node. Therefore check if node belongs to this node
- * or not.
- */
- if (uuid_compare (brickinfo->uuid, MY_UUID)) {
- continue; /* Bricks not present in this node */
- }
+ priv = this->private;
+ GF_ASSERT (priv);
- /* This case should never occur as volume is already stopped.
- * Just to avoid a case where the brick is explicitly started
- * by the user we have this check here.
- */
- if (glusterd_is_brick_started (brickinfo)) {
- ret = gf_asprintf (op_errstr, "Brick (%s: %s) is "
- "running therefore snapshot cannot "
- "be restored", brickinfo->hostname,
- brickinfo->path);
- if (ret < 0) {
- goto out;
- }
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "snap name");
+ goto out;
+ }
- gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
- ret = -1;
+ snap = glusterd_find_snap_by_name (snapname);
+ if (NULL == snap) {
+ ret = gf_asprintf (op_errstr, "Snap (%s) not found",
+ snapname);
+ if (ret < 0) {
goto out;
}
+ gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
+ ret = -1;
+ goto out;
+ }
- /* Do the actual snapshot restore */
- ret = glusterd_snapshot_restore_brick_snap (brickinfo,
- snap->snap_name);
- if (ret) {
- ret = gf_asprintf (op_errstr, "Snapshot restore failed"
- " for %s:%s", brickinfo->hostname,
- brickinfo->path);
- if (ret < 0) {
- goto out;
- }
+ /* TODO : As of now there is only volume in snapshot.
+ * Change this when multiple volume snapshot is introduced
+ */
+ snap_volinfo = list_entry (snap->volumes.next, glusterd_volinfo_t,
+ vol_list);
- gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
- ret = -1;
+ ret = glusterd_volinfo_find (snap_volinfo->parent_volname, &volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not get volinfo of "
+ "%s", snap_volinfo->parent_volname);
+ goto out;
+ }
+
+ if (is_origin_glusterd (dict) == _gf_true) {
+ /* From origin glusterd check if *
+ * any peers with snap bricks are down */
+ ret = glusterd_find_missed_snap (rsp_dict, snap_volinfo,
+ snap_volinfo->volname,
+ &priv->peers,
+ GF_SNAP_OPTION_TYPE_RESTORE);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to find missed snap restores");
goto out;
}
+ }
+ ret = gd_restore_snap_volume (rsp_dict, volinfo, snap_volinfo);
+ if (ret) {
+ /* No need to update op_errstr because it is assumed
+ * that the called function will do that in case of
+ * failure.
+ */
+ gf_log (this->name, GF_LOG_ERROR, "Failed to restore "
+ "snap for %s volume", volname);
+ goto out;
}
- /* TODO: Move this code to postvalidate */
- snap->snap_restored = _gf_true;
- /* TODO: persist the change in store */
+ ret = 0;
+ /* TODO: Need to check if we need to delete the snap after the
+ * operation is successful or not. Also need to persist the state
+ * of restore operation in the store.
+ */
out:
return ret;
}
-/* This function will restore a snapshot for the entire
- * volume or the entire CG (Consistency Group)
+/* This function is called before actual restore is taken place. This function
+ * will validate whether the snapshot volumes are ready to be restored or not.
*
* @param dict dictionary containing snapshot restore request
* @param op_errstr In case of any failure error message will be returned
* in this variable
+ * @param rsp_dict response dictionary
* @return Negative value on Failure and 0 in success
*/
int
-glusterd_snapshot_restore (dict_t *dict, char **op_errstr)
+glusterd_snapshot_restore_prevalidate (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
{
int ret = -1;
- int64_t i = 0;
- int64_t volcount = 0;
+ int32_t i = 0;
+ int32_t volcount = 0;
+ gf_boolean_t snap_restored = _gf_false;
+ char key[PATH_MAX] = {0, };
char *volname = NULL;
char *snapname = NULL;
- xlator_t *this = NULL;
glusterd_volinfo_t *volinfo = NULL;
glusterd_snap_t *snap = NULL;
- char key[PATH_MAX] = {0,};
+ xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (dict);
GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
- ret = dict_get_int64 (dict, "volcount", &volcount);
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "snap name");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (NULL == snap) {
+ ret = gf_asprintf (op_errstr, "Snap (%s) not found",
+ snapname);
+ if (ret < 0) {
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
+ ret = -1;
+ goto out;
+ }
+
+ snap_restored = snap->snap_restored;
+
+ if (snap_restored) {
+ ret = gf_asprintf (op_errstr, "Snap (%s) is already "
+ "restored", snapname);
+ if (ret < 0) {
+ goto out;
+ }
+ gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_str (rsp_dict, "snapname", snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "snap name");
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "volcount", &volcount);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to get volume count");
goto out;
}
- /* If we are performing snapshot restore of a CG then volcount will be
- * greater than 1 else volcount will be 1.
- */
- for (i = 0; i < volcount; ++i) {
- /* TODO: Start the index from 0 when Jarvis code is fixed */
- snprintf (key, sizeof (key), "volname%ld", i+1);
+ /* Snapshot restore will only work if all the volumes,
+ that are part of the snapshot, are stopped. */
+ for (i = 1; i <= volcount; ++i) {
+ snprintf (key, sizeof (key), "volname%d", i);
ret = dict_get_str (dict, key, &volname);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to "
- "get volume name");
+ "get volume name");
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
ret = gf_asprintf (op_errstr, "Volume (%s) not found",
- volname);
+ volname);
if (ret < 0) {
goto out;
}
@@ -237,17 +247,10 @@ glusterd_snapshot_restore (dict_t *dict, char **op_errstr)
goto out;
}
- ret = dict_get_str (dict, "snapname", &snapname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get "
- "snap name");
- goto out;
- }
-
- snap = glusterd_find_snap_by_name (volinfo, snapname);
- if (NULL == snap) {
- ret = gf_asprintf (op_errstr, "Snap (%s) not found",
- snapname);
+ if (glusterd_is_volume_started (volinfo)) {
+ ret = gf_asprintf (op_errstr, "Volume (%s) has been "
+ "started. Volume needs to be stopped before restoring "
+ "a snapshot.", volname);
if (ret < 0) {
goto out;
}
@@ -255,52 +258,23 @@ glusterd_snapshot_restore (dict_t *dict, char **op_errstr)
ret = -1;
goto out;
}
-
- /* Restore the snap for the entire volume */
- ret = glusterd_snapshot_restore_snap (snap, op_errstr);
- if (ret) {
- /* No need to update op_errstr because it is assumed
- * that the called function will do that in case of
- * failure.
- */
- gf_log (this->name, GF_LOG_ERROR, "Failed to restore "
- "snap for %s volume", volname);
- goto out;
- }
}
- ret = 0; /* Success */
-
- /* TODO: Need to check if we need to delete the snap after the
- * operation is successful or not. Also need to persist the state
- * of restore operation in the store.
- */
+ ret = 0;
out:
return ret;
}
-/* This function is called before actual restore is taken place. This
- * function will validate whether the volume or CG is ready to be restored
- * or not.
- *
- * @param dict dictionary containing snapshot restore request
- * @param op_errstr In case of any failure error message will be returned
- * in this variable
- * @return Negative value on Failure and 0 in success
- */
int
-glusterd_snapshot_restore_prevalidate (dict_t *dict, char **op_errstr)
+snap_max_hard_limits_validate (dict_t *dict, char *volname,
+ uint64_t value, char **op_errstr)
{
- int ret = -1;
- int64_t i = 0;
- int64_t volcount = 0;
- gf_boolean_t snap_restored = _gf_false;
- char *volname = NULL;
- char *snapname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_snap_t *snap = NULL;
- xlator_t *this = NULL;
- char key[PATH_MAX] = {0, };
+ char err_str[PATH_MAX] = "";
+ glusterd_conf_t *conf = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = -1;
+ uint64_t max_limit = GLUSTERD_SNAPS_MAX_HARD_LIMIT;
+ xlator_t *this = NULL;
this = THIS;
@@ -308,331 +282,556 @@ glusterd_snapshot_restore_prevalidate (dict_t *dict, char **op_errstr)
GF_ASSERT (dict);
GF_ASSERT (op_errstr);
- ret = dict_get_int64 (dict, "volcount", &volcount);
-
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get volume count");
- goto out;
- }
+ conf = this->private;
- /* Snapshot restore will only work if the volume is stopped.
- * If volume is running then snapshot restore will fail. In
- * case of CG if any of the volume in the CG is running then
- * snapshot restore for the entire CG will fail
- */
- for (i = 0; i < volcount; ++i) {
- /* TODO: Start the index from 0 when Jarvis code is fixed */
- snprintf (key, sizeof (key), "volname%ld", i+1);
- ret = dict_get_str (dict, key, &volname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to "
- "get volume name");
- goto out;
- }
+ GF_ASSERT (conf);
+ if (volname) {
ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- ret = gf_asprintf (op_errstr, "Volume (%s) not found",
- volname);
- if (ret < 0) {
- goto out;
- }
- gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
- ret = -1;
- goto out;
- }
-
- if (glusterd_is_volume_started (volinfo)) {
- ret = gf_asprintf (op_errstr, "Volume (%s) is running",
- volname);
- if (ret < 0) {
+ if (!ret) {
+ if (volinfo->is_snap_volume) {
+ ret = -1;
+ snprintf (err_str, PATH_MAX,
+ "%s is a snap volume. Configuring "
+ "snap-max-hard-limit for a snap "
+ "volume is prohibited.", volname);
goto out;
}
- gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
- ret = -1;
- goto out;
- }
-
- ret = dict_get_str (dict, "snapname", &snapname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get "
- "snap name");
- goto out;
}
+ }
- snap = glusterd_find_snap_by_name (volinfo, snapname);
- if (NULL == snap) {
- ret = gf_asprintf (op_errstr, "Snap (%s) not found",
- snapname);
- if (ret < 0) {
- goto out;
- }
- gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
- ret = -1;
- goto out;
+ if (value) {
+ /* Max limit for the system is GLUSTERD_SNAPS_MAX_HARD_LIMIT
+ * but max limit for a volume is conf->snap_max_hard_limit.
+ */
+ if (volname) {
+ max_limit = conf->snap_max_hard_limit;
+ } else {
+ max_limit = GLUSTERD_SNAPS_MAX_HARD_LIMIT;
}
+ }
- snap_restored = snap->snap_restored;
-
- if (snap_restored) {
- ret = gf_asprintf (op_errstr, "Snap (%s) already "
- "restored", snapname);
- if (ret < 0) {
- goto out;
- }
- gf_log (this->name, GF_LOG_ERROR, "%s", *op_errstr);
- ret = -1;
- goto out;
- }
+ if ((value < 0) || (value > max_limit)) {
+ ret = -1;
+ snprintf (err_str, PATH_MAX, "Invalid snap-max-hard-limit "
+ "%"PRIu64 ". Expected range 0 - %"PRIu64,
+ value, max_limit);
+ goto out;
}
ret = 0;
out:
+ if (ret) {
+ *op_errstr = gf_strdup (err_str);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ }
return ret;
}
-
int
-glusterd_snapshot_config_limit_prevalidate (dict_t *dict, char **op_errstr,
- int config_command)
+glusterd_snapshot_config_prevalidate (dict_t *dict, char **op_errstr)
{
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- uint64_t limit = 0;
- xlator_t *this = NULL;
- int ret = -1;
- char err_str[PATH_MAX] = {0,};
- glusterd_conf_t *conf = NULL;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ int ret = -1;
+ int config_command = 0;
+ char err_str[PATH_MAX] = {0,};
+ glusterd_conf_t *conf = NULL;
+ uint64_t value = 0;
+ uint64_t hard_limit = 0;
+ uint64_t soft_limit = 0;
+ gf_loglevel_t loglevel = GF_LOG_ERROR;
+ uint64_t max_limit = GLUSTERD_SNAPS_MAX_HARD_LIMIT;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
conf = this->private;
GF_ASSERT (conf);
- switch (config_command) {
+ ret = dict_get_int32 (dict, "config-command", &config_command);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str),
+ "failed to get config-command type");
+ goto out;
+ }
- case GF_SNAP_CONFIG_SYS_MAX:
- ret = dict_get_uint64 (dict, "limit", &limit);
- if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " snapshot limit");
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
- goto out;
- }
- if (limit < 0 || limit > GLUSTERD_SNAPS_MAX_LIMIT) {
- ret = -1;
- snprintf (err_str, PATH_MAX,"Invalid max snap limit "
- "%"PRIu64 ". Expected range 0 - %"PRIu64,
- limit, conf->snap_max_limit);
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
- goto out;
- }
- break;
+ ret = dict_get_uint64 (dict, "snap-max-hard-limit", &hard_limit);
- case GF_SNAP_CONFIG_VOL_MAX:
- // volume wide limit
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " volume name");
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
- goto out;
- }
+ ret = dict_get_uint64 (dict, "snap-max-soft-limit", &soft_limit);
+
+ ret = dict_get_str (dict, "volname", &volname);
+
+ if (volname) {
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " volinfo for volume %s", volname);
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ snprintf (err_str, sizeof (err_str),
+ "Volume %s does not exist.", volname);
goto out;
}
- ret = dict_get_uint64 (dict, "limit", &limit);
- if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " snapshot limit volinfo for volume %s",
- volname);
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
- goto out;
+ }
+
+ switch (config_command) {
+ case GF_SNAP_CONFIG_TYPE_SET:
+ if (hard_limit) {
+ /* Validations for snap-max-hard-limits */
+ ret = snap_max_hard_limits_validate (dict, volname,
+ hard_limit, op_errstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "snap-max-hard-limit validation "
+ "failed.");
+ goto out;
+ }
}
- if (limit < 0 || limit > conf->snap_max_limit) {
- ret = -1;
- snprintf (err_str, PATH_MAX,"Invalid max snap limit "
- "%"PRIu64 " for volume %s. Expected range"
- " 0 - %"PRIu64, limit, volname,
- conf->snap_max_limit);
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
- goto out;
+
+ if (soft_limit) {
+ max_limit = GLUSTERD_SNAPS_MAX_SOFT_LIMIT_PERCENT;
+ if ((soft_limit < 0) || (soft_limit > max_limit)) {
+ ret = -1;
+ snprintf (err_str, PATH_MAX, "Invalid "
+ "snap-max-soft-limit ""%"
+ PRIu64 ". Expected range 0 - %"PRIu64,
+ soft_limit, max_limit);
+ goto out;
+ }
+ break;
}
+ default:
break;
+ }
- case GF_SNAP_CONFIG_CG_MAX:
- break;
+ ret = 0;
+out:
- case GF_SNAP_CONFIG_DISPLAY:
- ret = dict_get_str (dict, "volname", &volname);
+ if (ret && err_str[0] != '\0') {
+ gf_log (this->name, loglevel, "%s", err_str);
+ *op_errstr = gf_strdup (err_str);
+ }
+
+ return ret;
+}
+
+int
+glusterd_snap_create_pre_val_use_rsp_dict (dict_t *dst, dict_t *src)
+{
+ char *snap_brick_dir = NULL;
+ char *snap_device = NULL;
+ char *tmpstr = NULL;
+ char key[PATH_MAX] = "";
+ char snapbrckcnt[PATH_MAX] = "";
+ char snapbrckord[PATH_MAX] = "";
+ int ret = -1;
+ int64_t i = -1;
+ int64_t j = -1;
+ int64_t volume_count = 0;
+ int64_t brick_count = 0;
+ int64_t brick_order = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dst);
+ GF_ASSERT (src);
+
+ ret = dict_get_int64 (src, "volcount", &volume_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
+ "get the volume count");
+ goto out;
+ }
+
+ for (i = 0; i < volume_count; i++) {
+ memset (snapbrckcnt, '\0', sizeof(snapbrckcnt));
+ ret = snprintf (snapbrckcnt, sizeof(snapbrckcnt) - 1,
+ "vol%ld_brickcount", i+1);
+ ret = dict_get_int64 (src, snapbrckcnt, &brick_count);
if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " volume name");
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
- goto out;
- }
- if (!strncmp (volname, "all", 3)) {
- ret = 0;
- goto out;
+ gf_log (this->name, GF_LOG_TRACE,
+ "No bricks for this volume in this dict");
+ continue;
}
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " volinfo for volume %s", volname);
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
- goto out;
+
+ for (j = 0; j < brick_count; j++) {
+ /* Fetching data from source dict */
+ snprintf (key, sizeof(key) - 1,
+ "vol%ld.brickdir%ld", i+1, j);
+
+ ret = dict_get_ptr (src, key,
+ (void **)&snap_brick_dir);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Unable to fetch %s", key);
+ continue;
+ }
+
+ snprintf (key, sizeof(key) - 1,
+ "vol%ld.brick_snapdevice%ld", i+1, j);
+
+ ret = dict_get_ptr (src, key,
+ (void **)&snap_device);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch snap_device");
+ goto out;
+ }
+
+ snprintf (snapbrckord, sizeof(snapbrckord) - 1,
+ "vol%ld.brick%ld.order", i+1, j);
+
+ ret = dict_get_int64 (src, snapbrckord, &brick_order);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get brick order");
+ goto out;
+ }
+
+ /* Adding the data in the dst dict */
+ snprintf (key, sizeof(key) - 1,
+ "vol%ld.brickdir%ld", i+1, brick_order);
+
+ tmpstr = gf_strdup (snap_brick_dir);
+ if (!tmpstr) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Out Of Memory");
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr (dst, key, tmpstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set %s", key);
+ GF_FREE (tmpstr);
+ goto out;
+ }
+
+ snprintf (key, sizeof(key) - 1,
+ "vol%ld.brick_snapdevice%ld",
+ i+1, brick_order);
+
+ tmpstr = gf_strdup (snap_device);
+ if (!tmpstr) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr (dst, key, tmpstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set %s", key);
+ GF_FREE (tmpstr);
+ goto out;
+ }
+
}
- break;
- default:
- break;
}
+
+ ret = 0;
out:
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
int
-glusterd_snapshot_config_prevalidate (dict_t *dict, char **op_errstr)
+glusterd_snap_pre_validate_use_rsp_dict (dict_t *dst, dict_t *src)
{
- int config_command = 0;
- xlator_t *this = NULL;
- int ret = -1;
+ int ret = -1;
+ int32_t snap_command = 0;
+ xlator_t *this = NULL;
this = THIS;
- ret = dict_get_int32 (dict, "config-command", &config_command);
+ GF_ASSERT (this);
+
+ if (!dst || !src) {
+ gf_log (this->name, GF_LOG_ERROR, "Source or Destination "
+ "dict is empty.");
+ goto out;
+ }
+
+ ret = dict_get_int32 (dst, "type", &snap_command);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to get config-command type");
+ gf_log (this->name, GF_LOG_ERROR, "unable to get the type of "
+ "the snapshot command");
goto out;
}
- switch (config_command) {
- case GF_SNAP_CONFIG_SYS_MAX:
- case GF_SNAP_CONFIG_VOL_MAX:
- case GF_SNAP_CONFIG_CG_MAX:
- case GF_SNAP_CONFIG_DISPLAY:
- ret = glusterd_snapshot_config_limit_prevalidate (dict,
- op_errstr,
- config_command);
+ switch (snap_command) {
+ case GF_SNAP_OPTION_TYPE_CREATE:
+ ret = glusterd_snap_create_pre_val_use_rsp_dict (dst, src);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to use "
+ "rsp dict");
+ goto out;
+ }
break;
default:
- ret = -1;
- gf_log (this->name, GF_LOG_ERROR, "Incorrect config op");
break;
}
+
+ ret = 0;
out:
+ gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
int
glusterd_snapshot_create_prevalidate (dict_t *dict, char **op_errstr,
- dict_t *rsp_dict)
+ dict_t *rsp_dict)
{
- char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- int64_t volume_count = 0;
- char volname_buf[PATH_MAX] = {0, };
- int64_t i = 0;
- xlator_t *this = NULL;
- int ret = -1;
+ char *volname = NULL;
+ char *snapname = NULL;
+ char *device = NULL;
+ char *tmpstr = NULL;
+ char *brick_dir = NULL;
+ char snap_brick_dir[PATH_MAX] = "";
+ char *mnt_pt = NULL;
+ char key[PATH_MAX] = "";
+ char snap_mount[PATH_MAX] = "";
+ char snap_volname[64] = "";
+ char err_str[PATH_MAX] = "";
+ int ret = -1;
+ int64_t i = 0;
+ int64_t volcount = 0;
+ int64_t brick_count = 0;
+ int64_t brick_order = 0;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
+ uuid_t *snap_volid = NULL;
+ gf_loglevel_t loglevel = GF_LOG_ERROR;
+ glusterd_conf_t *conf = NULL;
+ int64_t effective_max_limit = 0;
this = THIS;
+ GF_ASSERT (op_errstr);
+ conf = this->private;
+ GF_ASSERT (conf);
- ret = dict_get_int64 (dict, "volcount", &volume_count);
+ ret = dict_get_int64 (dict, "volcount", &volcount);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "failed to "
- "get the volume count");
+ snprintf (err_str, sizeof (err_str), "Failed to "
+ "get the volume count");
goto out;
}
- for (i = 0; i < volume_count; i++) {
- snprintf (volname_buf, sizeof (volname_buf),
- "volname%ld", i+1);
- ret = dict_get_str (dict, volname_buf,
- &volname);
+ if (volcount <= 0) {
+ snprintf (err_str, sizeof (err_str), "Invalid volume count %ld "
+ "supplied", volcount);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str), "Failed to get snapname");
+ goto out;
+ }
+
+ if (glusterd_find_snap_by_name (snapname)) {
+ ret = -1;
+ snprintf (err_str, sizeof (err_str), "Snap %s already exists",
+ snapname);
+ goto out;
+ }
+
+ for (i = 1; i <= volcount; i++) {
+ snprintf (key, sizeof (key), "volname%ld", i);
+ ret = dict_get_str (dict, key, &volname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to get volume name");
+ snprintf (err_str, sizeof (err_str),
+ "failed to get volume name");
goto out;
}
ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "failed to get the volinfo for "
- "the volume %s", volname);
+ snprintf (err_str, sizeof (err_str),
+ "Volume (%s) does not exist ", volname);
+ goto out;
+ }
+
+ ret = -1;
+ if (!glusterd_is_volume_started (volinfo)) {
+ snprintf (err_str, sizeof (err_str), "volume %s is "
+ "not started", volinfo->volname);
+ loglevel = GF_LOG_WARNING;
goto out;
}
if (glusterd_is_defrag_on (volinfo)) {
- ret = -1;
- gf_log (this->name, GF_LOG_WARNING,
- "rebalance process is running "
- "for the volume %s", volname);
+ snprintf (err_str, sizeof (err_str),
+ "rebalance process is running for the "
+ "volume %s", volname);
+ loglevel = GF_LOG_WARNING;
goto out;
}
- //Also check whether geo replication is running
- }
-out:
- return ret;
-}
+ /* TODO: Also check whether geo replication is running */
-int
-glusterd_snapshot_prevalidate (dict_t *dict, char **op_errstr,
- dict_t *rsp_dict)
-{
- int snap_command = 0;
- xlator_t *this = NULL;
- int ret = -1;
+ if (volinfo->is_snap_volume == _gf_true) {
+ snprintf (err_str, sizeof (err_str),
+ "Volume %s is a snap volume", volname);
+ loglevel = GF_LOG_WARNING;
+ goto out;
+ }
- this = THIS;
+ if (volinfo->snap_max_hard_limit < conf->snap_max_hard_limit)
+ effective_max_limit = volinfo->snap_max_hard_limit;
+ else
+ effective_max_limit = conf->snap_max_hard_limit;
- GF_ASSERT (this);
- GF_ASSERT (dict);
- GF_ASSERT (rsp_dict); //not sure if this is needed, verify.
+ if (volinfo->snap_count >= effective_max_limit) {
+ snprintf (err_str, sizeof (err_str),
+ "The number of existing snaps has reached "
+ "the effective maximum limit of %"PRIu64" ,"
+ "for the volume %s", effective_max_limit,
+ volname);
+ loglevel = GF_LOG_WARNING;
+ goto out;
+ }
- ret = dict_get_int32 (dict, "type", &snap_command);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "unable to get the type of "
- "the snapshot command");
- goto out;
- }
+ snprintf (key, sizeof(key) - 1, "vol%ld_volid", i);
+ ret = dict_get_bin (dict, key, (void **)&snap_volid);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch snap_volid");
+ goto out;
+ }
- switch (snap_command) {
- case (GF_SNAP_OPTION_TYPE_CREATE):
- ret = glusterd_snapshot_create_prevalidate (dict, op_errstr,
- rsp_dict);
- break;
+ /* snap volume uuid is used as lvm snapshot name.
+ This will avoid restrictions on snapshot names
+ provided by user */
+ GLUSTERD_GET_UUID_NOHYPHEN (snap_volname, *snap_volid);
+
+ brick_count = 0;
+ brick_order = 0;
+ /* Adding snap bricks mount paths to the dict */
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ if (uuid_compare (brickinfo->uuid, MY_UUID)) {
+ brick_order++;
+ continue;
+ }
- case (GF_SNAP_OPTION_TYPE_CONFIG):
- ret = glusterd_snapshot_config_prevalidate (dict, op_errstr);
- break;
+ if (!glusterd_is_brick_started (brickinfo)) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "brick %s:%s is not started",
+ brickinfo->hostname,
+ brickinfo->path);
+ brick_order++;
+ brick_count++;
+ continue;
+ }
- case GF_SNAP_OPTION_TYPE_RESTORE:
- ret = glusterd_snapshot_restore_prevalidate (dict, op_errstr);
+ device = glusterd_get_brick_mount_details (brickinfo);
+ if (!device) {
+ snprintf (err_str, sizeof (err_str),
+ "getting device name for the brick "
+ "%s:%s failed", brickinfo->hostname,
+ brickinfo->path);
+ ret = -1;
+ goto out;
+ }
+
+ device = glusterd_build_snap_device_path (device,
+ snap_volname);
+ if (!device) {
+ snprintf (err_str, sizeof (err_str),
+ "cannot copy the snapshot device "
+ "name (volname: %s, snapname: %s)",
+ volinfo->volname, snapname);
+ loglevel = GF_LOG_WARNING;
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (key, sizeof(key),
+ "vol%ld.brick_snapdevice%ld", i,
+ brick_count);
+ ret = dict_set_dynstr (rsp_dict, key, device);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set %s", key);
+ GF_FREE (device);
+ goto out;
+ }
+
+ ret = glusterd_get_brick_root (brickinfo->path,
+ &mnt_pt);
+ if (ret) {
+ snprintf (err_str, sizeof (err_str),
+ "could not get the root of the brick path %s",
+ brickinfo->path);
+ loglevel = GF_LOG_WARNING;
+ goto out;
+ }
+ if (strncmp (brickinfo->path, mnt_pt, strlen(mnt_pt))) {
+ snprintf (err_str, sizeof (err_str),
+ "brick: %s brick mount: %s",
+ brickinfo->path, mnt_pt);
+ loglevel = GF_LOG_WARNING;
+ goto out;
+ }
+
+ brick_dir = &brickinfo->path[strlen (mnt_pt)];
+ brick_dir++;
+
+ snprintf (snap_brick_dir, sizeof (snap_brick_dir),
+ "/%s", brick_dir);
+
+ tmpstr = gf_strdup (snap_brick_dir);
+ if (!tmpstr) {
+ ret = -1;
+ goto out;
+ }
+ snprintf (key, sizeof(key), "vol%ld.brickdir%ld", i,
+ brick_count);
+ ret = dict_set_dynstr (rsp_dict, key, tmpstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set %s", snap_mount);
+ goto out;
+ }
+ tmpstr = NULL;
+
+ snprintf (key, sizeof(key) - 1, "vol%ld.brick%ld.order",
+ i, brick_count);
+ ret = dict_set_int64 (rsp_dict, key, brick_order);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set %s", key);
+ goto out;
+ }
+
+ brick_count++;
+ brick_order++;
+ }
+ snprintf (key, sizeof(key) - 1, "vol%ld_brickcount", i);
+ ret = dict_set_int64 (rsp_dict, key, brick_count);
if (ret) {
- gf_log (this->name, GF_LOG_WARNING, "Snapshot restore "
- "validation failed");
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set %s",
+ key);
goto out;
}
- break;
- default:
- gf_log (this->name, GF_LOG_WARNING, "invalid snap command");
+ }
+
+ ret = dict_set_int64 (rsp_dict, "volcount", volcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set volcount");
goto out;
}
+
+ ret = 0;
out:
+ if (ret)
+ GF_FREE (tmpstr);
+
+ if (ret && err_str[0] != '\0') {
+ gf_log (this->name, loglevel, "%s", err_str);
+ *op_errstr = gf_strdup (err_str);
+ }
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
@@ -644,8 +843,16 @@ glusterd_new_snap_object()
snap = GF_CALLOC (1, sizeof (*snap), gf_gld_mt_snap_t);
if (snap) {
- LOCK_INIT (&snap->lock);
+ if (LOCK_INIT (&snap->lock)) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed initiating"
+ " snap lock");
+ GF_FREE (snap);
+ return NULL;
+ }
+
INIT_LIST_HEAD (&snap->snap_list);
+ INIT_LIST_HEAD (&snap->volumes);
+ snap->snapname[0] = 0;
snap->snap_status = GD_SNAP_STATUS_INIT;
}
@@ -653,371 +860,589 @@ glusterd_new_snap_object()
};
-glusterd_snap_cg_t*
-glusterd_new_snap_cg_object(int64_t volume_count)
-{
- glusterd_snap_cg_t *cg = NULL;
- glusterd_volinfo_t *volinfo = NULL;
-
- if (volume_count < 0) {
- gf_log (THIS->name, GF_LOG_ERROR, "Volume count < 0");
- return NULL;
- }
-
- cg = GF_CALLOC (1, (sizeof (*cg) +
- (volume_count * sizeof (*volinfo))),
- gf_gld_mt_snap_cg_t);
-
- if (cg) {
- LOCK_INIT (&cg->lock);
- INIT_LIST_HEAD (&cg->cg_list);
- cg->cg_status = GD_SNAP_STATUS_INIT;
- cg->volume_count = volume_count;
- }
-
- return cg;
-}
-
+/* Function glusterd_list_add_snapvol adds the volinfo object (snapshot volume)
+ to the snapshot object list and to the parent volume list */
int32_t
-glusterd_add_snap (glusterd_volinfo_t *volinfo, glusterd_snap_t *snap)
+glusterd_list_add_snapvol (glusterd_volinfo_t *origin_vol,
+ glusterd_volinfo_t *snap_vol)
{
- int ret = -1;
- uint64_t count = -1;
- glusterd_snap_t *entry = NULL;
- glusterd_snap_t *last = NULL;
- glusterd_snap_t *tmp = NULL;
+ int ret = -1;
+ glusterd_snap_t *snap = NULL;
+
+ GF_VALIDATE_OR_GOTO ("glusterd", origin_vol, out);
+ GF_VALIDATE_OR_GOTO ("glusterd", snap_vol, out);
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- GF_VALIDATE_OR_GOTO ("glusterd", snap, out);
+ snap = snap_vol->snapshot;
+ GF_ASSERT (snap);
- LOCK (&volinfo->lock);
+ list_add_tail (&snap_vol->vol_list, &snap->volumes);
+ LOCK (&origin_vol->lock);
{
- list_for_each_entry_safe (entry, tmp, &volinfo->snaps,
- snap_list) {
- count++;
- if (!strcmp (entry->snap_name, snap->snap_name) ||
- !uuid_compare (entry->snap_id, snap->snap_id)) {
- gf_log (THIS->name, GF_LOG_ERROR, "Found "
- "duplicate snap %s (%s)",
- entry->snap_name,
- uuid_utoa (entry->snap_id));
- goto unlock;
- }
- last = entry;
- }
- list_add_tail (&snap->snap_list, &volinfo->snaps);
- volinfo->snap_count++;
- gf_log (THIS->name, GF_LOG_DEBUG, "Snap %s added @ %"PRIu64,
- snap->snap_name, count);
- ret = 0;
+ list_add_order (&snap_vol->snapvol_list,
+ &origin_vol->snap_volumes,
+ glusterd_compare_snap_vol_time);
+ origin_vol->snap_count++;
}
-unlock:
- UNLOCK (&volinfo->lock);
+ UNLOCK (&origin_vol->lock);
+
+ gf_log (THIS->name, GF_LOG_DEBUG, "Snap %s added to the list",
+ snap->snapname);
+ ret = 0;
out:
return ret;
}
glusterd_snap_t*
-glusterd_find_snap_by_index (glusterd_volinfo_t *volinfo, uint64_t index)
+glusterd_find_snap_by_name (char *snapname)
{
- uint64_t count = 0;
- glusterd_snap_t *entry = NULL;
- glusterd_snap_t *tmp = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_conf_t *priv = NULL;
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
+ priv = THIS->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (snapname);
- LOCK (&volinfo->lock);
- {
- list_for_each_entry_safe (entry, tmp, &volinfo->snaps,
- snap_list) {
- if (index == count) {
- gf_log (THIS->name, GF_LOG_DEBUG, "Found "
- "snap %s (%s)", entry->snap_name,
- uuid_utoa (entry->snap_id));
- break;
- }
- ++count;
+
+ list_for_each_entry (snap, &priv->snapshots, snap_list) {
+ if (!strcmp (snap->snapname, snapname)) {
+ gf_log (THIS->name, GF_LOG_DEBUG, "Found "
+ "snap %s (%s)", snap->snapname,
+ uuid_utoa (snap->snap_id));
+ goto out;
}
}
- UNLOCK (&volinfo->lock);
+ snap = NULL;
out:
- return entry;
+ return snap;
}
glusterd_snap_t*
-glusterd_find_snap_by_name (glusterd_volinfo_t *volinfo, char *snap_name)
+glusterd_find_snap_by_id (uuid_t snap_id)
{
- uint64_t count = -1;
- glusterd_snap_t *entry = NULL;
- glusterd_snap_t *dup = NULL;
- glusterd_snap_t *tmp = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_conf_t *priv = NULL;
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- GF_VALIDATE_OR_GOTO ("glusterd", snap_name, out);
+ priv = THIS->private;
+ GF_ASSERT (priv);
- LOCK (&volinfo->lock);
- {
- list_for_each_entry_safe (entry, tmp, &volinfo->snaps,
- snap_list) {
- count++;
- if (!strcmp (entry->snap_name, snap_name)) {
- gf_log (THIS->name, GF_LOG_DEBUG, "Found "
- "snap %s (%s)", entry->snap_name,
- uuid_utoa (entry->snap_id));
- dup = entry;
- break;
- }
+ if (uuid_is_null(snap_id))
+ goto out;
+
+ list_for_each_entry (snap, &priv->snapshots, snap_list) {
+ if (!uuid_compare (snap->snap_id, snap_id)) {
+ gf_log (THIS->name, GF_LOG_DEBUG, "Found "
+ "snap %s (%s)", snap->snapname,
+ uuid_utoa (snap->snap_id));
+ goto out;
}
}
- UNLOCK (&volinfo->lock);
+ snap = NULL;
out:
- return dup;
+ return snap;
}
-glusterd_snap_t*
-glusterd_find_snap_by_id (glusterd_volinfo_t *volinfo, uuid_t snap_id)
+int
+glusterd_do_lvm_snapshot_remove (glusterd_volinfo_t *snap_vol,
+ glusterd_brickinfo_t *brickinfo,
+ const char *mount_pt, const char *snap_device)
{
- uint64_t count = -1;
- glusterd_snap_t *entry = NULL;
- glusterd_snap_t *dup = NULL;
- glusterd_snap_t *tmp = NULL;
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ runner_t runner = {0,};
+ char msg[1024] = {0, };
+ char pidfile[PATH_MAX] = {0, };
+ pid_t pid = -1;
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- if (uuid_is_null(snap_id))
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (!brickinfo) {
+ gf_log (this->name, GF_LOG_ERROR, "brickinfo NULL");
goto out;
+ }
- LOCK (&volinfo->lock);
- {
- list_for_each_entry_safe (entry, tmp, &volinfo->snaps,
- snap_list) {
- count++;
- if (!uuid_compare (entry->snap_id, snap_id)) {
- gf_log (THIS->name, GF_LOG_DEBUG, "Found "
- "snap %s (%s)", entry->snap_name,
- uuid_utoa (entry->snap_id));
- dup = entry;
- break;
- }
+ GF_ASSERT (snap_vol);
+ GF_ASSERT (mount_pt);
+ GF_ASSERT (snap_device);
+
+ GLUSTERD_GET_BRICK_PIDFILE (pidfile, snap_vol, brickinfo, priv);
+ if (glusterd_is_service_running (pidfile, &pid)) {
+ ret = kill (pid, SIGKILL);
+ if (ret && errno != ESRCH) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to kill pid "
+ "%d reason : %s", pid, strerror(errno));
+ goto out;
}
}
- UNLOCK (&volinfo->lock);
-out:
- return dup;
-}
-glusterd_snap_t*
-glusterd_remove_snap_by_id (glusterd_volinfo_t *volinfo, uuid_t snap_id)
-{
- glusterd_snap_t *entry = NULL;
+ runinit (&runner);
+ snprintf (msg, sizeof (msg), "umount the snapshot mounted path %s",
+ mount_pt);
+ runner_add_args (&runner, "umount", mount_pt, NULL);
+ runner_log (&runner, "", GF_LOG_DEBUG, msg);
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- if (uuid_is_null(snap_id))
+ /* We need not do synclock_unlock => runner_run => synclock_lock here.
+ Because it is needed if we are running a glusterfs process in
+ runner_run, so that when the glusterfs process started wants to
+ communicate to glusterd, glusterd wont be able to respond if it
+ has held the big lock. So we do unlock, run glusterfs process
+ (thus communicate to glusterd), lock. But since this is not a
+ glusterfs command that is being run, unlocking and then relocking
+ is not needed.
+ */
+ ret = runner_run (&runner);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "unmounting the "
+ "path %s (brick: %s) failed (%s)", mount_pt,
+ brickinfo->path, strerror (errno));
goto out;
+ }
- entry = glusterd_find_snap_by_id (volinfo, snap_id);
+ runinit (&runner);
+ snprintf (msg, sizeof(msg), "remove snapshot of the brick %s:%s, "
+ "device: %s", brickinfo->hostname, brickinfo->path,
+ snap_device);
+ runner_add_args (&runner, "/sbin/lvremove", "-f", snap_device, NULL);
+ runner_log (&runner, "", GF_LOG_DEBUG, msg);
- if (entry) {
- LOCK (&volinfo->lock);
- {
- entry->snap_status = GD_SNAP_STATUS_DECOMMISSION;
- list_del_init (&entry->snap_list);
- }
- UNLOCK (&volinfo->lock);
+ ret = runner_run (&runner);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "removing snapshot of the "
+ "brick (%s:%s) of device %s failed",
+ brickinfo->hostname, brickinfo->path, snap_device);
+ goto out;
}
+
out:
- return entry;
+ return ret;
}
-glusterd_snap_t*
-glusterd_remove_snap_by_name (glusterd_volinfo_t *volinfo, char *snap_name)
+int32_t
+glusterd_lvm_snapshot_remove (dict_t *rsp_dict, glusterd_volinfo_t *snap_vol)
{
- glusterd_snap_t *entry = NULL;
+ char *mnt_pt = NULL;
+ struct mntent *entry = NULL;
+ int32_t brick_count = -1;
+ int32_t ret = -1;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ xlator_t *this = NULL;
+ FILE *mtab = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (snap_vol);
+
+ if (!snap_vol) {
+ gf_log (this->name, GF_LOG_ERROR, "snap volinfo is NULL");
+ goto out;
+ }
- GF_VALIDATE_OR_GOTO ("glusterd", volinfo, out);
- GF_VALIDATE_OR_GOTO ("glusterd", snap_name, out);
+ brick_count = -1;
+ list_for_each_entry (brickinfo, &snap_vol->bricks, brick_list) {
+ brick_count++;
+ if (uuid_compare (brickinfo->uuid, MY_UUID))
+ continue;
- entry = glusterd_find_snap_by_name (volinfo, snap_name);
+ if (brickinfo->snap_status == -1) {
+ gf_log (this->name, GF_LOG_INFO,
+ "snapshot was pending. lvm not present "
+ "for brick %s:%s of the snap %s.",
+ brickinfo->hostname, brickinfo->path,
+ snap_vol->snapshot->snapname);
+
+ /* Adding missed delete to the dict */
+ ret = glusterd_add_missed_snaps_to_dict
+ (rsp_dict,
+ snap_vol->volname,
+ brickinfo,
+ brick_count + 1,
+ GF_SNAP_OPTION_TYPE_DELETE);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add missed snapshot info "
+ "for %s:%s in the rsp_dict",
+ brickinfo->hostname,
+ brickinfo->path);
+ goto out;
+ }
- if (entry) {
- LOCK (&volinfo->lock);
- {
- entry->snap_status = GD_SNAP_STATUS_DECOMMISSION;
- list_del_init (&entry->snap_list);
+ continue;
+ }
+
+ ret = glusterd_get_brick_root (brickinfo->path, &mnt_pt);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "getting the root "
+ "of the brick for volume %s (snap %s) failed ",
+ snap_vol->volname, snap_vol->snapshot->snapname);
+ goto out;
}
- UNLOCK (&volinfo->lock);
+
+ entry = glusterd_get_mnt_entry_info (mnt_pt, mtab); /* NOTE(review): mtab is passed by value and stays NULL here, so the endmntent() guard below looks dead — confirm callee signature */
+ if (!entry) {
+ gf_log (this->name, GF_LOG_WARNING, "getting the mount"
+ " entry for the brick %s:%s of the snap %s "
+ "(volume: %s) failed", brickinfo->hostname,
+ brickinfo->path, snap_vol->snapshot->snapname,
+ snap_vol->volname);
+ ret = -1;
+ goto out;
+ }
+ ret = glusterd_do_lvm_snapshot_remove (snap_vol, brickinfo,
+ mnt_pt,
+ entry->mnt_fsname);
+ if (mtab)
+ endmntent (mtab);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
+ "remove the snapshot %s (%s)",
+ brickinfo->path, entry->mnt_fsname);
+ goto out;
+ }
+
}
+
+ ret = 0;
out:
- return entry;
+ return ret;
}
-// Big lock should already acquired before this is called
int32_t
-glusterd_add_snap_cg (glusterd_conf_t *conf, glusterd_snap_cg_t *cg)
+glusterd_snap_volume_remove (dict_t *rsp_dict,
+ glusterd_volinfo_t *snap_vol,
+ gf_boolean_t remove_lvm,
+ gf_boolean_t force)
{
- int ret = -1;
- uint64_t count = -1;
- glusterd_snap_cg_t *entry = NULL;
- glusterd_snap_cg_t *last = NULL;
- glusterd_snap_cg_t *tmp = NULL;
+ int ret = -1;
+ int save_ret = 0;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_volinfo_t *origin_vol = NULL;
+ xlator_t *this = NULL;
- GF_VALIDATE_OR_GOTO (THIS->name, conf, out);
- GF_VALIDATE_OR_GOTO (THIS->name, cg, out);
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (snap_vol);
- list_for_each_entry_safe (entry, tmp, &conf->snap_cg, cg_list) {
- count++;
- if (!strcmp (entry->cg_name, cg->cg_name) ||
- !uuid_compare (entry->cg_id, cg->cg_id)) {
- gf_log (THIS->name, GF_LOG_ERROR, "Found duplicate "
- "CG %s(%s)", entry->cg_name,
- uuid_utoa(entry->cg_id));
+ if (!snap_vol) {
+ gf_log(this->name, GF_LOG_WARNING, "snap_vol in NULL");
+ ret = -1;
+ goto out;
+ }
+
+ list_for_each_entry (brickinfo, &snap_vol->bricks, brick_list) {
+ if (uuid_compare (brickinfo->uuid, MY_UUID))
+ continue;
+
+ ret = glusterd_brick_stop (snap_vol, brickinfo, _gf_false);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Failed to stop "
+ "brick for volume %s", snap_vol->volname);
+ save_ret = ret;
+
+ /* Continue to cleaning up the snap in case of error
+ if force flag is enabled */
+ if (!force)
+ goto out;
+ }
+ }
+
+ /* Only remove the backend lvm when required */
+ if (remove_lvm) {
+ ret = glusterd_lvm_snapshot_remove (rsp_dict, snap_vol);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Failed to remove "
+ "lvm snapshot volume %s", snap_vol->volname);
+ save_ret = ret;
+ if (!force)
+ goto out;
+ }
+ }
+
+ ret = glusterd_store_delete_volume (snap_vol);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Failed to remove volume %s "
+ "from store", snap_vol->volname);
+ save_ret = ret;
+ if (!force)
goto out;
+ }
+
+ if (!list_empty(&snap_vol->snapvol_list)) {
+ ret = glusterd_volinfo_find (snap_vol->parent_volname,
+ &origin_vol);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "parent volinfo %s for volume %s",
+ snap_vol->parent_volname, snap_vol->volname);
+ save_ret = ret;
+ if (!force)
+ goto out;
}
- last = entry;
+ origin_vol->snap_count--;
}
- list_add_tail (&cg->cg_list, &conf->snap_cg);
- gf_log (THIS->name, GF_LOG_DEBUG, "Added CG %s (%s) @ %"PRIu64,
- cg->cg_name, uuid_utoa(cg->cg_id), count);
- ret = 0;
+
+ ret = glusterd_volinfo_delete (snap_vol);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Failed to remove volinfo "
+ "%s ", snap_vol->volname);
+ save_ret = ret;
+ if (!force)
+ goto out;
+ }
+
+ if (save_ret)
+ ret = save_ret;
out:
+ gf_log (this->name, GF_LOG_TRACE, "returning %d", ret);
return ret;
-
}
-glusterd_snap_cg_t*
-glusterd_find_snap_cg_by_name (glusterd_conf_t *conf, char *cg_name)
+int32_t
+glusterd_snapobject_delete (glusterd_snap_t *snap)
{
- glusterd_snap_cg_t *entry = NULL;
- glusterd_snap_cg_t *dup = NULL;
- glusterd_snap_cg_t *tmp = NULL;
-
- GF_VALIDATE_OR_GOTO (THIS->name, conf, out);
- GF_VALIDATE_OR_GOTO (THIS->name, cg_name, out);
-
- list_for_each_entry_safe (entry, tmp, &conf->snap_cg, cg_list) {
- if (!strcmp (entry->cg_name, cg_name)) {
- gf_log (THIS->name, GF_LOG_DEBUG, "Found CG %s(%s)",
- entry->cg_name, uuid_utoa(entry->cg_id));
- dup = entry;
- break;
- }
+ if (snap == NULL) {
+ gf_log(THIS->name, GF_LOG_WARNING, "snap is NULL");
+ return -1;
}
-out:
- return dup;
+
+ list_del_init (&snap->snap_list);
+ list_del_init (&snap->volumes);
+ if (LOCK_DESTROY(&snap->lock))
+                gf_log (THIS->name, GF_LOG_WARNING, "Failed destroying lock "
+ "of snap %s", snap->snapname);
+
+ GF_FREE (snap->description);
+ GF_FREE (snap);
+
+ return 0;
}
-glusterd_snap_cg_t*
-glusterd_find_snap_cg_by_id (glusterd_conf_t *conf, uuid_t cg_id)
+int32_t
+glusterd_snap_remove (dict_t *rsp_dict,
+ glusterd_snap_t *snap,
+ gf_boolean_t remove_lvm,
+ gf_boolean_t force)
{
- glusterd_snap_cg_t *entry = NULL;
- glusterd_snap_cg_t *dup = NULL;
- glusterd_snap_cg_t *tmp = NULL;
+ int ret = -1;
+ int save_ret = 0;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *tmp = NULL;
+ xlator_t *this = NULL;
- GF_VALIDATE_OR_GOTO (THIS->name, conf, out);
- if (uuid_is_null (cg_id))
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (snap);
+
+ if (!snap) {
+ gf_log(this->name, GF_LOG_WARNING, "snap is NULL");
+ ret = -1;
goto out;
+ }
- list_for_each_entry_safe (entry, tmp, &conf->snap_cg, cg_list) {
- if (!uuid_compare (entry->cg_id, cg_id)) {
- gf_log (THIS->name, GF_LOG_DEBUG, "Found CG %s(%s)",
- entry->cg_name, uuid_utoa(entry->cg_id));
- dup = entry;
- break;
+ list_for_each_entry_safe (snap_vol, tmp, &snap->volumes, vol_list) {
+ ret = glusterd_snap_volume_remove (rsp_dict, snap_vol,
+ remove_lvm, force);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Failed to remove "
+ "volinfo %s for snap %s", snap_vol->volname,
+ snap->snapname);
+ save_ret = ret;
+
+ /* Continue to cleaning up the snap in case of error
+ if force flag is enabled */
+ if (!force)
+ goto out;
}
}
+
+ ret = glusterd_store_delete_snap (snap);
+ if (ret) {
+ gf_log(this->name, GF_LOG_WARNING, "Failed to remove snap %s "
+ "from store", snap->snapname);
+ save_ret = ret;
+ if (!force)
+ goto out;
+ }
+
+ ret = glusterd_snapobject_delete (snap);
+ if (ret)
+ gf_log (this->name, GF_LOG_WARNING, "Failed to delete "
+ "snap object %s", snap->snapname);
+
+ if (save_ret)
+ ret = save_ret;
out:
- return dup;
+ gf_log (THIS->name, GF_LOG_TRACE, "returning %d", ret);
+ return ret;
}
-glusterd_snap_cg_t*
-glusterd_remove_snap_cg_by_name (glusterd_conf_t *conf, char *cg_name)
+static int
+glusterd_snapshot_get_snapvol_detail (dict_t *dict,
+ glusterd_volinfo_t *snap_vol,
+ char *keyprefix, int detail)
{
- glusterd_snap_cg_t *entry = NULL;
+ int ret = -1;
+ int snap_limit = 0;
+ char key[PATH_MAX] = {0,};
+ char *value = NULL;
+ glusterd_volinfo_t *origin_vol = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ conf = this->private;
+ GF_ASSERT (conf);
- GF_VALIDATE_OR_GOTO (THIS->name, conf, out);
- GF_VALIDATE_OR_GOTO (THIS->name, cg_name, out);
+ GF_ASSERT (dict);
+ GF_ASSERT (snap_vol);
+ GF_ASSERT (keyprefix);
+
+ /* Volume Name */
+ value = gf_strdup (snap_vol->volname);
+ if (!value)
+ goto out;
- entry = glusterd_find_snap_cg_by_name(conf, cg_name);
- if (entry) {
- entry->cg_status = GD_SNAP_STATUS_DECOMMISSION;
- list_del_init (&entry->cg_list);
+ snprintf (key, sizeof (key), "%s.volname", keyprefix);
+ ret = dict_set_dynstr (dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "volume name in dictionary: %s", key);
+ goto out;
}
-out:
- return entry;
-}
-glusterd_snap_cg_t*
-glusterd_remove_snap_cg_by_id (glusterd_conf_t *conf, uuid_t cg_id)
-{
- glusterd_snap_cg_t *entry = NULL;
+ /* Volume ID */
+ value = gf_strdup (uuid_utoa (snap_vol->volume_id));
+ if (NULL == value) {
+ ret = -1;
+ goto out;
+ }
- GF_VALIDATE_OR_GOTO (THIS->name, conf, out);
- if (uuid_is_null (cg_id))
+ snprintf (key, sizeof (key), "%s.vol-id", keyprefix);
+ ret = dict_set_dynstr (dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "volume id in dictionary: %s", key);
goto out;
+ }
+ value = NULL;
- entry = glusterd_find_snap_cg_by_id (conf, cg_id);
- if (entry) {
- entry->cg_status = GD_SNAP_STATUS_DECOMMISSION;
- list_del_init (&entry->cg_list);
+ /* volume status */
+ snprintf (key, sizeof (key), "%s.vol-status", keyprefix);
+ switch (snap_vol->status) {
+ case GLUSTERD_STATUS_STARTED:
+ ret = dict_set_str (dict, key, "Started");
+ break;
+ case GLUSTERD_STATUS_STOPPED:
+ ret = dict_set_str (dict, key, "Stopped");
+ break;
+ case GD_SNAP_STATUS_NONE:
+ ret = dict_set_str (dict, key, "None");
+ break;
+ default:
+ gf_log (this->name, GF_LOG_ERROR, "Invalid volume status");
+ ret = -1;
+ goto out;
+ }
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set volume status"
+ " in dictionary: %s", key);
+ goto out;
}
-out:
- return entry;
-}
-int32_t
-glusterd_delete_snap_volume (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snapinfo)
-{
- int ret = -1;
- GF_ASSERT (volinfo);
- ret = glusterd_store_delete_volume (volinfo, snapinfo);
+ ret = glusterd_volinfo_find (snap_vol->parent_volname, &origin_vol);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to get the parent "
+ "volinfo for the volume %s", snap_vol->volname);
+ goto out;
+ }
- if (ret)
+ /* Snaps available */
+ if (conf->snap_max_hard_limit < origin_vol->snap_max_hard_limit) {
+ snap_limit = conf->snap_max_hard_limit;
+ gf_log(this->name, GF_LOG_DEBUG, "system snap-max-hard-limit is"
+ " lesser than volume snap-max-hard-limit, "
+ "snap-max-hard-limit value is set to %d", snap_limit);
+ } else {
+ snap_limit = origin_vol->snap_max_hard_limit;
+ gf_log(this->name, GF_LOG_DEBUG, "volume snap-max-hard-limit is"
+ " lesser than system snap-max-hard-limit, "
+ "snap-max-hard-limit value is set to %d", snap_limit);
+ }
+
+ snprintf (key, sizeof (key), "%s.snaps-available", keyprefix);
+ if (snap_limit > origin_vol->snap_count)
+ ret = dict_set_int32 (dict, key,
+ snap_limit - origin_vol->snap_count);
+ else
+ ret = dict_set_int32 (dict, key, 0);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set available snaps");
+ goto out;
+ }
+
+ snprintf (key, sizeof (key), "%s.snapcount", keyprefix);
+ ret = dict_set_int32 (dict, key, origin_vol->snap_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not save snapcount");
+ goto out;
+ }
+
+ if (!detail)
+ goto out;
+
+ /* Parent volume name */
+ value = gf_strdup (snap_vol->parent_volname);
+ if (!value)
+ goto out;
+
+ snprintf (key, sizeof (key), "%s.origin-volname", keyprefix);
+ ret = dict_set_dynstr (dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set parent "
+ "volume name in dictionary: %s", key);
goto out;
+ }
+ value = NULL;
- ret = glusterd_volinfo_delete (snapinfo);
+ ret = 0;
out:
- gf_log (THIS->name, GF_LOG_DEBUG, "returning %d", ret);
+ if (value)
+ GF_FREE (value);
+
return ret;
}
-/* This function will retrieve the details of a single snap
- * and then serialize them to dictionary (dict)
- * This function is called under snap lock
- *
- * @param dict dictionary where response should be serialized
- * @param keyprefix Prefix used for all the keys for rspdict dictionary
- * @param entry Snap object
- * @param detail if 1 then more details will be added for snap list
- *
- * @return -1 on failure and 0 on success.
- */
static int
-glusterd_snapshot_get_snapdetail_lk (dict_t *dict, char *keyprefix,
- glusterd_snap_t *entry,
- int8_t detail)
+glusterd_snapshot_get_snap_detail (dict_t *dict, glusterd_snap_t *snap,
+ char *keyprefix, glusterd_volinfo_t *volinfo)
{
- int ret = -1; /* Failure */
- const int maxstrlen = 256;
- char *value = NULL;
- char *timestr = NULL;
- struct tm *tmptr = NULL;
- xlator_t *this = NULL;
- char key[maxstrlen];
+ int ret = -1;
+ int volcount = 0;
+ char key[PATH_MAX] = {0,};
+ char *value = NULL;
+ char *timestr = NULL;
+ struct tm *tmptr = NULL;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *tmp_vol = NULL;
+ xlator_t *this = NULL;
this = THIS;
- /* General parameter validation */
- GF_ASSERT (this);
GF_ASSERT (dict);
+ GF_ASSERT (snap);
GF_ASSERT (keyprefix);
- GF_ASSERT (entry);
/* Snap Name */
- value = gf_strdup (entry->snap_name);
- if (NULL == value) {
+ value = gf_strdup (snap->snapname);
+ if (!value)
goto out;
- }
snprintf (key, sizeof (key), "%s.snapname", keyprefix);
ret = dict_set_dynstr (dict, key, value);
@@ -1028,32 +1453,22 @@ glusterd_snapshot_get_snapdetail_lk (dict_t *dict, char *keyprefix,
}
/* Snap ID */
- value = gf_strdup (uuid_utoa (entry->snap_id));
+ value = gf_strdup (uuid_utoa (snap->snap_id));
if (NULL == value) {
ret = -1;
goto out;
}
- ret = snprintf (key, sizeof (key), "%s.snap-id", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
+ snprintf (key, sizeof (key), "%s.snap-id", keyprefix);
ret = dict_set_dynstr (dict, key, value);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to set "
"snap id in dictionary");
goto out;
}
-
- /* Ownership of value transferred to dict. Therefore we must initalize
- * it to NULL */
value = NULL;
- /* Snap Timestamp */
-
- /* convert time_t to tm struct. */
- tmptr = localtime (&(entry->time_stamp));
+ tmptr = localtime (&(snap->time_stamp));
if (NULL == tmptr) {
gf_log (this->name, GF_LOG_ERROR, "Failed to convert "
"time_t to *tm");
@@ -1061,14 +1476,13 @@ glusterd_snapshot_get_snapdetail_lk (dict_t *dict, char *keyprefix,
goto out;
}
- timestr = GF_CALLOC (1, maxstrlen, gf_gld_mt_char);
+ timestr = GF_CALLOC (1, PATH_MAX, gf_gld_mt_char);
if (NULL == timestr) {
ret = -1;
goto out;
}
- /* Format time into string */
- ret = strftime (timestr, maxstrlen, "%Y-%m-%d %H:%M:%S", tmptr);
+ ret = strftime (timestr, PATH_MAX, "%Y-%m-%d %H:%M:%S", tmptr);
if (0 == ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to convert time_t "
"to string");
@@ -1076,1022 +1490,864 @@ glusterd_snapshot_get_snapdetail_lk (dict_t *dict, char *keyprefix,
goto out;
}
- ret = snprintf (key, sizeof (key), "%s.snap-time", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
+ snprintf (key, sizeof (key), "%s.snap-time", keyprefix);
ret = dict_set_dynstr (dict, key, timestr);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to set "
"snap time stamp in dictionary");
goto out;
}
-
- /* Ownership of timestr transferred to dict. Therefore we must initalize
- * it to NULL */
timestr = NULL;
- if (!detail) {
- /* If detail is not needed then return from here */
- goto out;
- }
-
- /* Add detail */
-
- /* If CG name is set the add the details in the dictionary */
- if (0 != entry->cg_name[0] ) {
- /* CG name */
- value = gf_strdup (entry->cg_name);
+ /* If snap description is provided then add that into dictionary */
+ if (NULL != snap->description) {
+ value = gf_strdup (snap->description);
if (NULL == value) {
ret = -1;
goto out;
}
- ret = snprintf (key, sizeof (key), "%s.cg-name", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
+ snprintf (key, sizeof (key), "%s.snap-desc", keyprefix);
ret = dict_set_dynstr (dict, key, value);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to set "
- "snap name in dictionary");
- goto out;
- }
-
- /* CG ID */
- value = gf_strdup (uuid_utoa (entry->cg_id));
- if (NULL == value) {
- ret = -1;
- goto out;
- }
-
- ret = snprintf (key, sizeof (key), "%s.cg-id", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- ret = dict_set_dynstr (dict, key, value);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set "
- "cg id in dictionary");
+ "snap description in dictionary");
goto out;
}
-
- /* Ownership of value transferred to dict. Therefore we must initalize
- * it to NULL */
value = NULL;
}
- /* If snap description is provided then add that into dictionary */
- if (NULL != entry->description) {
- /* Snap Description */
- value = gf_strdup (entry->description);
- if (NULL == value) {
- ret = -1;
- goto out;
- }
-
- ret = snprintf (key, sizeof (key), "%s.snap-desc", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
+ snprintf (key, sizeof (key), "%s.snap-status", keyprefix);
+ switch (snap->snap_status) {
+ case GD_SNAP_STATUS_INIT:
+ ret = dict_set_str (dict, key, "Init");
+ break;
+ case GD_SNAP_STATUS_IN_USE:
+ ret = dict_set_str (dict, key, "In-use");
+ break;
+ case GD_SNAP_STATUS_DECOMMISSION:
+ ret = dict_set_str (dict, key, "Decommisioned");
+ break;
+ case GD_SNAP_STATUS_RESTORED:
+ ret = dict_set_str (dict, key, "Restored");
+ break;
+ case GD_SNAP_STATUS_NONE:
+ ret = dict_set_str (dict, key, "None");
+ break;
+ default:
+ gf_log (this->name, GF_LOG_ERROR, "Invalid snap status");
+ ret = -1;
+ goto out;
+ }
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snap status "
+ "in dictionary");
+ goto out;
+ }
- ret = dict_set_dynstr (dict, key, value);
+ if (volinfo) {
+ volcount = 1;
+ snprintf (key, sizeof (key), "%s.vol%d", keyprefix, volcount);
+ ret = glusterd_snapshot_get_snapvol_detail (dict,
+ volinfo, key, 0);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set "
- "snap description in dictionary");
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "get volume detail %s for snap %s",
+                                volinfo->volname, snap->snapname);
goto out;
}
- /* Ownership of value transferred to dict. Therefore we must initalize
- * it to NULL */
- value = NULL;
+ goto done;
}
- /* Snap status */
- ret = snprintf (key, sizeof (key), "%s.snap-status", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- switch (entry->snap_status) {
- case GD_SNAP_STATUS_INIT:
- ret = dict_set_str (dict, key, "Init");
- break;
- case GD_SNAP_STATUS_IN_USE:
- ret = dict_set_str (dict, key, "In-use");
- break;
- case GD_SNAP_STATUS_DECOMMISSION:
- ret = dict_set_str (dict, key, "Decommisioned");
- break;
- case GD_SNAP_STATUS_RESTORED:
- ret = dict_set_str (dict, key, "Restored");
- break;
- case GD_SNAP_STATUS_NONE:
- ret = dict_set_str (dict, key, "None");
- break;
- default:
- gf_log (this->name, GF_LOG_ERROR, "Invalid snap "
- "status");
- ret = -1;
+ list_for_each_entry_safe (snap_vol, tmp_vol, &snap->volumes, vol_list) {
+ volcount++;
+ snprintf (key, sizeof (key), "%s.vol%d", keyprefix, volcount);
+ ret = glusterd_snapshot_get_snapvol_detail (dict,
+ snap_vol, key, 1);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "get volume detail %s for snap %s",
+ snap_vol->volname, snap->snapname);
goto out;
+ }
}
+done:
+ snprintf (key, sizeof (key), "%s.vol-count", keyprefix);
+ ret = dict_set_int32 (dict, key, volcount);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set "
- "snap status in dictionary");
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set %s",
+ key);
goto out;
}
- ret = 0; /* Success */
+ ret = 0;
out:
- if (NULL != value) {
+ if (value)
GF_FREE (value);
- }
- if (NULL != timestr) {
+ if (timestr)
GF_FREE(timestr);
- }
+
return ret;
}
-/* This function will retrieve the details of a single snap
- * and then serialize them to dictionary (dict)
- *
- * @param dict dictionary where response should be serialized
- * @param keyprefix Prefix used for all the keys for rspdict dictionary
- * @param entry Snap object
- * @param detail if 1 then more details will be added for snap list
- *
- * @return -1 on failure and 0 on success.
- */
static int
-glusterd_snapshot_get_snapdetail (dict_t *dict, char *keyprefix,
- glusterd_snap_t *entry,
- int8_t detail)
+glusterd_snapshot_get_all_snap_info (dict_t *dict)
{
- int ret = -1;
- xlator_t *this = NULL;
+ int ret = -1;
+ int snapcount = 0;
+ char key[PATH_MAX] = {0,};
+ glusterd_snap_t *snap = NULL;
+ glusterd_snap_t *tmp_snap = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
this = THIS;
- GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
/* General parameter validation */
- GF_ASSERT (this);
GF_ASSERT (dict);
- GF_ASSERT (keyprefix);
- GF_ASSERT (entry);
- /* Acquire snap lock */
- LOCK (&(entry->lock));
- {
- ret = glusterd_snapshot_get_snapdetail_lk (dict, keyprefix,
- entry, detail);
+ list_for_each_entry_safe (snap, tmp_snap, &priv->snapshots, snap_list) {
+ snapcount++;
+ snprintf (key, sizeof (key), "snap%d", snapcount);
+ ret = glusterd_snapshot_get_snap_detail (dict, snap, key, NULL);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "snapdetail for snap %s", snap->snapname);
+ goto out;
+ }
}
- UNLOCK (&(entry->lock));
+ ret = dict_set_int32 (dict, "snap-count", snapcount);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get snap detail");
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snapcount");
+ goto out;
}
+ ret = 0;
+out:
return ret;
}
-
-/* This function will retrieve snap list for the given volume
- * and then serialize them to dict.
- * This function is called under volinfo lock.
- *
- * @param dict dictionary where response should be serialized
- * @param keyprefix Prefix used for all the keys for rspdict dictionary
- * @param volinfo Volinfo object of the volume
- * @param snapname snap name. This field can be NULL
- * @param detail if 1 then more details will be added for snap list
- *
- * @return -1 on failure and 0 on success.
- */
-static int
-glusterd_snapshot_vol_get_snaplist_lk (dict_t *dict, char *keyprefix,
- glusterd_volinfo_t *volinfo,
- char *snapname, int8_t detail)
+int
+glusterd_snapshot_get_info_by_volume (dict_t *dict, char *volname,
+ char *err_str, size_t len)
{
- int ret = -1;
- ssize_t index = -1;
- glusterd_snap_t *entry = NULL;
- glusterd_snap_t *tmp = NULL;
- xlator_t *this = NULL;
- char *value = NULL;
- char key[256];
+ int ret = -1;
+ int snapcount = 0;
+ int snap_limit = 0;
+ char *value = NULL;
+ char key[PATH_MAX] = "";
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *tmp_vol = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
this = THIS;
+ conf = this->private;
+ GF_ASSERT (conf);
- /* General parameter validation */
- GF_ASSERT (this);
GF_ASSERT (dict);
- GF_ASSERT (keyprefix);
- GF_ASSERT (volinfo);
+ GF_ASSERT (volname);
- value = gf_strdup (volinfo->volname);
- if (NULL == value) {
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (err_str, len, "Volume (%s) does not exist", volname);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
goto out;
}
- /* First set the volume name */
- ret = snprintf (key, sizeof (key), "%s.volname", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
+ /* Snaps available */
+ if (conf->snap_max_hard_limit < volinfo->snap_max_hard_limit) {
+ snap_limit = conf->snap_max_hard_limit;
+ gf_log(this->name, GF_LOG_DEBUG, "system snap-max-hard-limit is"
+ " lesser than volume snap-max-hard-limit, "
+ "snap-max-hard-limit value is set to %d", snap_limit);
+ } else {
+ snap_limit = volinfo->snap_max_hard_limit;
+ gf_log(this->name, GF_LOG_DEBUG, "volume snap-max-hard-limit is"
+ " lesser than system snap-max-hard-limit, "
+ "snap-max-hard-limit value is set to %d", snap_limit);
}
- ret = dict_set_dynstr (dict, key, value);
+ if (snap_limit > volinfo->snap_count)
+ ret = dict_set_int32 (dict, "snaps-available",
+ snap_limit - volinfo->snap_count);
+ else
+ ret = dict_set_int32 (dict, "snaps-available", 0);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set volume name");
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set available snaps");
goto out;
}
- ret = snprintf (key, sizeof (key), "%s.snap-count-total", keyprefix);
- if (ret < 0) {
+ /* Origin volume name */
+ value = gf_strdup (volinfo->volname);
+ if (!value)
goto out;
- }
- ret = dict_set_int64 (dict, key, volinfo->snap_count);
+ ret = dict_set_dynstr (dict, "origin-volname", value);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set total snap count");
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set parent "
+ "volume name in dictionary: %s", key);
goto out;
}
- /* Ownership of value transferred to dict. Therefore we must initalize
- * it to NULL */
value = NULL;
- /* New entries are always added to the end of snap_list and we need to
- * display the list in LIFO (Last-In-First-Out) order. Therefore insert
- * the entries in reverse order into the dictionary.
- */
- list_for_each_entry_safe_reverse (entry, tmp, &volinfo->snaps,
- snap_list) {
- ++index;
- ret = snprintf (key, sizeof (key), "%s.snap-%ld", keyprefix, index);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- /* If snapname is NULL then get all the snaps
- * for the given volume */
- if (NULL == snapname) {
- ret = glusterd_snapshot_get_snapdetail (dict, key,
- entry, detail);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to "
- "get snap detail for %s snap",
- snapname);
- goto out; /* something wrong */
- }
- continue; /* Get the next entry */
- }
-
- /* If snapname is provided then get snap detail
- * for only that snap */
- if (strncmp (entry->snap_name, snapname,
- sizeof (entry->snap_name))) {
- /* Entry not yet found.*/
- ret = -1;
- continue; /* Check the next entry */
- }
-
- /* snap found */
- ret = snprintf (key, sizeof (key), "%s.snap-0", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- ret = glusterd_snapshot_get_snapdetail (dict,
- key, entry, detail);
+ list_for_each_entry_safe (snap_vol, tmp_vol, &volinfo->snap_volumes,
+ snapvol_list) {
+ snapcount++;
+ snprintf (key, sizeof (key), "snap%d", snapcount);
+ ret = glusterd_snapshot_get_snap_detail (dict,
+ snap_vol->snapshot,
+ key, snap_vol);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get snap "
- "detail for %s snap", snapname);
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "snapdetail for snap %s",
+ snap_vol->snapshot->snapname);
goto out;
}
-
- /* Index is used to identify how many snap objects are
- * added to the dictionary. If snapshot name is passed
- * as argument then we would send only one snap object.
- * Therefore index should be reset to 0. */
- index = 0;
- break; /* Found the snap */
}
-
- /* If all the snap is written into the dictionary then write the
- * snap count into the dictionary */
- if (0 == ret) {
- ++index; /* To get count increment index by 1*/
- ret = snprintf (key, sizeof (key), "%s.snap-count", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- ret = dict_set_int64 (dict, key, index);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set "
- "snap count");
- goto out;
-
- }
- } else if (NULL != snapname) {
- gf_log (this->name, GF_LOG_ERROR, "Snap (%s) not found",
- snapname);
+ ret = dict_set_int32 (dict, "snap-count", snapcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snapcount");
+ goto out;
}
+ ret = 0;
out:
- if (NULL != value) {
+ if (value)
GF_FREE (value);
- }
return ret;
}
-/* This function will retrieve snap list for the given volume
- * and then serialize them to dict.
- *
- * @param dict dictionary where response should be serialized
- * @param keyprefix Prefix used for all the keys for rspdict dictionary
- * @param volinfo Volinfo object of the volume
- * @param snapname snap name. This field can be NULL
- * @param detail if 1 then more details will be added for snap list
+/* This function will be called from RPC handler routine.
+ * This function is responsible for getting the requested
+ * snapshot info into the dictionary.
*
- * @return -1 on failure and 0 on success.
+ * @param req RPC request object. Required for sending a response back.
+ * @param op glusterd operation. Required for sending a response back.
+ * @param dict pointer to dictionary which will contain both
+ * request and response key-pair values.
+ * @return -1 on error and 0 on success
*/
-static int
-glusterd_snapshot_vol_get_snaplist (dict_t *dict, char *keyprefix,
- glusterd_volinfo_t *volinfo,
- char *snapname, int8_t detail)
+int
+glusterd_handle_snapshot_info (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict, char *err_str, size_t len)
{
- int ret = -1; /* Failure */
- xlator_t *this = NULL;
+ int ret = -1;
+ int8_t snap_driven = 1;
+ char *volname = NULL;
+ char *snapname = NULL;
+ glusterd_snap_t *snap = NULL;
+ xlator_t *this = NULL;
+ int32_t cmd = GF_SNAP_INFO_TYPE_ALL;
this = THIS;
-
- /* General parameter validation */
GF_ASSERT (this);
- GF_ASSERT (dict);
- GF_ASSERT (keyprefix);
- GF_ASSERT (volinfo);
- /* Acquire the volinfo lock before proceeding */
- LOCK (&(volinfo->lock));
- {
- ret = glusterd_snapshot_vol_get_snaplist_lk (dict, keyprefix,
- volinfo, snapname, detail);
- }
- UNLOCK (&(volinfo->lock));
+ GF_VALIDATE_OR_GOTO (this->name, req, out);
+ GF_VALIDATE_OR_GOTO (this->name, dict, out);
+
+ ret = dict_get_int32 (dict, "cmd", &cmd);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get snap list for"
- " %s volume", volinfo->volname);
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get type "
+ "of snapshot info");
+ goto out;
}
- return ret;
-}
+ switch (cmd) {
+ case GF_SNAP_INFO_TYPE_ALL:
+ {
+ ret = glusterd_snapshot_get_all_snap_info (dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get info of all snaps");
+ goto out;
+ }
+ break;
+ }
+ case GF_SNAP_INFO_TYPE_SNAP:
+ {
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get snap name");
+ goto out;
+ }
-/* This function will retrieve snap list for the given volume
- * and then serialize them to dict.
- *
- * @param dict dictionary where response should be serialized
- * @param keyprefix Prefix used for all the keys for rspdict dictionary
- * @param volname Volname whose snap list is requested
- * @param snapname snap name. This field can be NULL.
- * @param detail if 1 then more details will be added for snap list
- *
- * @return -1 on failure and 0 on success.
- */
-static int
-glusterd_snapshot_vol_get_snaplist_by_name (dict_t *dict, char *keyprefix,
- char *volname, char *snapname,
- int8_t detail)
-{
- int ret = -1;
- glusterd_volinfo_t *volinfo = NULL;
- xlator_t *this = NULL;
+ ret = dict_set_int32 (dict, "snap-count", 1);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set snapcount");
+ goto out;
+ }
- this = THIS;
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ snprintf (err_str, len,
+ "Snap (%s) does not exist", snapname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s", err_str);
+ ret = -1;
+ goto out;
+ }
+ ret = glusterd_snapshot_get_snap_detail (dict, snap,
+ "snap1", NULL);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get snap detail of snap "
+ "%s", snap->snapname);
+ goto out;
+ }
+ break;
+ }
- /* General parameter validation */
- GF_ASSERT (this);
- GF_ASSERT (dict);
- GF_ASSERT (keyprefix);
- GF_ASSERT (volname);
+ case GF_SNAP_INFO_TYPE_VOL:
+ {
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get volname");
+ goto out;
+ }
+ ret = glusterd_snapshot_get_info_by_volume (dict,
+ volname, err_str, len);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get volume info of volume "
+ "%s", volname);
+ goto out;
+ }
+ snap_driven = 0;
+ break;
+ }
+ }
- /* Find te volinfo from the volname */
- ret = glusterd_volinfo_find (volname, &volinfo);
+ ret = dict_set_int8 (dict, "snap-driven", snap_driven);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get volinfo for "
- "%s volume", volname);
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snap-driven");
goto out;
}
- /* Now using the volinfo object get the snap list */
- ret = glusterd_snapshot_vol_get_snaplist (dict, keyprefix, volinfo,
- snapname, detail);
+ /* If everything is successful then send the response back to cli.
+ * In case of failure the caller of this function will take care
+         * of the response */
+ ret = glusterd_op_send_cli_response (op, 0, 0, req, dict, err_str);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get snaplist for "
- "%s volume", volname);
+ gf_log (this->name, GF_LOG_ERROR, "Failed to send cli "
+ "response");
goto out;
}
+ ret = 0;
+
out:
return ret;
}
-
-
-/* This function will retrieve snap list for all the volumes
- * present in a given CG and then serialize them to dict.
- * This function is called under CG lock.
- *
- * @param dict dictionary where response should be serialized
- * @param cg CG object which need to be written into dictionary
- * @param keyprefix Prefix used for all the keys for rspdict dictionary
- * @param cgname CG name.
- * @param detail if 1 then more details will be added for snap list
- *
- * @return -1 on failure and 0 on success.
- */
-static int
-glusterd_snapshot_cg_get_snaplist_lk (dict_t *dict, glusterd_snap_cg_t *cg,
- char *keyprefix, char *cgname,
- int8_t detail)
+/* This function sets all the snapshot names in the dictionary */
+int
+glusterd_snapshot_get_all_snapnames (dict_t *dict)
{
- int ret = -1; /* Failure */
- glusterd_conf_t *conf = NULL;
- char *value = NULL;
- xlator_t *this = NULL;
- int64_t i = 0;
- char key[256]= {0,};
+ int ret = -1;
+ int snapcount = 0;
+ char *snapname = NULL;
+ char key[PATH_MAX] = {0,};
+ glusterd_snap_t *snap = NULL;
+ glusterd_snap_t *tmp_snap = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
this = THIS;
-
- /* General parameter validation */
- GF_ASSERT (this);
- conf = this->private;
-
- GF_ASSERT (conf);
+ priv = this->private;
+ GF_ASSERT (priv);
GF_ASSERT (dict);
- GF_ASSERT (cg);
- GF_ASSERT (keyprefix);
- GF_ASSERT (cgname);
-
- /* CG Name */
- value = gf_strdup (cg->cg_name);
- if (NULL == value) {
- goto out;
- }
-
- ret = snprintf (key, sizeof (key), "%s.cgname", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- ret = dict_set_dynstr (dict, key, value);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set "
- "cg name in dictionary");
- goto out;
- }
-
- /* CG ID */
- value = gf_strdup (uuid_utoa (cg->cg_id));
- if (NULL == value) {
- ret = -1;
- goto out;
- }
-
- ret = snprintf (key, sizeof (key), "%s.cg-id", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- ret = dict_set_dynstr (dict, key, value);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set "
- "cg id in dictionary");
- goto out;
- }
-
- /* Ownership of value transferred to dict. Therefore we must initalize
- * it to NULL */
- value = NULL;
-
- /* Volume count */
- ret = snprintf (key, sizeof (key), "%s.vol-count", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- ret = dict_set_int64 (dict, key, cg->volume_count);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set "
- "volume count in dictionary");
- goto out;
- }
-
- /* Get snap list for all volumes present in the CG */
- for (i = 0; i < cg->volume_count; ++i) {
- ret = snprintf (key, sizeof (key), "%s.vol%ld", keyprefix, i);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- ret = glusterd_snapshot_vol_get_snaplist (dict, key,
- &(cg->volumes[i]), NULL, detail);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get "
- "snaplist for %s volume",
- cg->volumes[i].volname);
- goto out;
- }
- }
-
- if (!detail) {
- /* If detail is not needed then return from here */
- goto out;
- }
- /* If CG description is provided then add that into dictionary */
- if (NULL != cg->description) {
- /* CG Description */
- value = gf_strdup (cg->description);
- if (NULL == value) {
+ list_for_each_entry_safe (snap, tmp_snap, &priv->snapshots, snap_list) {
+ snapcount++;
+ snapname = gf_strdup (snap->snapname);
+ if (!snapname) {
+ gf_log (this->name, GF_LOG_ERROR, "strdup failed");
ret = -1;
goto out;
}
-
- ret = snprintf (key, sizeof (key), "%s.cg-desc", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- ret = dict_set_dynstr (dict, key, value);
+ snprintf (key, sizeof (key), "snapname%d", snapcount);
+ ret = dict_set_dynstr (dict, key, snapname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set "
- "cg description in dictionary");
+ GF_FREE (snapname);
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set %s",
+ key);
goto out;
}
-
- /* Ownership of value transferred to dict. Therefore we must initalize
- * it to NULL */
- value = NULL;
- }
-
-
- /* CG status */
- ret = snprintf (key, sizeof (key), "%s.cg-status", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- switch (cg->cg_status) {
- case GD_SNAP_STATUS_INIT:
- ret = dict_set_str (dict, key, "Init");
- break;
- case GD_SNAP_STATUS_IN_USE:
- ret = dict_set_str (dict, key, "In-use");
- break;
- case GD_SNAP_STATUS_DECOMMISSION:
- ret = dict_set_str (dict, key, "Decommisioned");
- break;
- case GD_SNAP_STATUS_RESTORED:
- ret = dict_set_str (dict, key, "Restored");
- break;
- case GD_SNAP_STATUS_NONE:
- ret = dict_set_str (dict, key, "None");
- break;
- default:
- gf_log (this->name, GF_LOG_ERROR, "Invalid snap "
- "status");
- ret = -1; /* Failure */
- goto out;
}
+ ret = dict_set_int32 (dict, "snap-count", snapcount);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set "
- "snap status in dictionary");
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snapcount");
goto out;
}
ret = 0;
out:
- if (NULL != value) {
- GF_FREE (value);
- }
return ret;
}
-/* This function will retrieve snap list for all the volumes
- * present in a given CG and then serialize them to dict.
- *
- * @param dict dictionary where response should be serialized
- * @param keyprefix Prefix used for all the keys for rspdict dictionary
- * @param cgname CG name.
- * @param detail if 1 then more details will be added for snap list
- *
- * @return -1 on failure and 0 on success.
- */
-static int
-glusterd_snapshot_cg_get_snaplist (dict_t *dict, char *keyprefix,
- char *cgname, int8_t detail)
+/* This function sets all the snapshot names
+ under a given volume in the dictionary */
+int
+glusterd_snapshot_get_vol_snapnames (dict_t *dict, glusterd_volinfo_t *volinfo)
{
- int ret = -1; /* Failure */
- glusterd_conf_t *conf = NULL;
- glusterd_snap_cg_t *cg = NULL;
- xlator_t *this = NULL;
+ int ret = -1;
+ int snapcount = 0;
+ char *snapname = NULL;
+ char key[PATH_MAX] = {0,};
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *tmp_vol = NULL;
+ xlator_t *this = NULL;
this = THIS;
-
- /* General parameter validation */
- GF_ASSERT (this);
- conf = this->private;
-
- GF_ASSERT (conf);
GF_ASSERT (dict);
- GF_ASSERT (keyprefix);
- GF_ASSERT (cgname);
-
- /* Find the CG object from CG name */
- cg = glusterd_find_snap_cg_by_name (conf, cgname);
-
- if (NULL == cg) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get "
- "%s CG", cgname);
- goto out;
- }
-
- /* Got CG. Now serialize the CG content to dictionary */
-
- LOCK (&(cg->lock));
- {
- ret = glusterd_snapshot_cg_get_snaplist_lk (dict, cg, keyprefix,
- cgname, detail);
- }
- UNLOCK (&(cg->lock));
-
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get CG details");
- }
-
- ret = 0; /* Success */
-out:
- return ret;
-}
-
-
-/* This function will retrieve snap list for all the volumes
- * present in voldict dictionary. And then serialize them to
- * rspdict.
- *
- * @param voldict dictionary containing volumes
- * @param rspdict dictionary where response should be serialized
- * @param volcount Total volume count
- * @param keyprefix Prefix used for all the keys for rspdict dictionary
- * @param snapname snap name. This field can be NULL.
- * @param detail if 1 then more details will be added for snap list
- *
- * @return -1 on failure and 0 on success.
- */
-static int
-glusterd_snapshot_get_snaplist (dict_t *voldict, dict_t *rspdict,
- int64_t volcount, char* keyprefix,
- char *snapname, int8_t detail)
-{
- int ret = -1; /* Failure */
- int64_t i = 0;
- char *volname = NULL;
- xlator_t *this = NULL;
- char key[256] = {0,};
-
- this = THIS;
-
- /* General parameter validation */
- GF_ASSERT (this);
- GF_ASSERT (voldict);
- GF_ASSERT (rspdict);
- GF_ASSERT (keyprefix);
-
- /* Write the total volume count into the rspdict */
- ret = snprintf (key, sizeof (key), "%s.vol-count", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- ret = dict_set_int64 (rspdict, key, volcount);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set "
- "volume count in dictionary");
- goto out;
- }
+ GF_ASSERT (volinfo);
- /* For each volume add all the snap list to rspdict dictionary */
- for (i = 0; i < volcount; ++i) {
- /* This key is used to get the volume name from voldict
- * dictionary. Therefore do not use keyprefix here
- */
- ret = snprintf (key, sizeof (key), "vol%ld", i);
- if (ret < 0) { /* Only negative value is error */
+ list_for_each_entry_safe (snap_vol, tmp_vol,
+ &volinfo->snap_volumes, snapvol_list) {
+ snapcount++;
+ snapname = gf_strdup (snap_vol->snapshot->snapname);
+ if (!snapname) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "strdup failed");
+ ret = -1;
goto out;
}
-
- ret = dict_get_str (voldict, key, &volname);
+ snprintf (key, sizeof (key), "snapname%d", snapcount);
+ ret = dict_set_dynstr (dict, key, snapname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get"
- "volname for %s", key);
- goto out;
- }
-
- /* Now for each volume get the snap list */
- ret = snprintf (key, sizeof (key), "%s.vol%ld", keyprefix, i);
- if (ret < 0) { /* Only negative value is error */
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "set %s", key);
+ GF_FREE (snapname);
goto out;
}
+ }
- ret = glusterd_snapshot_vol_get_snaplist_by_name (rspdict, key,
- volname, snapname, detail);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get "
- "snapshot list for %s volume", volname);
- goto out;
- }
+ ret = dict_set_int32 (dict, "snap-count", snapcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snapcount");
+ goto out;
}
- ret = 0; /* Success */
+ ret = 0;
out:
+
return ret;
}
-
-
-/* This function will be called from RPC handler routine.
- * This function is responsible for getting the requested
- * snapshot list into the dictionary.
- *
- * @param req RPC request object. Required for sending a response back.
- * @param op glusterd operation. Required for sending a response back.
- * @param dict pointer to dictionary which will contain both
- * request and response key-pair values.
- * @return -1 on error and 0 on success
- */
int
glusterd_handle_snapshot_list (rpcsvc_request_t *req, glusterd_op_t op,
- dict_t *dict)
+ dict_t *dict, char *err_str, size_t len)
{
- int ret = -1;
- int64_t volcount = 0;
- int vol_count = 0;
- int8_t detail = 0;
- char *keyprefix = "snaplist";
- char *cgname = NULL;
- char *snapname = NULL;
- dict_t *voldict = NULL;
- xlator_t *this = NULL;
- char *err_str = "Operation failed";
- char key[256] = {0,};
+ int ret = -1;
+ char *volname = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ xlator_t *this = NULL;
this = THIS;
- GF_ASSERT (this);
GF_VALIDATE_OR_GOTO (this->name, req, out);
GF_VALIDATE_OR_GOTO (this->name, dict, out);
- /* Get the request key-pair from the dictionary */
-
- /* All these options are optonal. Therefore ignore
- * error returned by following dictionary operations
- */
- ret = dict_get_str (dict, "snapname", &snapname);
- /* Ignore error */
- ret = dict_get_int8 (dict, "snap-details", &detail);
-
- ret = dict_get_int64 (dict, "vol-count", &volcount);
- if (ret) {
- /* Ignore error */
- ret = dict_get_str (dict, "cgname", &cgname);
- }
+ /* Ignore error for getting volname as it is optional */
+ ret = dict_get_str (dict, "volname", &volname);
-
- /* If volume names are passed as argument then we should
- * get all the snapshots for the said volumes.
- */
- if (volcount > 0) {
- ret = glusterd_snapshot_get_snaplist (dict, dict, volcount,
- keyprefix, snapname,
- detail);
+ if (NULL == volname) {
+ ret = glusterd_snapshot_get_all_snapnames (dict);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get "
- "snapshot list");
- goto out;
- }
- } else if (NULL != cgname) {
- /* If CG (Consistency Group) name is passed as argument then
- * we should get snapshots of all the volumes present in the
- * said CG
- */
-
- /* TODO: Handle multiple CG if needed */
- ret = snprintf (key, sizeof (key), "%s.cg-0", keyprefix);
- if (ret < 0) { /* Only negative value is error */
- goto out;
- }
-
- ret = glusterd_snapshot_cg_get_snaplist (dict, key,
- cgname, detail);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get "
- "snapshot list for %s CG", cgname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get snapshot list");
goto out;
}
} else {
- /* If no argument is provided then we should get snapshots of
- * all the volumes managed by this server
- */
-
- /* Create a dictionary to hold all the volumes retrieved from
- * glusterd
- */
- voldict = dict_new ();
- if (NULL == voldict) {
- ret = -1;
- goto out;
- }
-
- /* Get all the volumes from glusterd */
- ret = glusterd_get_all_volnames (voldict);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get all "
- "volume names");
- goto out;
- }
-
- /* Get the volume count */
- ret = dict_get_int32 (voldict, "vol_count", &vol_count);
+ ret = glusterd_volinfo_find (volname, &volinfo);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get volume"
- " count");
+ snprintf (err_str, len,
+ "Volume (%s) does not exist", volname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s", err_str);
goto out;
}
- volcount = vol_count;
- /* Get snap list for all the volumes*/
- ret = glusterd_snapshot_get_snaplist (voldict, dict, volcount,
- keyprefix, NULL, detail);
+ ret = glusterd_snapshot_get_vol_snapnames (dict, volinfo);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get "
- "snapshot list");
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get snapshot list for volume %s",
+ volname);
goto out;
}
}
/* If everything is successful then send the response back to cli.
- * In case of failure the caller of this function will take of response.*/
- ret = glusterd_op_send_cli_response (op, 0, 0,
- req, dict, err_str);
+ In case of failure the caller of this function will take of response.*/
+ ret = glusterd_op_send_cli_response (op, 0, 0, req, dict, err_str);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Failed to send cli "
"response");
goto out;
}
- ret = 0; /* Success */
+ ret = 0;
out:
- if (voldict) {
- dict_unref (voldict);
- }
return ret;
}
-/* TODO: This function needs a revisit.
- *
- * As of now only one snap is supported per CG. This function will
- * retrieve the snap name which bleongs to the CG and put it in the
- * dictionary.
+/* This is a snapshot create handler function. This function will be
+ * executed in the originator node. This function is responsible for
+ * calling mgmt_v3 framework to do the actual snap creation on all the bricks
*
+ * @param req RPC request object
+ * @param op gluster operation
* @param dict dictionary containing snapshot restore request
- * @param cg CG object.
- * in this variable
+ * @param err_str In case of an err this string should be populated
+ * @param len length of err_str buffer
+ *
* @return Negative value on Failure and 0 in success
*/
int
-glusterd_get_cg_snap_name_lk (dict_t *dict, glusterd_snap_cg_t *cg)
+glusterd_handle_snapshot_create (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict, char *err_str, size_t len)
{
- int ret = -1;
- uint64_t snap_count = 0;
- char *snapname = NULL;
- glusterd_snap_t *snap = NULL;
- xlator_t *this = NULL;
+ int ret = -1;
+ char *volname = NULL;
+ char *snapname = NULL;
+ int64_t volcount = 0;
+ xlator_t *this = NULL;
+ char key[PATH_MAX] = "";
+ char *username = NULL;
+ char *password = NULL;
+ uuid_t *uuid_ptr = NULL;
+ uuid_t tmp_uuid = {0};
+ int i = 0;
this = THIS;
-
GF_ASSERT (this);
+ GF_ASSERT (req);
GF_ASSERT (dict);
- GF_ASSERT (cg);
- /* CG should have at least one volume*/
- GF_ASSERT (cg->volume_count > 0);
+ GF_ASSERT (err_str);
- /* TODO: As of now only one snap is supported per CG When CG
- * management module comes in then this restriction can be removed.
- */
- snap_count = cg->volumes[0].snap_count;
- if (1 != snap_count) {
- gf_log (this->name, GF_LOG_ERROR, "More than one snap is "
- "associated with the cg (%s)", cg->cg_name);
+ ret = dict_get_int64 (dict, "volcount", &volcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to "
+ "get the volume count");
+ goto out;
+ }
+ if (volcount <= 0) {
+ gf_log (this->name, GF_LOG_ERROR, "Invalid volume count %ld "
+ "supplied", volcount);
ret = -1;
goto out;
}
- snap = glusterd_find_snap_by_index (&(cg->volumes[0]), 0);
- if (NULL == snap) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get snap for "
- "%s CG", cg->cg_name);
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "failed to get the snapname");
+ goto out;
+ }
+
+ if (strlen(snapname) >= GLUSTERD_MAX_SNAP_NAME) {
+ snprintf (err_str, len, "snapname cannot exceed 255 "
+ "characters");
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
ret = -1;
goto out;
}
- snapname = gf_strdup (snap->snap_name);
- if (NULL == snapname) {
+ uuid_ptr = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!uuid_ptr) {
+ gf_log (this->name, GF_LOG_ERROR, "Out Of Memory");
ret = -1;
goto out;
}
- ret = dict_set_dynstr (dict, "snapname", snapname);
+ uuid_generate (*uuid_ptr);
+ ret = dict_set_bin (dict, "snap-id", uuid_ptr, sizeof(uuid_t));
if (ret) {
- GF_FREE (snapname);
- gf_log (this->name, GF_LOG_ERROR, "Failed to "
- "set snap name");
+ gf_log (this->name, GF_LOG_ERROR, "Unable to set snap-id");
+ GF_FREE (uuid_ptr);
goto out;
}
+ uuid_ptr = NULL;
+
+ ret = dict_set_int64 (dict, "snap-time", (int64_t)time(NULL));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to set snap-time");
+ goto out;
+ }
+
+ for (i = 1; i <= volcount; i++) {
+ snprintf (key, sizeof (key), "volname%d", i);
+ ret = dict_get_str (dict, key, &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get volume name");
+ goto out;
+ }
+
+ /* generate internal username and password for the snap*/
+ uuid_generate (tmp_uuid);
+ username = gf_strdup (uuid_utoa (tmp_uuid));
+ snprintf (key, sizeof(key), "volume%d_username", i);
+ ret = dict_set_dynstr (dict, key, username);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snap "
+ "username for volume %s", volname);
+ GF_FREE (username);
+ goto out;
+ }
+
+ uuid_generate (tmp_uuid);
+ password = gf_strdup (uuid_utoa (tmp_uuid));
+ snprintf (key, sizeof(key), "volume%d_password", i);
+ ret = dict_set_dynstr (dict, key, password);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set snap "
+ "password for volume %s", volname);
+ GF_FREE (password);
+ goto out;
+ }
+
+ uuid_ptr = GF_CALLOC (1, sizeof(uuid_t), gf_common_mt_uuid_t);
+ if (!uuid_ptr) {
+ gf_log (this->name, GF_LOG_ERROR, "Out Of Memory");
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (key, sizeof(key) - 1, "vol%d_volid", i);
+ uuid_generate (*uuid_ptr);
+ ret = dict_set_bin (dict, key, uuid_ptr, sizeof(uuid_t));
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set snap_volid");
+ GF_FREE (uuid_ptr);
+ goto out;
+ }
+ }
+
+ ret = glusterd_mgmt_v3_initiate_snap_phases (req, op, dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to initiate snap "
+ "phases");
+ }
out:
return ret;
}
-/* This is a helper function will get all the volume names present in CG
- * and write into dictionary.
+/* This is a snapshot status handler function. This function will be
+ * executed in a originator node. This function is responsible for
+ * calling mgmt v3 framework to get the actual snapshot status from
+ * all the bricks
+ *
+ * @param req RPC request object
+ * @param op gluster operation
+ * @param dict dictionary containing snapshot status request
+ * @param err_str In case of an err this string should be populated
+ * @param len length of err_str buffer
+ *
+ * return : 0 in case of success.
+ * -1 in case of failure.
*
- * @param dict dictionary where volume names should be written
- * @param cg CG object.
- * in this variable
- * @return Negative value on Failure and 0 in success
*/
int
-glusterd_get_cg_volume_names_lk (dict_t *dict, glusterd_snap_cg_t *cg)
+glusterd_handle_snapshot_status (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict, char *err_str, size_t len)
{
- int ret = -1;
- int64_t i = 0;
- char *volname = NULL;
- xlator_t *this = NULL;
- char key[PATH_MAX] = {0,};
+ int ret = -1;
+ char *volname = NULL;
+ char *snapname = NULL;
+ char *buf = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+ int32_t cmd = -1;
+ int i = 0;
+ dict_t *voldict = NULL;
+ char key[PATH_MAX] = "";
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_volinfo_t *snap_volinfo = NULL;
this = THIS;
-
GF_ASSERT (this);
+ conf = this->private;
+
+ GF_ASSERT (conf);
+ GF_ASSERT (req);
GF_ASSERT (dict);
- GF_ASSERT (cg);
+ GF_ASSERT (err_str);
- ret = dict_set_int64 (dict, "volcount", cg->volume_count);
+ ret = dict_get_int32 (dict, "cmd", &cmd);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to "
- "set volume count");
+ gf_log (this->name, GF_LOG_ERROR, "Could not get status type");
goto out;
}
+ switch (cmd) {
+ case GF_SNAP_STATUS_TYPE_ALL:
+ {
+ /* IF we give "gluster snapshot status"
+ * then lock is held on all snaps.
+ * This is the place where necessary information
+ * (snapname and snapcount)is populated in dictionary
+ * for locking.
+ */
+ ++i;
+ list_for_each_entry (snap, &conf->snapshots, snap_list)
+ {
+ snprintf (key, sizeof (key), "snapname%d", i);
+ buf = gf_strdup (snap->snapname);
+ if (!buf) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr (dict, key, buf);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save snapname (%s) "
+ "in the dictionary",
+ snap->snapname);
+ GF_FREE (buf);
+ goto out;
+ }
+
+ buf = NULL;
+ i++;
+ }
- /* Set volume name of all the volumes present in CG in dict so that
- * Jarvis can use this to acquire volume locks on all the volume
- * present in the CG.
- */
- for (i = 0; i < cg->volume_count; ++i) {
- /* TODO: When Jarvis framework is fixed change the index
- * to start from 0 instead of 1
- */
- snprintf (key, sizeof (key), "volname%ld", i+1);
- volname = gf_strdup (cg->volumes[i].volname);
- if (NULL == volname) {
- ret = -1;
- goto out;
+ ret = dict_set_int32 (dict, "snapcount", i - 1);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not "
+ "save snapcount in the dictionary");
+ goto out;
+ }
+ break;
}
- ret = dict_set_dynstr (dict, key, volname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR,
- "Failed to set volname");
- GF_FREE (volname);
- volname = NULL;
+ case GF_SNAP_STATUS_TYPE_SNAP:
+ {
+ /* IF we give "gluster snapshot status <snapname>"
+ * then lock is held on single snap.
+ * This is the place where necessary information
+ * (snapname)is populated in dictionary
+ * for locking.
+ */
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to fetch snap name");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ snprintf (err_str, len, "Snap (%s)"
+ "does not exist", snapname);
+ gf_log(this->name, GF_LOG_ERROR,
+ "%s", err_str);
+ ret = -1;
+ goto out;
+ }
+ break;
+ }
+ case GF_SNAP_STATUS_TYPE_VOL:
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to fetch volname");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (err_str, len, "Volume (%s) "
+ "does not exist", volname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s", err_str);
+ goto out;
+ }
+
+ i = 1;
+ list_for_each_entry (snap_volinfo,
+ &volinfo->snap_volumes, snapvol_list) {
+ snprintf (key, sizeof (key), "snapname%d", i);
+
+ buf = gf_strdup
+ (snap_volinfo->snapshot->snapname);
+ if (!buf) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (dict, key, buf);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save snapname");
+ GF_FREE (buf);
+ goto out;
+ }
+
+ buf = NULL;
+ i++;
+ }
+
+ ret = dict_set_int32 (dict, "snapcount", i-1);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save snapcount");
+ goto out;
+ }
+ break;
+ default:
+ {
+ gf_log (this->name, GF_LOG_ERROR, "Unknown type");
+ ret = -1;
goto out;
}
}
+
+ /* Volume lock is not necessary for snapshot status, hence
+ * turning it off
+ */
+ ret = dict_set_int8 (dict, "hold_vol_locks", 0);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Setting volume lock "
+ "flag failed");
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_initiate_snap_phases (req, op, dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to initiate "
+ "snap phases");
+ goto out;
+ }
+
ret = 0;
+
out:
+ if (voldict) {
+ dict_unref (voldict);
+ }
return ret;
}
+
/* This is a snapshot restore handler function. This function will be
* executed in the originator node. This function is responsible for
* calling mgmt_v3 framework to do the actual restore on all the bricks
@@ -2109,13 +2365,14 @@ glusterd_handle_snapshot_restore (rpcsvc_request_t *req, glusterd_op_t op,
dict_t *dict, char *err_str, size_t len)
{
int ret = -1;
- int64_t vol_count = 0;
- char *volname = NULL;
char *snapname = NULL;
- char *cgname = NULL;
+ char *buf = NULL;
glusterd_conf_t *conf = NULL;
- glusterd_snap_cg_t *cg = NULL;
xlator_t *this = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_volinfo_t *snap_volinfo = NULL;
+ int32_t i = 0;
+ char key[PATH_MAX] = "";
this = THIS;
GF_ASSERT (this);
@@ -2126,61 +2383,45 @@ glusterd_handle_snapshot_restore (rpcsvc_request_t *req, glusterd_op_t op,
GF_ASSERT (dict);
GF_ASSERT (err_str);
- /* If volume name is provided then volcount will be set */
- ret = dict_get_int64 (dict, "volcount", &vol_count);
+ ret = dict_get_str (dict, "snapname", &snapname);
if (ret) {
- /* If volcount is not provided then cgname must be there */
- ret = dict_get_str (dict, "cgname", &cgname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to "
- "get neither volcount nor cgname");
- goto out;
- }
- } else {
- /* TODO: Change the index to 0 when Jarvis code is fixed */
- ret = dict_get_str (dict, "volname1", &volname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to "
- "get volname");
- goto out;
- }
- ret = dict_get_str (dict, "snapname", &snapname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to "
- "get snapname");
- goto out;
- }
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "get snapname");
+ goto out;
}
- if (NULL != cgname) { /* Snapshot restore of CG */
- cg = glusterd_find_snap_cg_by_name (conf, cgname);
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ snprintf (err_str, len, "Snap (%s) does not exist", snapname);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ ret = -1;
+ goto out;
+ }
- if (NULL == cg) {
- snprintf (err_str, len, "CG %s not found", cgname);
- gf_log (this->name, GF_LOG_WARNING, "%s", err_str);
+ list_for_each_entry (snap_volinfo, &snap->volumes, vol_list) {
+ i++;
+ snprintf (key, sizeof (key), "volname%d", i);
+ buf = gf_strdup (snap_volinfo->parent_volname);
+ if (!buf) {
ret = -1;
goto out;
}
-
- LOCK (&cg->lock);
- {
- /* First get the snap name of the CG */
- ret = glusterd_get_cg_snap_name_lk (dict, cg);
- if (ret) {
- goto unlock;
- }
-
- /* Then get the volumes belong to CG */
- ret = glusterd_get_cg_volume_names_lk (dict, cg);
- }
-unlock:
- UNLOCK (&cg->lock);
-
+ ret = dict_set_dynstr (dict, key, buf);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to get "
- "volume names or snap name of %s CG", cgname);
+ gf_log (this->name, GF_LOG_ERROR, "Could not set "
+ "parent volume name %s in the dict",
+ snap_volinfo->parent_volname);
+ GF_FREE (buf);
goto out;
}
+ buf = NULL;
+ }
+
+ ret = dict_set_int32 (dict, "volcount", i);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save volume count");
+ goto out;
}
ret = glusterd_mgmt_v3_initiate_snap_phases (req, op, dict);
@@ -2190,198 +2431,141 @@ unlock:
goto out;
}
- ret = 0; /* Success */
+ ret = 0;
+
out:
return ret;
}
-/* this should be the last thing to be done.
- 1. Do op stage related checks such as whether volume is there or not etc
- 2. Do quorum checks.
- 3. Then do this and take the snapshot OR take the snapshot and build the
- snap object (Can be decided later)
-*/
-int32_t
-glusterd_snap_create (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snap_volinfo,
- char *description, uuid_t cg_id)
+glusterd_snap_t*
+glusterd_create_snap_object (dict_t *dict, dict_t *rsp_dict)
{
- glusterd_snap_t *snap = NULL;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- int ret = -1;
- uuid_t snap_uuid;
+ char *snapname = NULL;
+ uuid_t *snap_id = NULL;
+ char *description = NULL;
+ glusterd_snap_t *snap = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ int ret = -1;
+ int64_t time_stamp = 0;
this = THIS;
priv = this->private;
- if (!volinfo) {
- gf_log (this->name, GF_LOG_ERROR, "volinfo is NULL");
- goto out;
- }
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
- snap = glusterd_new_snap_object ();
- if (!snap) {
- gf_log (this->name, GF_LOG_ERROR, "could not create "
- "the snap object fot the volume %s (snap: %s)",
- volinfo->volname, snap_volinfo->volname);
+ /* Fetch snapname, description, id and time from dict */
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch snapname");
goto out;
}
+ /* Ignore ret value for description*/
+ ret = dict_get_str (dict, "description", &description);
- // for now ignore if description is not strduped
- if (description)
- snap->description = gf_strdup (description);
- snap->time_stamp = time (NULL);
- uuid_generate (snap_uuid);
- uuid_copy (snap->snap_id, snap_uuid);
- if (!uuid_is_null (cg_id))
- uuid_copy (snap->cg_id, cg_id);
- snap->snap_volume = snap_volinfo;
- uuid_copy (snap_volinfo->volume_id, snap_uuid);
- strcpy (snap->snap_name, snap_volinfo->volname);
- //TODO: replace strcpy with strncpy
-
- ret = glusterd_add_snap (volinfo, snap);
+ ret = dict_get_bin (dict, "snap-id", (void **)&snap_id);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "could not add the snap "
- "object %s to the snap list of the volume %s",
- snap_volinfo->volname, volinfo->volname);
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch snap_id");
goto out;
}
- snap->snap_status = GD_SNAP_STATUS_IN_USE;
-
-out:
+ ret = dict_get_int64 (dict, "snap-time", &time_stamp);
if (ret) {
- if (snap) {
- list_del_init (&snap->snap_list);
- LOCK_DESTROY (&snap->lock);
- GF_FREE (snap->description);
- GF_FREE (snap->snap_volume);
- GF_FREE (snap);
- }
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch snap-time");
+ goto out;
}
- return ret;
-}
-
-int
-glusterd_remove_snapshot (glusterd_brickinfo_t *brickinfo, char *volname,
- char *snapname, const char *snap_device)
-{
- int ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- runner_t runner = {0,};
- char msg[1024] = {0, };
-
- this = THIS;
- priv = this->private;
-
- if (!brickinfo) {
- gf_log (this->name, GF_LOG_ERROR, "brickinfo NULL");
+ if (time_stamp <= 0) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Invalid time-stamp: %ld",
+ time_stamp);
goto out;
}
- snprintf (msg, sizeof(msg), "remove snapshot of the brick %s:%s, "
- "device: %s", brickinfo->hostname, brickinfo->path,
- snap_device);
- runner_add_args (&runner, "/sbin/lvmremove", snap_device);
- runner_log (&runner, "", GF_LOG_DEBUG, msg);
-
- //let glusterd get blocked till snapshot is over
- synclock_unlock (&priv->big_lock);
- ret = runner_run (&runner);
- synclock_lock (&priv->big_lock);
-
+ list_for_each_entry (snap, &priv->snapshots, snap_list) {
+ if (!strcmp (snap->snapname, snapname) ||
+ !uuid_compare (snap->snap_id, *snap_id)) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Found duplicate snap %s (%s)",
+ snap->snapname, uuid_utoa (snap->snap_id));
+ ret = -1;
+ break;
+ }
+ }
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "removing snapshot of the "
- "brick (%s:%s) of device %s failed",
- brickinfo->hostname, brickinfo->path, snap_device);
+ snap = NULL;
goto out;
}
-out:
- return ret;
-}
-
-int32_t
-glusterd_brick_snapshot_remove (glusterd_volinfo_t *snap_volinfo,
- glusterd_volinfo_t *actual_volinfo, char *name)
-{
- char *mnt_pt = NULL;
- struct mntent *entry = NULL;
- int32_t ret = -1;
- glusterd_brickinfo_t *brickinfo = NULL;
- xlator_t *this = NULL;
- FILE *mtab = NULL;
-
- this = THIS;
- GF_ASSERT (this);
-
- if (!snap_volinfo) {
- gf_log (this->name, GF_LOG_ERROR, "snap volinfo is NULL");
+ snap = glusterd_new_snap_object ();
+ if (!snap) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not create "
+ "the snap object for snap %s", snapname);
goto out;
}
- if (!actual_volinfo) {
- gf_log (this->name, GF_LOG_ERROR, "volinfo for the volume "
- "is NULL");
- goto out;
+ strcpy (snap->snapname, snapname);
+ uuid_copy (snap->snap_id, *snap_id);
+ snap->time_stamp = (time_t)time_stamp;
+ /* Set the status as GD_SNAP_STATUS_INIT and once the backend snapshot
+ is taken and snap is really ready to use, set the status to
+ GD_SNAP_STATUS_IN_USE. This helps in identifying the incomplete
+ snapshots and cleaning them up.
+ */
+ snap->snap_status = GD_SNAP_STATUS_INIT;
+ if (description) {
+ snap->description = gf_strdup (description);
+ if (snap->description == NULL) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Saving the Snap Description Failed");
+ ret = -1;
+ goto out;
+ }
}
- if (!name) {
- gf_log (this->name, GF_LOG_ERROR, "snapname is NULL "
- "(volume: %s)", actual_volinfo->volname);
+ ret = glusterd_store_snap (snap);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Could not store snap"
+ "object %s", snap->snapname);
goto out;
}
- list_for_each_entry (brickinfo, &snap_volinfo->bricks, brick_list) {
- ret = glusterd_get_brick_root (brickinfo->path, &mnt_pt);
- if (ret)
- goto out;
+ list_add_order (&snap->snap_list, &priv->snapshots,
+ glusterd_compare_snap_time);
- entry = glusterd_get_mnt_entry_info (mnt_pt, mtab);
- if (!entry) {
- ret = -1;
- goto out;
- }
- ret = glusterd_remove_snapshot (brickinfo,
- actual_volinfo->volname,
- name, entry->mnt_fsname);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "failed to "
- "remove the snapshot %s (%s)",
- brickinfo->path, entry->mnt_fsname);
- goto out;
- }
- }
+ gf_log (this->name, GF_LOG_TRACE, "Snap %s added to the list",
+ snap->snapname);
ret = 0;
out:
- if (mtab)
- endmntent (mtab);
- return ret;
+ if (ret) {
+ if (snap)
+ glusterd_snap_remove (rsp_dict, snap,
+ _gf_true, _gf_true);
+ snap = NULL;
+ }
+
+ return snap;
}
/* This function is called to get the device path of the snap lvm. Usually
- if /dev/<group-name>/<group-name>-<lvm-name> is the device for the lvm,
- then the snap device will be /dev/<group-name>/<group-name>-<snap-name>.
+ if /dev/mapper/<group-name>-<lvm-name> is the device for the lvm,
+ then the snap device will be /dev/<group-name>/<snapname>.
This function takes care of building the path for the snap device.
*/
char *
glusterd_build_snap_device_path (char *device, char *snapname)
{
- char *tmp = NULL;
- char snap[PATH_MAX] = {0, };
- char msg[1024] = {0, };
- char *str = NULL;
- int device_len = 0;
- int tmp_len = 0;
- int var = 0;
- char *snap_device = NULL;
- xlator_t *this = NULL;
+ char snap[PATH_MAX] = "";
+ char msg[1024] = "";
+ char volgroup[PATH_MAX] = "";
+ char *snap_device = NULL;
+ xlator_t *this = NULL;
+ runner_t runner = {0,};
+ char *ptr = NULL;
+ int ret = -1;
this = THIS;
GF_ASSERT (this);
@@ -2389,42 +2573,44 @@ glusterd_build_snap_device_path (char *device, char *snapname)
gf_log (this->name, GF_LOG_ERROR, "device is NULL");
goto out;
}
+ if (!snapname) {
+ gf_log (this->name, GF_LOG_ERROR, "snapname is NULL");
+ goto out;
+ }
- device_len = strlen (device);
-
- tmp = strrchr (device, '/');
- if (tmp)
- tmp++;
- str = gf_strdup (tmp);
- if (!str) {
+ runinit (&runner);
+ runner_add_args (&runner, "/sbin/lvs", "--noheadings", "-o", "vg_name",
+ device, NULL);
+ runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
+ snprintf (msg, sizeof (msg), "Get volume group for device %s", device);
+ runner_log (&runner, this->name, GF_LOG_DEBUG, msg);
+ ret = runner_start (&runner);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get volume group "
+ "for device %s", device);
+ runner_end (&runner);
+ goto out;
+ }
+ ptr = fgets(volgroup, sizeof(volgroup),
+ runner_chio (&runner, STDOUT_FILENO));
+ if (!ptr || !strlen(volgroup)) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get volume group "
+ "for snap %s", snapname);
+ runner_end (&runner);
+ ret = -1;
goto out;
}
+ runner_end (&runner);
- tmp_len = strlen (str);
- var = device_len - tmp_len;
- device[var] = '\0';
- tmp = strchr (str, '-');
- if (tmp)
- tmp++;
- device_len = tmp_len;
- tmp_len = strlen (tmp);
- var = device_len - tmp_len;
- str[var] = '\0';
- msg[0] = '\0';
- strcpy (msg, str);
- strcat (msg, snapname);
- strcpy (snap, device);
- strcat (snap, msg);
+ snprintf (snap, sizeof(snap), "/dev/%s/%s", gf_trim(volgroup),
+ snapname);
snap_device = gf_strdup (snap);
if (!snap_device) {
- gf_log (this->name, GF_LOG_WARNING, "cannot copy the "
- "snapshot device name "
- "snapname: %s)", snapname);
- goto out;
+ gf_log (this->name, GF_LOG_WARNING, "Cannot copy the "
+ "snapshot device name for snapname: %s)", snapname);
}
out:
- GF_FREE (str);
return snap_device;
}
@@ -2433,20 +2619,21 @@ out:
then call the glusterd_snap_create function to create the snap object
for glusterd
*/
-int
-glusterd_take_snapshot (glusterd_brickinfo_t *brickinfo, char *volname,
- char *snapname, dict_t *dict, char **snap_device)
+char *
+glusterd_take_lvm_snapshot (glusterd_volinfo_t *snap_vol,
+ glusterd_brickinfo_t *brickinfo)
{
- int ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- runner_t runner = {0,};
- char *device = NULL;
- char msg[1024] = {0, };
- char *tmp = NULL;
+ char msg[NAME_MAX] = "";
+ char buf[PATH_MAX] = "";
+ char *snap_device = NULL;
+ char *ptr = NULL;
+ char *device = NULL;
+ int ret = -1;
+ gf_boolean_t match = _gf_false;
+ runner_t runner = {0,};
+ xlator_t *this = NULL;
this = THIS;
- priv = this->private;
if (!brickinfo) {
gf_log (this->name, GF_LOG_ERROR, "brickinfo NULL");
@@ -2461,487 +2648,1134 @@ glusterd_take_snapshot (glusterd_brickinfo_t *brickinfo, char *volname,
goto out;
}
+ /* Figuring out if setactivationskip flag is supported or not */
runinit (&runner);
- snprintf (msg, sizeof (msg), "taking snapshot of the brick %s:%s",
- brickinfo->hostname, brickinfo->path);
- runner_add_args (&runner, "/sbin/lvcreate", "-s", device, "--name",
- snapname);
+ snprintf (msg, sizeof (msg), "running lvcreate help");
+ runner_add_args (&runner, "/sbin/lvcreate", "--help", NULL);
runner_log (&runner, "", GF_LOG_DEBUG, msg);
+ runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
+ ret = runner_start (&runner);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to run lvcreate help");
+ runner_end (&runner);
+ goto out;
+ }
- //let glusterd get blocked till snapshot is over
- synclock_unlock (&priv->big_lock);
- ret = runner_run (&runner);
- synclock_lock (&priv->big_lock);
+ /* Looking for setactivationskip in lvcreate --help */
+ do {
+ ptr = fgets(buf, sizeof(buf),
+ runner_chio (&runner, STDOUT_FILENO));
+ if (ptr) {
+ if (strstr(buf, "setactivationskip")) {
+ match = _gf_true;
+ break;
+ }
+ }
+ } while (ptr != NULL);
+ runner_end (&runner);
+ /* Taking the actual snapshot */
+ runinit (&runner);
+ snprintf (msg, sizeof (msg), "taking snapshot of the brick %s:%s",
+ brickinfo->hostname, brickinfo->path);
+ if (match == _gf_true)
+ runner_add_args (&runner, "/sbin/lvcreate", "-s", device,
+ "--setactivationskip", "n", "--name",
+ snap_vol->volname, NULL);
+ else
+ runner_add_args (&runner, "/sbin/lvcreate", "-s", device,
+ "--name", snap_vol->volname, NULL);
+ runner_log (&runner, "", GF_LOG_DEBUG, msg);
+ ret = runner_start (&runner);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "taking snapshot of the "
"brick (%s:%s) of device %s failed",
brickinfo->hostname, brickinfo->path, device);
+ runner_end (&runner);
goto out;
}
+ runner_end (&runner);
- gf_log (this->name, GF_LOG_INFO, "device: %s", device);
- if (device) {
- tmp = glusterd_build_snap_device_path (device, snapname);
- *snap_device = tmp;
- if (!*snap_device) {
- gf_log (this->name, GF_LOG_WARNING, "cannot copy the "
- "snapshot device name (volname: %s, "
- "snapname: %s)", volname, snapname);
- ret = -1;
- goto out;
- }
+ snap_device = glusterd_build_snap_device_path (device,
+ snap_vol->volname);
+ if (!snap_device) {
+ gf_log (this->name, GF_LOG_WARNING, "Cannot copy the snapshot "
+ "device name for snap %s (volume id: %s)",
+ snap_vol->snapshot->snapname, snap_vol->volname);
+ ret = -1;
+ goto out;
}
out:
- return ret;
+ return snap_device;
}
int32_t
glusterd_snap_brick_create (char *device, glusterd_volinfo_t *snap_volinfo,
- glusterd_brickinfo_t *original_brickinfo)
+ glusterd_brickinfo_t *original_brickinfo,
+ int32_t brick_count, char *snap_brick_dir)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_brickinfo_t *snap_brickinfo = NULL;
- char snap_brick_mount_path[PATH_MAX] = {0, };
- char *tmp = NULL;
- char *mnt_pt = NULL;
- struct mntent *entry = NULL;
- FILE *mtab = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ char snap_brick_mount_path[PATH_MAX] = "";
+ char snap_brick_path[PATH_MAX] = "";
+ char msg[1024] = "";
+ struct stat statbuf = {0, };
+ runner_t runner = {0, };
this = THIS;
priv = this->private;
- if (!device) {
- gf_log (this->name, GF_LOG_ERROR, "device is NULL");
+ GF_ASSERT (device);
+ GF_ASSERT (snap_volinfo);
+ GF_ASSERT (original_brickinfo);
+ GF_ASSERT (snap_brick_dir);
+
+ snprintf (snap_brick_mount_path, sizeof (snap_brick_mount_path),
+ "%s/%s/brick%d", snap_mount_folder, snap_volinfo->volname,
+ brick_count+1);
+
+ snprintf (snap_brick_path, sizeof (snap_brick_path), "%s%s",
+ snap_brick_mount_path, snap_brick_dir);
+
+ ret = mkdir_p (snap_brick_mount_path, 0777, _gf_true);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "creating the brick directory"
+ " %s for the snapshot %s(device: %s) failed",
+ snap_brick_mount_path, snap_volinfo->volname, device);
goto out;
}
+ /* mount the snap logical device on the directory inside
+ /run/gluster/snaps/<snapname>/@snap_brick_mount_path
+ Way to mount the snap brick via mount api is this.
+ ret = mount (device, snap_brick_mount_path, entry->mnt_type,
+ MS_MGC_VAL, "nouuid");
+ But for now, mounting using runner apis.
+ */
+ runinit (&runner);
+ snprintf (msg, sizeof (msg), "mounting snapshot of the brick %s:%s",
+ original_brickinfo->hostname, original_brickinfo->path);
+ runner_add_args (&runner, "mount", "-o", "nouuid", device,
+ snap_brick_mount_path, NULL);
+ runner_log (&runner, "", GF_LOG_DEBUG, msg);
- if (!snap_volinfo) {
- gf_log (this->name, GF_LOG_ERROR, "snap volinfo is NULL");
+ /* let glusterd get blocked till snapshot is over */
+ synclock_unlock (&priv->big_lock);
+ ret = runner_run (&runner);
+ synclock_lock (&priv->big_lock);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "mounting the snapshot "
+ "logical device %s failed (error: %s)", device,
+ strerror (errno));
goto out;
- }
+ } else
+ gf_log (this->name, GF_LOG_DEBUG, "mounting the snapshot "
+ "logical device %s successful", device);
- if (!original_brickinfo) {
- gf_log (this->name, GF_LOG_ERROR, "original brickinfo is NULL"
- "(snap: %s)", snap_volinfo->volname);
+ ret = stat (snap_brick_path, &statbuf);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "stat of the brick %s"
+ "(brick mount: %s) failed (%s)", snap_brick_path,
+ snap_brick_mount_path, strerror (errno));
goto out;
}
-
- tmp = gf_strdup (device);
- if (!tmp) {
- gf_log (this->name, GF_LOG_INFO, "out of memory");
+ ret = sys_lsetxattr (snap_brick_path,
+ GF_XATTR_VOL_ID_KEY,
+ snap_volinfo->volume_id, 16,
+ XATTR_REPLACE);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "extended attribute %s on %s. Reason: "
+ "%s, snap: %s", GF_XATTR_VOL_ID_KEY,
+ snap_brick_path, strerror (errno),
+ snap_volinfo->volname);
goto out;
}
- glusterd_replace_slash_with_hyphen (tmp);
- if (tmp[0] == '-')
- tmp[0] = '/';
+out:
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "unmounting the snap brick"
+ " mount %s", snap_brick_mount_path);
+ umount (snap_brick_mount_path);
+ }
- snprintf (snap_brick_mount_path, sizeof (snap_brick_mount_path),
- "%s/%s%s-brick", GLUSTERD_DEFAULT_SNAPS_BRICK_DIR,
- snap_volinfo->volname, tmp);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
- ret = glusterd_get_brick_root (original_brickinfo->path, &mnt_pt);
- if (ret)
- goto out;
+/* Added missed_snap_entry to rsp_dict */
+int32_t
+glusterd_add_missed_snaps_to_dict (dict_t *rsp_dict, char *snap_uuid,
+ glusterd_brickinfo_t *brickinfo,
+ int32_t brick_number, int32_t op)
+{
+ char *buf = NULL;
+ char missed_snap_entry[PATH_MAX] = "";
+ char name_buf[PATH_MAX] = "";
+ int32_t missed_snap_count = -1;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
- entry = glusterd_get_mnt_entry_info (mnt_pt, mtab);
- if (!entry) {
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (snap_uuid);
+ GF_ASSERT (brickinfo);
+
+ snprintf (missed_snap_entry, sizeof(missed_snap_entry),
+ "%s:%s=%d:%s:%d:%d", uuid_utoa(brickinfo->uuid),
+ snap_uuid, brick_number, brickinfo->path, op,
+ GD_MISSED_SNAP_PENDING);
+
+ buf = gf_strdup (missed_snap_entry);
+ if (!buf) {
ret = -1;
goto out;
}
- ret = mkdir_p (snap_brick_mount_path, 0777, _gf_true);
+ /* Fetch the missed_snap_count from the dict */
+ ret = dict_get_int32 (rsp_dict, "missed_snap_count",
+ &missed_snap_count);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "creating the brick directory"
- " %s for the snapshot %s(device: %s) failed",
- snap_brick_mount_path, snap_volinfo->volname, device);
+ /* Initialize the missed_snap_count for the first time */
+ missed_snap_count = 0;
+ }
+
+ /* Setting the missed_snap_entry in the rsp_dict */
+ snprintf (name_buf, sizeof(name_buf), "missed_snaps_%d",
+ missed_snap_count);
+ ret = dict_set_dynstr (rsp_dict, name_buf, buf);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set missed_snap_entry (%s) "
+ "in the rsp_dict.", buf);
+ GF_FREE (buf);
goto out;
}
- /* mount the snap logical device on the directory inside
- /var/run/gluster/snaps/<snapname>/@snap_brick_mount_path
- */
- ret = mount (device, snap_brick_mount_path, entry->mnt_type, MS_MGC_VAL,
- "nouuid");
+ missed_snap_count++;
+
+ /* Setting the new missed_snap_count in the dict */
+ ret = dict_set_int32 (rsp_dict, "missed_snap_count",
+ missed_snap_count);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "mounting the snapshot "
- "logical device %s failed (error: %s)", device,
- strerror (errno));
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to set missed_snap_count for %s "
+ "in the rsp_dict.", missed_snap_entry);
goto out;
}
- ret = glusterd_brickinfo_new (&snap_brickinfo);
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+static int32_t
+glusterd_add_bricks_to_snap_volume (dict_t *dict, dict_t *rsp_dict,
+ glusterd_volinfo_t *snap_vol,
+ glusterd_brickinfo_t *original_brickinfo,
+ glusterd_brickinfo_t *snap_brickinfo,
+ char **snap_brick_dir, int64_t volcount,
+ int32_t brick_count)
+{
+ char key[PATH_MAX] = "";
+ char snap_brick_path[PATH_MAX] = "";
+ char *snap_device = NULL;
+ gf_boolean_t add_missed_snap = _gf_false;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (snap_vol);
+ GF_ASSERT (original_brickinfo);
+ GF_ASSERT (snap_brickinfo);
+ GF_ASSERT (snap_brick_dir);
+
+ snprintf (key, sizeof(key) - 1, "vol%ld.brickdir%d", volcount,
+ brick_count);
+ ret = dict_get_ptr (dict, key, (void **)snap_brick_dir);
+ if (ret) {
+ /* Using original brickinfo here because it will be a
+ * pending snapshot and storing the original brickinfo
+ * will help in mapping while recreating the missed snapshot
+ */
+ gf_log (this->name, GF_LOG_WARNING, "Unable to fetch "
+ "snap mount path (%s). Using original brickinfo", key);
+ snap_brickinfo->snap_status = -1;
+ strcpy (snap_brick_path, original_brickinfo->path);
+
+ /* In originator node add snaps missed
+ * from different nodes to the dict
+ */
+ if (is_origin_glusterd (dict) == _gf_true)
+ add_missed_snap = _gf_true;
+ } else {
+ /* Create brick-path in the format /var/run/gluster/snaps/ *
+ * <snap-uuid>/<original-brick#>/snap-brick-dir *
+ */
+ snprintf (snap_brick_path, sizeof(snap_brick_path),
+ "%s/%s/brick%d%s", snap_mount_folder,
+ snap_vol->volname, brick_count+1,
+ *snap_brick_dir);
+ }
+
+ if ((snap_brickinfo->snap_status != -1) &&
+ (!uuid_compare (original_brickinfo->uuid, MY_UUID)) &&
+ (!glusterd_is_brick_started (original_brickinfo))) {
+ /* In case if the brick goes down after prevalidate. */
+ gf_log (this->name, GF_LOG_WARNING, "brick %s:%s is not"
+ " started (snap: %s)",
+ original_brickinfo->hostname,
+ original_brickinfo->path,
+ snap_vol->snapshot->snapname);
+
+ snap_brickinfo->snap_status = -1;
+ strcpy (snap_brick_path, original_brickinfo->path);
+ add_missed_snap = _gf_true;
+ }
+
+ if (add_missed_snap) {
+ ret = glusterd_add_missed_snaps_to_dict (rsp_dict,
+ snap_vol->volname,
+ original_brickinfo,
+ brick_count + 1,
+ GF_SNAP_OPTION_TYPE_CREATE);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to add missed"
+ " snapshot info for %s:%s in the rsp_dict",
+ original_brickinfo->hostname,
+ original_brickinfo->path);
+ goto out;
+ }
+ }
+
+ snprintf (key, sizeof(key), "vol%ld.brick_snapdevice%d",
+ volcount, brick_count);
+ ret = dict_get_ptr (dict, key, (void **)&snap_device);
+ if (ret) {
+ /* If the device name is empty, so will be the brick path
+ * Hence the missed snap has already been added above
+ */
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch "
+ "snap device (%s). Leaving empty", key);
+ } else
+ strcpy (snap_brickinfo->device_path, snap_device);
+
+ ret = gf_canonicalize_path (snap_brick_path);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
- "initializing the brick for the snap "
- "volume failed (snapname: %s)", snap_volinfo->volname);
+ "Failed to canonicalize path");
goto out;
}
strcpy (snap_brickinfo->hostname, original_brickinfo->hostname);
- strcpy (snap_brickinfo->path, snap_brick_mount_path);
- LOCK (&snap_volinfo->lock);
- {
- list_add_tail (&snap_brickinfo->brick_list,
- &snap_volinfo->bricks);
- }
- UNLOCK (&snap_volinfo->lock);
+ strcpy (snap_brickinfo->path, snap_brick_path);
+ uuid_copy (snap_brickinfo->uuid, original_brickinfo->uuid);
+ list_add_tail (&snap_brickinfo->brick_list, &snap_vol->bricks);
+
out:
- GF_FREE (tmp);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+static int32_t
+glusterd_take_brick_snapshot (glusterd_volinfo_t *origin_vol,
+ glusterd_volinfo_t *snap_vol, dict_t *rsp_dict,
+ glusterd_brickinfo_t *original_brickinfo,
+ glusterd_brickinfo_t *snap_brickinfo,
+ char *snap_brick_dir, int32_t brick_count)
+{
+ char *device = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (origin_vol);
+ GF_ASSERT (snap_vol);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (original_brickinfo);
+ GF_ASSERT (snap_brickinfo);
+ GF_ASSERT (snap_brick_dir);
+
+ device = glusterd_take_lvm_snapshot (snap_vol, original_brickinfo);
+ /* Fail the snapshot even though snapshot on one of
+ the bricks fails. At the end when we check whether
+ the snapshot volume meets quorum or not, then
+ the snapshot can either be treated as success, or
+ in case of failure we can undo the changes and return
+ failure to cli. */
+ if (!device) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to take snapshot of %s:%s",
+ original_brickinfo->hostname,
+ original_brickinfo->path);
+ goto out;
+ }
+
+ /* create the complete brick here */
+ ret = glusterd_snap_brick_create (device, snap_vol,
+ original_brickinfo,
+ brick_count, snap_brick_dir);
if (ret) {
- umount (snap_brick_mount_path);
- if (snap_brickinfo)
- glusterd_brickinfo_delete (snap_brickinfo);
+ gf_log (this->name, GF_LOG_ERROR, "not able to"
+ " create the brickinfo for the snap %s"
+ ", volume %s", snap_vol->snapshot->snapname,
+ origin_vol->volname);
+ goto out;
}
- if (mtab)
- endmntent (mtab);
+
+out:
+ if (device)
+ GF_FREE (device);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
-/* TODO: lvm uses '-' as the delimter for differentiating the logical volume
- name and the volume group name. So as of now, if the snapname given from
- cli contains '-', it confuses lvm. Handle it.
-*/
-int32_t
-glusterd_do_snap (glusterd_volinfo_t *volinfo, char *name, dict_t *dict,
- gf_boolean_t cg, uuid_t cg_id)
+/* Look for disconnected peers, for missed snap creates or deletes */
+static int32_t
+glusterd_find_missed_snap (dict_t *rsp_dict, glusterd_volinfo_t *vol,
+ char *snap_uuid, struct list_head *peers,
+ int32_t op)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
- char *device = NULL;
- char snapname[PATH_MAX] = {0, };
- char tmp[2046] = {0, };
- glusterd_volinfo_t *snap_volume = NULL;
- char *description = NULL;
+ int32_t brick_count = -1;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_peerinfo_t *peerinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
this = THIS;
GF_ASSERT (this);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (peers);
+ GF_ASSERT (vol);
+ GF_ASSERT (snap_uuid);
+
+ brick_count = 0;
+ list_for_each_entry (brickinfo, &vol->bricks, brick_list) {
+ if (!uuid_compare (brickinfo->uuid, MY_UUID)) {
+ /* If the brick belongs to the same node */
+ brick_count++;
+ continue;
+ }
+ list_for_each_entry (peerinfo, peers, uuid_list) {
+ if (uuid_compare (peerinfo->uuid, brickinfo->uuid)) {
+ /* If the brick doesnt belong to this peer */
+ continue;
+ }
+
+ /* Found peer who owns the brick, *
+ * if peer is not connected or not *
+ * friend add it to missed snap list */
+ if (!(peerinfo->connected) ||
+ (peerinfo->state.state !=
+ GD_FRIEND_STATE_BEFRIENDED)) {
+ ret = glusterd_add_missed_snaps_to_dict
+ (rsp_dict,
+ snap_uuid,
+ brickinfo,
+ brick_count + 1,
+ op);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add missed snapshot "
+ "info for %s:%s in the "
+ "rsp_dict", brickinfo->hostname,
+ brickinfo->path);
+ goto out;
+ }
+ }
+ }
+ brick_count++;
+ }
+
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+glusterd_volinfo_t *
+glusterd_do_snap_vol (glusterd_volinfo_t *origin_vol, glusterd_snap_t *snap,
+ dict_t *dict, dict_t *rsp_dict, int64_t volcount)
+{
+ char key[PATH_MAX] = "";
+ char *snap_brick_dir = NULL;
+ char *username = NULL;
+ char *password = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *snap_vol = NULL;
+ uuid_t *snap_volid = NULL;
+ int32_t ret = -1;
+ int32_t brick_count = 0;
+ glusterd_brickinfo_t *snap_brickinfo = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
priv = this->private;
GF_ASSERT (priv);
+ GF_ASSERT (origin_vol);
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
- if (cg) {
- snprintf (tmp, sizeof (tmp), "%s_snap", volinfo->volname);
- snprintf (snapname, sizeof (snapname), "%s_%s", name, tmp);
- } else {
- snprintf (snapname, sizeof (snapname), "%s", name);
+ /* fetch username, password and vol_id from dict*/
+ snprintf (key, sizeof(key), "volume%ld_username", volcount);
+ ret = dict_get_str (dict, key, &username);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get %s for "
+ "snap %s", key, snap->snapname);
+ goto out;
}
- ret = glusterd_volinfo_dup (volinfo, &snap_volume);
- strcpy (snap_volume->volname, snapname);
- snap_volume->is_snap_volume = _gf_true;
+ snprintf (key, sizeof(key), "volume%ld_password", volcount);
+ ret = dict_get_str (dict, key, &password);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get %s for "
+ "snap %s", key, snap->snapname);
+ goto out;
+ }
+ snprintf (key, sizeof(key) - 1, "vol%ld_volid", volcount);
+ ret = dict_get_bin (dict, key, (void **)&snap_volid);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch snap_volid");
+ goto out;
+ }
- list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- if (uuid_compare (brickinfo->uuid, MY_UUID)) {
- continue;
+ /* We are not setting the username and password here as
+ * we need to set the user name and password passed in
+ * the dictionary
+ */
+ ret = glusterd_volinfo_dup (origin_vol, &snap_vol, _gf_false);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to duplicate volinfo "
+ "for the snapshot %s", snap->snapname);
+ goto out;
+ }
+
+ /* uuid is used as lvm snapshot name.
+ This will avoid restrictions on snapshot names provided by user */
+ GLUSTERD_GET_UUID_NOHYPHEN (snap_vol->volname, *snap_volid);
+ uuid_copy (snap_vol->volume_id, *snap_volid);
+ snap_vol->is_snap_volume = _gf_true;
+ strcpy (snap_vol->parent_volname, origin_vol->volname);
+ snap_vol->snapshot = snap;
+
+ glusterd_auth_set_username (snap_vol, username);
+ glusterd_auth_set_password (snap_vol, password);
+
+ /* Adding snap brickinfos to the snap volinfo */
+ brick_count = 0;
+ list_for_each_entry (brickinfo, &origin_vol->bricks, brick_list) {
+ snap_brickinfo = NULL;
+
+ ret = glusterd_brickinfo_new (&snap_brickinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "initializing the brick for the snap "
+ "volume failed (snapname: %s)", snap->snapname);
+ goto out;
}
- if (!glusterd_is_brick_started (brickinfo)) {
- gf_log (this->name, GF_LOG_WARNING, "brick %s:%s is not"
- " started (volume: %s snap: %s)",
- brickinfo->hostname, brickinfo->path,
- volinfo->volname, snapname);
- continue;
+ ret = glusterd_add_bricks_to_snap_volume (dict, rsp_dict,
+ snap_vol,
+ brickinfo,
+ snap_brickinfo,
+ &snap_brick_dir,
+ volcount,
+ brick_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add the snap brick for "
+ "%s:%s to the snap volume",
+ brickinfo->hostname, brickinfo->path);
+ GF_FREE (snap_brickinfo);
+ goto out;
}
- ret = glusterd_take_snapshot (brickinfo,
- volinfo->volname,
- snapname, dict, &device);
- /* continue with the snapshot even though snapshot
- on one of the bricks fails. At the end check
- whether the snapshot volume meets quorum or not.
- If so, then the snapshot can be treated as success.
- If not, undo the changes and return failure to cli.
- */
- if (ret)
+ /* Take snapshot of the brick */
+ if ((uuid_compare (brickinfo->uuid, MY_UUID)) ||
+ (snap_brickinfo->snap_status == -1)) {
+ brick_count++;
continue;
+ }
- /*create the complete brick here and add it to the
- volinfo
- */
- ret = glusterd_snap_brick_create (device, snap_volume,
- brickinfo);
+ ret = glusterd_take_brick_snapshot (origin_vol, snap_vol,
+ rsp_dict, brickinfo,
+ snap_brickinfo,
+ snap_brick_dir,
+ brick_count);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "not able to"
- " create the brickinfo for the snap %s"
- ", volume %s", snapname,
- volinfo->volname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to take snapshot for %s:%s",
+ brickinfo->hostname, brickinfo->path);
goto out;
}
- }
-
- //TODO: the quorum check of the snap volume here
- ret = dict_get_str (dict, "snap-description", &description);
- // for now continue the snap, if getting description fails.
-
- ret = glusterd_snap_create (volinfo, snap_volume, description, cg_id);
- if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "creating the"
- "snap object failed for the volume %s",
- volinfo->volname);
- goto out;
+ brick_count++;
}
- ret = glusterd_store_perform_snap_store (volinfo);
+ /*TODO: the quorum check of the snap volume here */
+
+ ret = glusterd_store_volinfo (snap_vol,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
if (ret) {
- gf_log (this->name, GF_LOG_WARNING, "could not do volume store"
- " after taking the snapshot (volume: %s)",
- volinfo->volname);
+ gf_log (this->name, GF_LOG_ERROR, "Failed to store snapshot "
+ "volinfo (%s) for snap %s", snap_vol->volname,
+ snap->snapname);
goto out;
}
- ret = generate_snap_brick_volfiles (volinfo, snap_volume);
+ ret = generate_brick_volfiles (snap_vol);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "generating the brick "
"volfiles for the snap %s (volume: %s) failed",
- snapname, volinfo->volname);
+ snap->snapname, origin_vol->volname);
goto out;
}
- ret = generate_snap_client_volfiles (volinfo, snap_volume,
- GF_CLIENT_TRUSTED);
+ ret = generate_client_volfiles (snap_vol, GF_CLIENT_TRUSTED);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "generating the trusted "
"client volfiles for the snap %s (volume: %s) failed",
- snapname, volinfo->volname);
+ snap->snapname, origin_vol->volname);
goto out;
}
-
- ret = generate_snap_client_volfiles (volinfo, snap_volume,
- GF_CLIENT_OTHER);
+ ret = generate_client_volfiles (snap_vol, GF_CLIENT_OTHER);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "generating the client "
"volfiles for the snap %s (volume: %s) failed",
- snapname, volinfo->volname);
+ snap->snapname, origin_vol->volname);
goto out;
}
- //check whether this is needed or not
- list_add_tail (&snap_volume->vol_list, &priv->volumes);
+ ret = glusterd_list_add_snapvol (origin_vol, snap_vol);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "could not add the snap "
+ "volume %s to the list", snap_vol->volname);
+ goto out;
+ }
- list_for_each_entry (brickinfo, &snap_volume->bricks, brick_list) {
- ret = glusterd_snap_brick_start (volinfo, snap_volume, brickinfo,
- _gf_true);
+ list_for_each_entry (brickinfo, &snap_vol->bricks, brick_list) {
+ if (uuid_compare (brickinfo->uuid, MY_UUID))
+ continue;
+
+ if (brickinfo->snap_status == -1) {
+ gf_log (this->name, GF_LOG_INFO,
+ "not starting snap brick %s:%s for "
+ "for the snap %s (volume: %s)",
+ brickinfo->hostname, brickinfo->path,
+ snap->snapname, origin_vol->volname);
+ continue;
+ }
+
+ ret = glusterd_brick_start (snap_vol, brickinfo, _gf_true);
if (ret) {
gf_log (this->name, GF_LOG_WARNING, "starting the "
"brick %s:%s for the snap %s (volume: %s) "
- "failed", brickinfo->hostname,
- brickinfo->path, snapname, volinfo->volname);
+ "failed", brickinfo->hostname, brickinfo->path,
+ snap->snapname, origin_vol->volname);
goto out;
}
}
- snap_volume->status = GLUSTERD_STATUS_STARTED;
+ snap_vol->status = GLUSTERD_STATUS_STARTED;
+ ret = glusterd_store_volinfo (snap_vol,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to store snap volinfo");
+ goto out;
+ }
out:
- if (ret)
- glusterd_volinfo_delete (snap_volume);
- return ret;
+ if (ret) {
+ if (snap_vol)
+ glusterd_snap_volume_remove (rsp_dict, snap_vol,
+ _gf_true, _gf_true);
+ snap_vol = NULL;
+ }
+
+ return snap_vol;
}
-int32_t
-glusterd_do_snap_remove (glusterd_volinfo_t *volinfo, char *name, dict_t *dict)
+/* This is a snapshot remove handler function. This function will be
+ * executed in the originator node. This function is responsible for
+ * calling mgmt v3 framework to do the actual remove on all the bricks
+ *
+ * @param req RPC request object
+ * @param op gluster operation
+ * @param dict dictionary containing snapshot remove request
+ * @param err_str In case of an err this string should be populated
+ * @param len length of err_str buffer
+ *
+ * @return Negative value on Failure and 0 in success
+ */
+int
+glusterd_handle_snapshot_remove (rpcsvc_request_t *req, glusterd_op_t op,
+ dict_t *dict, char *err_str, size_t len)
{
- int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *priv = NULL;
- glusterd_snap_t *snap = NULL;
- glusterd_snap_cg_t *cg = NULL;
- int i = 0;
+ int ret = -1;
+ int64_t volcount = 0;
+ char *snapname = NULL;
+ char *volname = NULL;
+ char key[PATH_MAX] = "";
+ glusterd_snap_t *snap = NULL;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_volinfo_t *tmp = NULL;
+ xlator_t *this = NULL;
this = THIS;
- priv = this->private;
- if (!volinfo) {
- gf_log (this->name, GF_LOG_ERROR, "volinfo NULL");
+ GF_ASSERT (req);
+ GF_ASSERT (dict);
+ GF_ASSERT (err_str);
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get snapname");
goto out;
}
- if (!name) {
- gf_log (this->name, GF_LOG_ERROR, "name is NULL (volume: %s)",
- volinfo->volname);
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ snprintf (err_str, len, "Snap (%s) does not exist", snapname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "%s", err_str);
+ ret = -1;
goto out;
}
- snap = glusterd_find_snap_by_name (volinfo, name);
- if (!snap) {
- cg = glusterd_find_snap_cg_by_name (priv, volinfo->volname);
- if (!cg) {
- gf_log (this->name, GF_LOG_ERROR, "could not find "
- "the snap or the cg object by the name %s",
- name);
+ /* Set volnames in the dict to get mgmt_v3 lock */
+ list_for_each_entry_safe (snap_vol, tmp, &snap->volumes, vol_list) {
+ volcount++;
+ volname = gf_strdup (snap_vol->parent_volname);
+ if (!volname) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "strdup failed");
goto out;
}
- }
- if (snap) {
- ret = glusterd_brick_snapshot_remove (snap->snap_volume,
- volinfo, name);
+ snprintf (key, sizeof (key), "volname%ld", volcount);
+ ret = dict_set_dynstr (dict, key, volname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "removing the bricks"
- " snapshots for the snap %s (volume: %s) "
- "failed", name, volinfo->volname);
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set "
+ "volume name in dictionary");
+ GF_FREE (volname);
goto out;
}
+ volname = NULL;
+ }
+ ret = dict_set_int64 (dict, "volcount", volcount);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set volcount");
+ goto out;
+ }
+
+ ret = glusterd_mgmt_v3_initiate_snap_phases (req, op, dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to initiate snap "
+ "phases");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_snapshot_remove_prevalidate (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ char *snapname = NULL;
+ xlator_t *this = NULL;
+ glusterd_snap_t *snap = NULL;
+
+ this = THIS;
+
+ if (!dict || !op_errstr) {
+ gf_log (this->name, GF_LOG_ERROR, "input parameters NULL");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Getting the snap name "
+ "failed");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ gf_log (this->name, GF_LOG_ERROR, "Snap %s does not exist",
+ snapname);
+ ret = -1;
+ goto out;
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_snapshot_status_prevalidate (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ int ret = -1;
+ char *snapname = NULL;
+ glusterd_conf_t *conf = NULL;
+ xlator_t *this = NULL;
+ int32_t cmd = -1;
+ glusterd_volinfo_t *volinfo = NULL;
+ char *volname = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+
+ GF_ASSERT (conf);
+ GF_ASSERT (op_errstr);
+ if (!dict) {
+ gf_log (this->name, GF_LOG_ERROR, "Input dict is NULL");
+ goto out;
+ }
+
+ ret = dict_get_int32 (dict, "cmd", &cmd);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not fetch status cmd");
+ goto out;
}
- if (cg) {
- for (i = 0; i < cg->volume_count ; i++) {
- ret = glusterd_brick_snapshot_remove (&cg->volumes[i],
- volinfo, name);
+ switch (cmd) {
+ case GF_SNAP_STATUS_TYPE_ALL:
+ {
+ break;
+ }
+ case GF_SNAP_STATUS_TYPE_SNAP:
+ {
+ ret = dict_get_str (dict, "snapname", &snapname);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "removing the"
- " bricks snapshots for the snap %s "
- "(volume: %s) failed", name,
- volinfo->volname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not fetch snapname");
goto out;
}
+
+ if (!glusterd_find_snap_by_name (snapname)) {
+ ret = gf_asprintf (op_errstr, "Snap (%s) "
+ "not found", snapname);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Snap (%s) "
+ "not found", snapname);
+ goto out;
+ }
+ break;
}
- }
+ case GF_SNAP_STATUS_TYPE_VOL:
+ {
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not fetch volname");
+ goto out;
+ }
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ ret = gf_asprintf (op_errstr, "Volume (%s)"
+ "not found", volname);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Volume "
+ "%s not present", volname);
+ goto out;
+ }
+ break;
+ }
+ default:
+ {
+ gf_log (this->name, GF_LOG_ERROR, "Invalid command");
+ break;
+ }
+ }
ret = 0;
+
out:
return ret;
}
-
-/* This function helps in generating the names for either the snapshot
- (if only one volume name is given in the snap create command) or
- the consistency group (if multiple volume names are given in the snaap
- create command). Apart from that, it also helps in generating the names
- for the snaps of the individual volumes in a consistency group.
-*/
-char *
-generate_snapname (char *volname, char *name, gf_boolean_t volume_from_cg)
+int32_t
+glusterd_snapshot_remove_commit (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
{
- char internal_snapname[PATH_MAX] = {0, };
- char timestr[256] = {0, };
- int ret = -1;
- char *snapname = NULL;
- struct timeval tv = {0, };
- xlator_t *this = NULL;
- int i = 0;
+ int32_t ret = -1;
+ char *snapname = NULL;
+ char *dup_snapname = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_conf_t *priv = NULL;
+ glusterd_volinfo_t *snap_volinfo = NULL;
+ xlator_t *this = NULL;
this = THIS;
GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (op_errstr);
- if (name) {
- GF_ASSERT (volname);
- if (volume_from_cg) {
- snprintf (internal_snapname, sizeof (internal_snapname),
- "%s_%s", volname, name);
- } else {
- snprintf (internal_snapname, sizeof (internal_snapname),
- "%s", name);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (!dict || !op_errstr) {
+ gf_log (this->name, GF_LOG_ERROR, "input parameters NULL");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Getting the snap name "
+ "failed");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ gf_log (this->name, GF_LOG_ERROR, "Snap %s does not exist",
+ snapname);
+ ret = -1;
+ goto out;
+ }
+
+ if (is_origin_glusterd (dict) == _gf_true) {
+ /* TODO : As of now there is only volume in snapshot.
+ * Change this when multiple volume snapshot is introduced
+ */
+ snap_volinfo = list_entry (snap->volumes.next,
+ glusterd_volinfo_t,
+ vol_list);
+ if (!snap_volinfo) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch snap_volinfo");
+ ret = -1;
+ goto out;
}
- } else {
- ret = gettimeofday (&tv, NULL);
+
+ /* From origin glusterd check if *
+ * any peers with snap bricks is down */
+ ret = glusterd_find_missed_snap (rsp_dict, snap_volinfo,
+ snap_volinfo->volname,
+ &priv->peers,
+ GF_SNAP_OPTION_TYPE_DELETE);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
- "getting time failed. snapname is not given via"
- "cli. ");
+ "Failed to find missed snap deletes");
goto out;
}
- gf_time_fmt (timestr, sizeof (timestr),
- tv.tv_sec, gf_timefmt_FT);
- snprintf (timestr + strlen (timestr),
- sizeof timestr - strlen (timestr),
- ".%"GF_PRI_SUSECONDS,
- tv.tv_usec);
+ }
- for (i = 0; i < strlen (timestr); i++) {
- if (timestr[i] == ' ' || timestr[i] == ':' ||
- timestr[i] == '.' || timestr[i] == '-')
- timestr[i] = '_';
- }
+ ret = glusterd_snap_remove (rsp_dict, snap, _gf_true, _gf_false);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to remove snap %s",
+ snapname);
+ goto out;
+ }
- snprintf (internal_snapname,
- sizeof (internal_snapname), "%s%s",
- (volume_from_cg)?"cg_":"",
- timestr);
+ dup_snapname = gf_strdup (snapname);
+ if (!dup_snapname) {
+ gf_log (this->name, GF_LOG_ERROR, "Strdup failed");
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (rsp_dict, "snapname", dup_snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to set the snapname");
+ GF_FREE (dup_snapname);
+ goto out;
}
- snapname = gf_strdup (internal_snapname);
+ ret = 0;
out:
- return snapname;
+ return ret;
}
-/* name can be either the snapname if @volnames contains only one volume or
- cg name if there are multiple volume names in volnames string
-*/
+int32_t
+glusterd_do_snap_cleanup (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ char *name = NULL;
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_snap_t *snap = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ conf = this->private;
+ GF_ASSERT (conf);
+
+ if (!dict || !op_errstr) {
+ gf_log (this->name, GF_LOG_ERROR, "input parameters NULL");
+ goto out;
+ }
+
+ ret = dict_get_str (dict, "snapname", &name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "getting the snap "
+ "name failed (volume: %s)", volinfo->volname);
+ goto out;
+ }
+
+ /*
+ If the snapname is not found that means the failure happened at
+ staging, or in commit, before the snap object is created, in which
+ case there is nothing to cleanup. So set ret to 0.
+ */
+ snap = glusterd_find_snap_by_name (name);
+ if (!snap) {
+ gf_log (this->name, GF_LOG_INFO, "snap %s is not found", name);
+ ret = 0;
+ goto out;
+ }
+
+ ret = glusterd_snap_remove (rsp_dict, snap, _gf_true, _gf_true);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "removing the snap %s failed",
+ name);
+ goto out;
+ }
+
+ name = NULL;
+
+ ret = 0;
+
+out:
+
+ return ret;
+}
+
+/* In case of a successful, delete or create operation, during post_validate *
+ * look for missed snap operations and update the missed snap lists */
+int32_t
+glusterd_snapshot_update_snaps_post_validate (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ int32_t ret = -1;
+ int32_t missed_snap_count = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (op_errstr);
+
+ ret = dict_get_int32 (dict, "missed_snap_count",
+ &missed_snap_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG, "No missed snaps");
+ ret = 0;
+ goto out;
+ }
+
+ ret = glusterd_store_update_missed_snaps (dict, missed_snap_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to update missed_snaps_list");
+ goto out;
+ }
+
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
int32_t
glusterd_snapshot_create_commit (dict_t *dict, char **op_errstr,
dict_t *rsp_dict)
{
int ret = -1;
- int i = 0;
- int64_t volume_count = 0;
- gf_boolean_t is_cg = _gf_false;
- char *name = NULL;
+ int64_t i = 0;
+ int64_t volcount = 0;
+ char *snapname = NULL;
char *volname = NULL;
- char *tmp = NULL;
- char volname_buf[PATH_MAX] = {0, };
+ char *tmp_name = NULL;
+ char key[PATH_MAX] = "";
xlator_t *this = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_snap_cg_t *cg = NULL;
- glusterd_conf_t *priv = NULL;
- uuid_t cg_id;
glusterd_snap_t *snap = NULL;
- char err_str[PATH_MAX] = {0, };
+ glusterd_volinfo_t *origin_vol = NULL;
+ glusterd_volinfo_t *snap_vol = NULL;
+ glusterd_conf_t *priv = NULL;
this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+ GF_ASSERT(op_errstr);
+ GF_ASSERT(rsp_dict);
+ priv = this->private;
+ GF_ASSERT(priv);
- ret = dict_get_int64 (dict, "volcount", &volume_count);
+ ret = dict_get_int64 (dict, "volcount", &volcount);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "failed to "
"get the volume count");
goto out;
}
- //snap-name should not be set if volume_count > 1
- ret = dict_get_str (dict, "snap-name", &name);
- if (volume_count > 1 && !ret)
- GF_ASSERT (0);
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to fetch snapname");
+ goto out;
+ }
+ tmp_name = gf_strdup (snapname);
+ if (!tmp_name) {
+ gf_log (this->name, GF_LOG_ERROR, "Out of memory");
+ ret = -1;
+ goto out;
+ }
- if (volume_count > 1) {
- is_cg = _gf_true;
- ret = dict_get_str (dict, "cg-name", &name);
- uuid_generate (cg_id);
- } else if (volume_count == 1) {
- ret = dict_get_str (dict, "snap-name", &name);
+ ret = dict_set_dynstr (rsp_dict, "snapname", tmp_name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to set snapname in rsp_dict");
+ GF_FREE (tmp_name);
+ goto out;
}
+ tmp_name = NULL;
- if (!name) {
- name = generate_snapname (volname, NULL, is_cg);
- if (!name) {
- gf_log (this->name, GF_LOG_ERROR,
- "strdup of internal snapname"
- " ((%s) failed for the "
- "volume %s", name,
- volname);
- goto out;
- }
+ snap = glusterd_create_snap_object (dict, rsp_dict);
+ if (!snap) {
+ gf_log (this->name, GF_LOG_ERROR, "creating the"
+ "snap object %s failed", snapname);
+ ret = -1;
+ goto out;
}
- for (i = 1; i < volume_count + 1; i++) {
- snprintf (volname_buf, sizeof (volname_buf),
- "volname%d", i);
- ret = dict_get_str (dict, volname_buf,
- &volname);
+ for (i = 1; i <= volcount; i++) {
+ snprintf (key, sizeof (key), "volname%ld", i);
+ ret = dict_get_str (dict, key, &volname);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"failed to get volume name");
goto out;
}
- ret = glusterd_volinfo_find (volname, &volinfo);
+ ret = glusterd_volinfo_find (volname, &origin_vol);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"failed to get the volinfo for "
@@ -2949,61 +3783,271 @@ glusterd_snapshot_create_commit (dict_t *dict, char **op_errstr,
goto out;
}
- tmp = generate_snapname (volname, name, is_cg);
- if (!tmp) {
- gf_log (this->name,
- GF_LOG_ERROR, "strdup "
- "failed (%s)", name);
+ /* TODO: Create a stub where the bricks are
+ added parallely by worker threads so that
+ the snap creating happens parallely. */
+ snap_vol = glusterd_do_snap_vol (origin_vol, snap, dict,
+ rsp_dict, i);
+ if (!snap_vol) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_WARNING, "taking the "
+ "snapshot of the volume %s failed", volname);
goto out;
}
+ }
- list_for_each_entry (snap, &volinfo->snaps, snap_list) {
- if (!strcmp (snap->snap_name, tmp)) {
- snprintf (err_str, sizeof (err_str), "snap "
- "with name %s already exists", tmp);
- gf_log (this->name, GF_LOG_ERROR, "%s",
- err_str);
- ret = -1;
- *op_errstr = gf_strdup (err_str);
+ snap->snap_status = GD_SNAP_STATUS_IN_USE;
+ ret = glusterd_store_snap (snap);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Could not store snap"
+ "object %s", snap->snapname);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (ret) {
+ if (snap)
+ glusterd_snap_remove (rsp_dict, snap,
+ _gf_true, _gf_true);
+ snap = NULL;
+ }
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int
+snap_max_hard_limit_set_commit (dict_t *dict, uint64_t value,
+ char *volname, char **op_errstr)
+{
+ char err_str[PATH_MAX] = "";
+ glusterd_conf_t *conf = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = -1;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (volname);
+ GF_ASSERT (op_errstr);
+
+ conf = this->private;
+
+ GF_ASSERT (conf);
+
+ /* TODO: Initiate auto deletion when there is a limit change */
+ if (!volname) {
+ /* For system limit */
+ conf->snap_max_hard_limit = value;
+
+ ret = glusterd_store_global_info (this);
+ if (ret) {
+ snprintf (err_str, PATH_MAX, "Failed to store "
+ "snap-max-hard-limit for system");
+ goto out;
+ }
+ } else {
+ /* For one volume */
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (err_str, PATH_MAX, "Failed to get the"
+ " volinfo for volume %s", volname);
+ goto out;
+ }
+
+ volinfo->snap_max_hard_limit = value;
+
+ ret = glusterd_store_volinfo (volinfo,
+ GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ snprintf (err_str, PATH_MAX, "Failed to store "
+ "snap-max-hard-limit for volume %s", volname);
+ goto out;
+ }
+ }
+
+ ret = 0;
+out:
+ if (ret) {
+ *op_errstr = gf_strdup (err_str);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ }
+ return ret;
+}
+
+int
+snap_max_limits_display_commit (dict_t *rsp_dict, char *volname,
+ char **op_errstr)
+{
+ char err_str[PATH_MAX] = "";
+ char buf[PATH_MAX] = "";
+ glusterd_conf_t *conf = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ int ret = -1;
+ uint64_t active_hard_limit = 0;
+ uint64_t snap_max_limit = 0;
+ uint64_t soft_limit_value = -1;
+ uint64_t count = 0;
+ xlator_t *this = NULL;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (volname);
+ GF_ASSERT (op_errstr);
+
+ conf = this->private;
+
+ GF_ASSERT (conf);
+
+ if (!volname) {
+ /* For system limit */
+ list_for_each_entry (volinfo, &conf->volumes, vol_list) {
+ if (volinfo->is_snap_volume == _gf_true)
+ continue;
+ snap_max_limit = volinfo->snap_max_hard_limit;
+ if (snap_max_limit > conf->snap_max_hard_limit)
+ active_hard_limit = conf->snap_max_hard_limit;
+ else
+ active_hard_limit = snap_max_limit;
+ soft_limit_value = (active_hard_limit *
+ conf->snap_max_soft_limit) / 100;
+
+ snprintf (buf, sizeof(buf), "volume%ld-volname", count);
+ ret = dict_set_str (rsp_dict, buf, volinfo->volname);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
goto out;
}
+
+ snprintf (buf, sizeof(buf),
+ "volume%ld-snap-max-hard-limit", count);
+ ret = dict_set_uint64 (rsp_dict, buf, snap_max_limit);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
+ goto out;
+ }
+
+ snprintf (buf, sizeof(buf),
+ "volume%ld-active-hard-limit", count);
+ ret = dict_set_uint64 (rsp_dict, buf,
+ active_hard_limit);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
+ goto out;
+ }
+
+ snprintf (buf, sizeof(buf),
+ "volume%ld-snap-max-soft-limit", count);
+ ret = dict_set_uint64 (rsp_dict, buf, soft_limit_value);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
+ goto out;
+ }
+ count++;
}
- /* TODO: Create a stub where the bricks are
- added parallely by worker threads so that
- the snap creating happens parallely.
- */
- ret = glusterd_do_snap (volinfo, tmp, dict, is_cg, cg_id);
+ ret = dict_set_uint64 (rsp_dict, "voldisplaycount", count);
if (ret) {
- gf_log (this->name, GF_LOG_WARNING, "taking the "
- "snapshot of the volume %s failed", volname);
+ snprintf (err_str, PATH_MAX,
+ "Failed to set voldisplaycount");
+ goto out;
+ }
+ } else {
+ /* For one volume */
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ snprintf (err_str, PATH_MAX, "Failed to get the"
+ " volinfo for volume %s", volname);
+ goto out;
+ }
+
+ snap_max_limit = volinfo->snap_max_hard_limit;
+ if (snap_max_limit > conf->snap_max_hard_limit)
+ active_hard_limit = conf->snap_max_hard_limit;
+ else
+ active_hard_limit = snap_max_limit;
+
+ soft_limit_value = (active_hard_limit *
+ conf->snap_max_soft_limit) / 100;
+
+ snprintf (buf, sizeof(buf), "volume%ld-volname", count);
+ ret = dict_set_str (rsp_dict, buf, volinfo->volname);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
goto out;
}
- }
- if (volume_count > 1) {
- cg = glusterd_new_snap_cg_object (volume_count);
- if (!cg) {
- gf_log (this->name, GF_LOG_ERROR, "cannot create the "
- "consistency group %s", name);
+ snprintf (buf, sizeof(buf),
+ "volume%ld-snap-max-hard-limit", count);
+ ret = dict_set_uint64 (rsp_dict, buf, snap_max_limit);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
goto out;
}
- uuid_copy (cg->cg_id, cg_id);
- strncpy (cg->cg_name, name, sizeof (cg->cg_name));
- ret = glusterd_add_snap_cg (priv, cg);
+ snprintf (buf, sizeof(buf),
+ "volume%ld-active-hard-limit", count);
+ ret = dict_set_uint64 (rsp_dict, buf, active_hard_limit);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "could not add the"
- " consistency group %s to the glusterd list ",
- name);
- cg = glusterd_remove_snap_cg_by_name (priv, name);
- if (!cg)
- gf_log (this->name, GF_LOG_WARNING, "cannot "
- "find the consistency group %s", name);
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
goto out;
}
+
+ snprintf (buf, sizeof(buf),
+ "volume%ld-snap-max-soft-limit", count);
+ ret = dict_set_uint64 (rsp_dict, buf, soft_limit_value);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set %s", buf);
+ goto out;
+ }
+
+ count++;
+
+ ret = dict_set_uint64 (rsp_dict, "voldisplaycount", count);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set voldisplaycount");
+ goto out;
+ }
+
+ }
+
+ ret = dict_set_uint64 (rsp_dict, "snap-max-hard-limit",
+ conf->snap_max_hard_limit);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set sys-snap-max-hard-limit ");
+ goto out;
+ }
+
+ ret = dict_set_uint64 (rsp_dict, "snap-max-soft-limit",
+ conf->snap_max_soft_limit);
+ if (ret) {
+ snprintf (err_str, PATH_MAX,
+ "Failed to set sys-snap-max-hard-limit ");
+ goto out;
}
+
+ ret = 0;
out:
+ if (ret) {
+ *op_errstr = gf_strdup (err_str);
+ gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ }
return ret;
}
@@ -3012,19 +4056,19 @@ glusterd_snapshot_config_commit (dict_t *dict, char **op_errstr,
dict_t *rsp_dict)
{
char *volname = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- uint64_t limit = 0;
xlator_t *this = NULL;
int ret = -1;
char err_str[PATH_MAX] = {0,};
glusterd_conf_t *conf = NULL;
int config_command = 0;
+ uint64_t hard_limit = 0;
+ uint64_t soft_limit = 0;
this = THIS;
GF_ASSERT (this);
GF_ASSERT (dict);
- GF_ASSERT (rsp_dict);
+ GF_ASSERT (op_errstr);
conf = this->private;
@@ -3037,124 +4081,779 @@ glusterd_snapshot_config_commit (dict_t *dict, char **op_errstr,
goto out;
}
+ /* Ignore the return value of the following dict_get,
+ * as they are optional
+ */
+ ret = dict_get_str (dict, "volname", &volname);
+
+ ret = dict_get_uint64 (dict, "snap-max-hard-limit", &hard_limit);
+
+ ret = dict_get_uint64 (dict, "snap-max-soft-limit", &soft_limit);
+
switch (config_command) {
+ case GF_SNAP_CONFIG_TYPE_SET:
+ if (hard_limit) {
+ /* Commit ops for snap-max-hard-limit */
+ ret = snap_max_hard_limit_set_commit (dict, hard_limit,
+ volname,
+ op_errstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "snap-max-hard-limit set "
+ "commit failed.");
+ goto out;
+ }
+ }
- case GF_SNAP_CONFIG_SYS_MAX:
- ret = dict_get_uint64 (dict, "limit", &limit);
- if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " snapshot limit");
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
- goto out;
+ if (soft_limit) {
+ /* For system limit */
+ conf->snap_max_soft_limit = soft_limit;
+
+ ret = glusterd_store_global_info (this);
+ if (ret) {
+ snprintf (err_str, PATH_MAX, "Failed to store "
+ "snap-max-soft-limit for system");
+ *op_errstr = gf_strdup (err_str);
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ err_str);
+ goto out;
+ }
}
- conf->snap_max_limit = limit;
- ret = glusterd_store_global_info (this);
+ break;
+
+ case GF_SNAP_CONFIG_DISPLAY:
+ /* Reading data from local node only */
+ if (!is_origin_glusterd (dict)) {
+ ret = 0;
+ break;
+ }
+
+ ret = snap_max_limits_display_commit (rsp_dict, volname,
+ op_errstr);
if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to store the"
- " snapshot limit volinfo for system");
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ gf_log (this->name, GF_LOG_ERROR,
+ "snap-max-limit "
+ "display commit failed.");
goto out;
}
break;
+ default:
+ break;
+ }
- case GF_SNAP_CONFIG_VOL_MAX:
- // volume wide limit
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " volume name");
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int
+glusterd_get_brick_lvm_details (dict_t *rsp_dict,
+ glusterd_brickinfo_t *brickinfo, char *volname,
+ char *device, char *key_prefix)
+{
+
+ int ret = -1;
+ glusterd_conf_t *priv = NULL;
+ runner_t runner = {0,};
+ xlator_t *this = NULL;
+ char msg[PATH_MAX] = "";
+ char buf[PATH_MAX] = "";
+ char *ptr = NULL;
+ char *token = NULL;
+ char key[PATH_MAX] = "";
+ char *value = NULL;
+
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (brickinfo);
+ GF_ASSERT (volname);
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ device = glusterd_get_brick_mount_details (brickinfo);
+ if (!device) {
+ gf_log (this->name, GF_LOG_ERROR, "Getting device name for "
+ "the brick %s:%s failed", brickinfo->hostname,
+ brickinfo->path);
+ goto out;
+ }
+ runinit (&runner);
+ snprintf (msg, sizeof (msg), "running lvs command, "
+ "for getting snap status");
+ /* Using lvs command fetch the Volume Group name,
+ * Percentage of data filled and Logical Volume size
+ *
+ * "-o" argument is used to get the desired information,
+ * example : "lvs /dev/VolGroup/thin_vol -o vgname,lv_size",
+ * will get us Volume Group name and Logical Volume size.
+ *
+ * Here separator used is ":",
+ * for the above given command with separator ":",
+ * The output will be "vgname:lvsize"
+ */
+ runner_add_args (&runner, "lvs", device, "--noheading", "-o",
+ "vg_name,data_percent,lv_size",
+ "--separator", ":", NULL);
+ runner_redir (&runner, STDOUT_FILENO, RUN_PIPE);
+ runner_log (&runner, "", GF_LOG_DEBUG, msg);
+ ret = runner_start (&runner);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not perform lvs action");
+ goto end;
+ }
+ do {
+ ptr = fgets (buf, sizeof (buf),
+ runner_chio (&runner, STDOUT_FILENO));
+
+ if (ptr == NULL)
+ break;
+ token = strtok (buf, ":");
+ if (token != NULL) {
+ while (token && token[0] == ' ')
+ token++;
+ if (!token) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR,
+ "Invalid vg entry");
+ goto end;
+ }
+ value = gf_strdup (token);
+ if (!value) {
+ ret = -1;
+ goto end;
+ }
+ ret = snprintf (key, sizeof (key), "%s.vgname",
+ key_prefix);
+ if (ret < 0) {
+ goto end;
+ }
+
+ ret = dict_set_dynstr (rsp_dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save vgname ");
+ goto end;
+ }
+ }
+
+ token = strtok (NULL, ":");
+ if (token != NULL) {
+ value = gf_strdup (token);
+ if (!value) {
+ ret = -1;
+ goto end;
+ }
+ ret = snprintf (key, sizeof (key), "%s.data",
+ key_prefix);
+ if (ret < 0) {
+ goto end;
+ }
+
+ ret = dict_set_dynstr (rsp_dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save data percent ");
+ goto end;
+ }
+ }
+ token = strtok (NULL, ":");
+ if (token != NULL) {
+ value = gf_strdup (token);
+ if (!value) {
+ ret = -1;
+ goto end;
+ }
+ ret = snprintf (key, sizeof (key), "%s.lvsize",
+ key_prefix);
+ if (ret < 0) {
+ goto end;
+ }
+
+ ret = dict_set_dynstr (rsp_dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save meta data percent ");
+ goto end;
+ }
+ }
+
+ } while (ptr != NULL);
+
+ ret = 0;
+
+end:
+ runner_end (&runner);
+
+out:
+ if (ret && value) {
+ GF_FREE (value);
+ }
+
+ return ret;
+}
+
+int
+glusterd_get_single_brick_status (char **op_errstr, dict_t *rsp_dict,
+ char *keyprefix, int index,
+ glusterd_volinfo_t *snap_volinfo,
+ glusterd_brickinfo_t *brickinfo)
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ char key[PATH_MAX] = "";
+ char *device = NULL;
+ char *value = NULL;
+ char brick_path[PATH_MAX] = "";
+ char pidfile[PATH_MAX] = "";
+ pid_t pid = -1;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (keyprefix);
+ GF_ASSERT (snap_volinfo);
+ GF_ASSERT (brickinfo);
+
+ ret = snprintf (key, sizeof (key), "%s.brick%d.path", keyprefix,
+ index);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = snprintf (brick_path, sizeof (brick_path),
+ "%s:%s", brickinfo->hostname, brickinfo->path);
+ if (ret < 0) {
+ goto out;
+ }
+
+ value = gf_strdup (brick_path);
+ if (!value) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (rsp_dict, key, value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to store "
+ "brick_path %s", brickinfo->path);
+ goto out;
+ }
+
+ if (brickinfo->snap_status == -1) {
+ /* Setting vgname as "Pending Snapshot" */
+ value = gf_strdup ("Pending Snapshot");
+ if (!value) {
+ ret = -1;
goto out;
}
- ret = glusterd_volinfo_find (volname, &volinfo);
+
+ snprintf (key, sizeof (key), "%s.brick%d.vgname",
+ keyprefix, index);
+ ret = dict_set_dynstr (rsp_dict, key, value);
if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " volinfo for volume %s", volname);
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save vgname ");
+ goto out;
+ }
+
+ ret = 0;
+ goto out;
+ }
+ value = NULL;
+
+ ret = snprintf (key, sizeof (key), "%s.brick%d.status",
+ keyprefix, index);
+ if (ret < 0) {
+ goto out;
+ }
+
+ if (brickinfo->status == GF_BRICK_STOPPED) {
+ value = gf_strdup ("No");
+ if (!value) {
+ ret = -1;
goto out;
}
- ret = dict_get_uint64 (dict, "limit", &limit);
+ ret = dict_set_str (rsp_dict, key, value);
if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " snapshot limit volinfo for volume %s",
- volname);
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save brick status");
goto out;
}
- volinfo->snap_max_limit = limit;
- ret = glusterd_store_volinfo (volinfo,
- GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ value = NULL;
+ } else {
+ value = gf_strdup ("Yes");
+ if (!value) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_str (rsp_dict, key, value);
if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to store the"
- " snapshot limit volinfo for volume %s",
- volname);
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save brick status");
goto out;
}
- break;
+ value = NULL;
- case GF_SNAP_CONFIG_CG_MAX:
- break;
- case GF_SNAP_CONFIG_DISPLAY:
- ret = dict_get_str (dict, "volname", &volname);
+ GLUSTERD_GET_BRICK_PIDFILE (pidfile, snap_volinfo,
+ brickinfo, priv);
+ ret = glusterd_is_service_running (pidfile, &pid);
+
+ ret = snprintf (key, sizeof (key), "%s.brick%d.pid",
+ keyprefix, index);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_set_int32 (rsp_dict, key, pid);
if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " volume name");
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save pid %d", pid);
goto out;
}
- if (!strncmp (volname, "all", 3)) {
- limit = conf->snap_max_limit;
- } else {
- ret = glusterd_volinfo_find (volname, &volinfo);
+ }
+
+ ret = snprintf (key, sizeof (key), "%s.brick%d",
+ keyprefix, index);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = glusterd_get_brick_lvm_details (rsp_dict, brickinfo,
+ snap_volinfo->volname,
+ device, key);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get "
+ "brick LVM details");
+ goto out;
+ }
+out:
+ if (ret && value) {
+ GF_FREE (value);
+ }
+
+ return ret;
+}
+
+int
+glusterd_get_single_snap_status (char **op_errstr, dict_t *rsp_dict,
+ char *keyprefix, glusterd_snap_t *snap)
+{
+ int ret = -1;
+ xlator_t *this = NULL;
+ char key[PATH_MAX] = "";
+ char brickkey[PATH_MAX] = "";
+ glusterd_volinfo_t *snap_volinfo = NULL;
+ glusterd_volinfo_t *tmp_volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ int volcount = 0;
+ int brickcount = 0;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (keyprefix);
+ GF_ASSERT (snap);
+
+ list_for_each_entry_safe (snap_volinfo, tmp_volinfo, &snap->volumes,
+ vol_list) {
+ ret = snprintf (key, sizeof (key), "%s.vol%d", keyprefix,
+ volcount);
+ if (ret < 0) {
+ goto out;
+ }
+ list_for_each_entry (brickinfo, &snap_volinfo->bricks,
+ brick_list) {
+ if (!glusterd_is_local_brick (this, snap_volinfo,
+ brickinfo)) {
+ brickcount++;
+ continue;
+ }
+
+ ret = glusterd_get_single_brick_status (op_errstr,
+ rsp_dict, key, brickcount,
+ snap_volinfo, brickinfo);
+
if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " volinfo for volume %s", volname);
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s",
- err_str);
+ gf_log (this->name, GF_LOG_ERROR, "Getting "
+ "single snap status failed");
goto out;
}
- limit = volinfo->snap_max_limit;
+ brickcount++;
+ }
+ ret = snprintf (brickkey, sizeof (brickkey), "%s.brickcount",
+ key);
+ if (ret < 0) {
+ goto out;
}
- ret = dict_set_uint64 (rsp_dict, "limit", limit);
+ ret = dict_set_int32 (rsp_dict, brickkey, brickcount);
if (ret) {
- snprintf (err_str, PATH_MAX,"Failed to get the"
- " set limit for volume %s",
- volname);
- *op_errstr = gf_strdup (err_str);
- gf_log (this->name, GF_LOG_ERROR, "%s", err_str);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save brick count");
goto out;
}
- break;
+ volcount++;
+ }
- default:
- break;
+ ret = snprintf (key, sizeof (key), "%s.volcount", keyprefix);
+ if (ret < 0) {
+ goto out;
}
- ret = dict_set_str (rsp_dict, "volname", volname);
+ ret = dict_set_int32 (rsp_dict, key, volcount);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to set the"
- " volume name");
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not save volcount");
goto out;
}
- ret = dict_set_int32 (dict, "config-command", config_command);
+
+out:
+
+ return ret;
+}
+
+int
+glusterd_get_each_snap_object_status (char **op_errstr, dict_t *rsp_dict,
+ glusterd_snap_t *snap, char *keyprefix)
+{
+ int ret = -1;
+ char key[PATH_MAX] = "";
+ char *temp = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (snap);
+ GF_ASSERT (keyprefix);
+
+ /* TODO : Get all the snap volume info present in snap object,
+ * as of now, There will be only one snapvolinfo per snap object
+ */
+ ret = snprintf (key, sizeof (key), "%s.snapname", keyprefix);
+ if (ret < 0) {
+ goto out;
+ }
+
+ temp = gf_strdup (snap->snapname);
+ if (temp == NULL) {
+ ret = -1;
+ goto out;
+ }
+ ret = dict_set_dynstr (rsp_dict, key, temp);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not save "
+ "snap name");
+ goto out;
+ }
+
+ temp = NULL;
+
+ ret = snprintf (key, sizeof (key), "%s.uuid", keyprefix);
+ if (ret < 0) {
+ goto out;
+ }
+
+ temp = gf_strdup (uuid_utoa (snap->snap_id));
+ if (temp == NULL) {
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_set_dynstr (rsp_dict, key, temp);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not save "
+ "snap UUID");
+ goto out;
+ }
+
+ temp = NULL;
+
+ ret = glusterd_get_single_snap_status (op_errstr, rsp_dict, keyprefix,
+ snap);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Could not get single snap status");
+ goto out;
+ }
+
+ ret = snprintf (key, sizeof (key), "%s.volcount", keyprefix);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = dict_set_int32 (rsp_dict, key, 1);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not save volcount");
+ goto out;
+ }
+out:
+ if (ret && temp)
+ GF_FREE (temp);
+
+ return ret;
+}
+
+int
+glusterd_get_snap_status_of_volume (char **op_errstr, dict_t *rsp_dict,
+ char *volname, char *keyprefix) {
+ int ret = -1;
+ glusterd_volinfo_t *snap_volinfo = NULL;
+ glusterd_volinfo_t *temp_volinfo = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+ char key[PATH_MAX] = "";
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ int i = 0;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ GF_ASSERT (op_errstr);
+ GF_ASSERT (rsp_dict);
+ GF_ASSERT (volname);
+ GF_ASSERT (keyprefix);
+
+ ret = glusterd_volinfo_find (volname, &volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get volinfo of "
+ "volume %s", volname);
+ goto out;
+ }
+
+ list_for_each_entry_safe (snap_volinfo, temp_volinfo,
+ &volinfo->snap_volumes, snapvol_list) {
+ ret = snprintf (key, sizeof (key), "status.snap%d", i);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = glusterd_get_each_snap_object_status (op_errstr,
+ rsp_dict, snap_volinfo->snapshot, key);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Function : "
+ "glusterd_get_single_snap_status failed");
+ goto out;
+ }
+ i++;
+ }
+
+ ret = dict_set_int32 (rsp_dict, "status.snapcount", i);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to save snapcount");
+ ret = -1;
+ goto out;
+ }
+out:
+ return ret;
+}
+
+int
+glusterd_get_all_snapshot_status (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ int32_t i = 0;
+ int ret = -1;
+ char key[PATH_MAX] = "";
+ glusterd_conf_t *priv = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_snap_t *tmp_snap = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+
+ list_for_each_entry_safe (snap, tmp_snap,
+ &priv->snapshots, snap_list) {
+ ret = snprintf (key, sizeof (key), "status.snap%d", i);
+ if (ret < 0) {
+ goto out;
+ }
+
+ ret = glusterd_get_each_snap_object_status (op_errstr,
+ rsp_dict, snap, key);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not get "
+ "the details of a snap object: %s",
+ snap->snapname);
+ goto out;
+ }
+ i++;
+ }
+
+ ret = dict_set_int32 (rsp_dict, "status.snapcount", i);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Could not save snapcount");
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+
+int
+glusterd_snapshot_status_commit (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ xlator_t *this = NULL;
+ int ret = -1;
+ glusterd_conf_t *conf = NULL;
+ char *get_buffer = NULL;
+ int32_t cmd = -1;
+ char *snapname = NULL;
+ glusterd_snap_t *snap = NULL;
+ char *volname = NULL;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (op_errstr);
+
+ conf = this->private;
+
+ GF_ASSERT (conf);
+ ret = dict_get_int32 (dict, "cmd", &cmd);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to get status cmd type");
+ goto out;
+ }
+
+ ret = dict_set_int32 (rsp_dict, "cmd", cmd);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
- "failed to set config-command type");
+ "Could not save status cmd in rsp dictionary");
goto out;
}
+ switch (cmd) {
+ case GF_SNAP_STATUS_TYPE_ALL:
+ {
+ ret = glusterd_get_all_snapshot_status (dict, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to "
+ "get snapshot status");
+ goto out;
+ }
+ break;
+ }
+ case GF_SNAP_STATUS_TYPE_SNAP:
+ {
+
+ ret = dict_get_str (dict, "snapname", &snapname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to "
+ "get snap name");
+ goto out;
+ }
+
+ snap = glusterd_find_snap_by_name (snapname);
+ if (!snap) {
+ ret = gf_asprintf (op_errstr, "Snap (%s) "
+ "not found", snapname);
+ if (ret < 0) {
+ goto out;
+ }
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR, "Unable to "
+ "get snap volinfo");
+ goto out;
+ }
+ ret = glusterd_get_each_snap_object_status (op_errstr,
+ rsp_dict, snap, "status.snap0");
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to "
+ "get status of snap %s", get_buffer);
+ goto out;
+ }
+ break;
+ }
+ case GF_SNAP_STATUS_TYPE_VOL:
+ {
+ ret = dict_get_str (dict, "volname", &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to"
+ " get volume name");
+ goto out;
+ }
+
+ ret = glusterd_get_snap_status_of_volume (op_errstr,
+ rsp_dict, volname, "status.vol0");
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Function :"
+ " glusterd_get_snap_status_of_volume "
+ "failed");
+ goto out;
+ }
+ }
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+int32_t
+glusterd_snapshot_create_postvalidate (dict_t *dict, int32_t op_ret,
+ char **op_errstr, dict_t *rsp_dict)
+{
+ xlator_t *this = NULL;
+ glusterd_conf_t *priv = NULL;
+ int ret = -1;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (op_ret) {
+ ret = glusterd_do_snap_cleanup (dict, op_errstr, rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "cleanup operation "
+ "failed");
+ goto out;
+ }
+ } else {
+ ret = glusterd_snapshot_update_snaps_post_validate (dict,
+ op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "create snapshot");
+ goto out;
+ }
+ }
+
+ ret = 0;
out:
return ret;
}
+
int32_t
glusterd_snapshot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
{
@@ -3168,7 +4867,7 @@ glusterd_snapshot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
GF_ASSERT (this);
GF_ASSERT (dict);
- GF_ASSERT (rsp_dict); //not sure if this is needed, verify.
+ GF_ASSERT (rsp_dict);
priv = this->private;
GF_ASSERT (priv);
@@ -3184,6 +4883,11 @@ glusterd_snapshot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
case (GF_SNAP_OPTION_TYPE_CREATE):
ret = glusterd_snapshot_create_commit (dict, op_errstr,
rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "create snapshot");
+ goto out;
+ }
break;
case GF_SNAP_OPTION_TYPE_CONFIG:
@@ -3191,8 +4895,19 @@ glusterd_snapshot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
rsp_dict);
break;
+ case GF_SNAP_OPTION_TYPE_DELETE:
+ ret = glusterd_snapshot_remove_commit (dict, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "delete snapshot");
+ goto out;
+ }
+ break;
+
case GF_SNAP_OPTION_TYPE_RESTORE:
- ret = glusterd_snapshot_restore (dict, op_errstr);
+ ret = glusterd_snapshot_restore (dict, op_errstr,
+ rsp_dict);
if (ret) {
gf_log (this->name, GF_LOG_WARNING, "Failed to "
"restore snapshot");
@@ -3201,16 +4916,222 @@ glusterd_snapshot (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
break;
+ case GF_SNAP_OPTION_TYPE_STATUS:
+ ret = glusterd_snapshot_status_commit (dict, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "show snapshot status");
+ goto out;
+ }
+ break;
+
+
default:
gf_log (this->name, GF_LOG_WARNING, "invalid snap command");
goto out;
break;
}
+ ret = 0;
+
+out:
+ return ret;
+}
+
+int
+glusterd_snapshot_brickop (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
+{
+ int ret = -1;
+ int64_t vol_count = 0;
+ int64_t count = 1;
+ char key[1024] = {0,};
+ char *volname = NULL;
+ int32_t snap_command = 0;
+ xlator_t *this = NULL;
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
+
+ ret = dict_get_int32 (dict, "type", &snap_command);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "unable to get the type of "
+ "the snapshot command");
+ goto out;
+ }
+
+ switch (snap_command) {
+ case GF_SNAP_OPTION_TYPE_CREATE:
+ ret = dict_get_int64 (dict, "volcount", &vol_count);
+ if (ret)
+ goto out;
+ while (count <= vol_count) {
+ snprintf (key, 1024, "volname%"PRId64, count);
+ ret = dict_get_str (dict, key, &volname);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to get volname");
+ goto out;
+ }
+ ret = dict_set_str (dict, "volname", volname);
+ if (ret)
+ goto out;
+
+ ret = gd_brick_op_phase (GD_OP_SNAP, NULL, dict,
+ op_errstr);
+ if (ret)
+ goto out;
+ volname = NULL;
+ count++;
+ }
+
+ dict_del (dict, "volname");
+ ret = 0;
+ break;
+ case GF_SNAP_OPTION_TYPE_DELETE:
+ break;
+ default:
+ break;
+ }
+
+out:
+ return ret;
+}
+
+int
+glusterd_snapshot_prevalidate (dict_t *dict, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ int snap_command = 0;
+ xlator_t *this = NULL;
+ int ret = -1;
+
+ this = THIS;
+
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
+
+ ret = dict_get_int32 (dict, "type", &snap_command);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "unable to get the type of "
+ "the snapshot command");
+ goto out;
+ }
+
+ switch (snap_command) {
+ case (GF_SNAP_OPTION_TYPE_CREATE):
+ ret = glusterd_snapshot_create_prevalidate (dict, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Snapshot create "
+ "pre-validation failed");
+ goto out;
+ }
+ break;
+
+ case (GF_SNAP_OPTION_TYPE_CONFIG):
+ ret = glusterd_snapshot_config_prevalidate (dict, op_errstr);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Snapshot config "
+ "pre-validation failed");
+ goto out;
+ }
+ break;
+
+ case GF_SNAP_OPTION_TYPE_RESTORE:
+ ret = glusterd_snapshot_restore_prevalidate (dict, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Snapshot restore "
+ "validation failed");
+ goto out;
+ }
+ break;
+ case GF_SNAP_OPTION_TYPE_DELETE:
+ ret = glusterd_snapshot_remove_prevalidate (dict, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Snapshot remove "
+ "validation failed");
+ goto out;
+ }
+ break;
+
+ case GF_SNAP_OPTION_TYPE_STATUS:
+ ret = glusterd_snapshot_status_prevalidate (dict, op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Snapshot status "
+ "validation failed");
+ goto out;
+ }
+ break;
+
+ default:
+ gf_log (this->name, GF_LOG_WARNING, "invalid snap command");
+ goto out;
+ }
ret = 0;
+out:
+ return ret;
+}
+
+int
+glusterd_snapshot_postvalidate (dict_t *dict, int32_t op_ret, char **op_errstr,
+ dict_t *rsp_dict)
+{
+ int snap_command = 0;
+ xlator_t *this = NULL;
+ int ret = -1;
+
+ this = THIS;
+ GF_ASSERT (this);
+ GF_ASSERT (dict);
+ GF_ASSERT (rsp_dict);
+
+ ret = dict_get_int32 (dict, "type", &snap_command);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "unable to get the type of "
+ "the snapshot command");
+ goto out;
+ }
+
+ switch (snap_command) {
+ case GF_SNAP_OPTION_TYPE_CREATE:
+ ret = glusterd_snapshot_create_postvalidate (dict, op_ret,
+ op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Snapshot create "
+ "post-validation failed");
+ goto out;
+ }
+ break;
+
+ case GF_SNAP_OPTION_TYPE_DELETE:
+ case GF_SNAP_OPTION_TYPE_RESTORE:
+ ret = glusterd_snapshot_update_snaps_post_validate (dict,
+ op_errstr,
+ rsp_dict);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to "
+ "update missed snaps list");
+ goto out;
+ }
+ break;
+
+ default:
+ gf_log (this->name, GF_LOG_WARNING, "invalid snap command");
+ goto out;
+ }
+
+ ret = 0;
out:
return ret;
}
@@ -3227,6 +5148,7 @@ glusterd_handle_snapshot_fn (rpcsvc_request_t *req)
char *host_uuid = NULL;
char err_str[2048] = {0,};
xlator_t *this = NULL;
+ char *volname = NULL;
GF_ASSERT (req);
@@ -3289,7 +5211,12 @@ glusterd_handle_snapshot_fn (rpcsvc_request_t *req)
switch (type) {
case GF_SNAP_OPTION_TYPE_CREATE:
- ret = glusterd_mgmt_v3_initiate_snap_phases (req, cli_op, dict);
+ ret = glusterd_handle_snapshot_create (req, cli_op, dict,
+ err_str, sizeof (err_str));
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Snapshot create "
+ "failed: %s", err_str);
+ }
break;
case GF_SNAP_OPTION_TYPE_RESTORE:
ret = glusterd_handle_snapshot_restore (req, cli_op, dict,
@@ -3300,26 +5227,60 @@ glusterd_handle_snapshot_fn (rpcsvc_request_t *req)
}
break;
+ case GF_SNAP_OPTION_TYPE_INFO:
+ ret = glusterd_handle_snapshot_info (req, cli_op, dict,
+ err_str, sizeof (err_str));
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Snapshot info "
+ "failed");
+ }
+ break;
case GF_SNAP_OPTION_TYPE_LIST:
- ret = glusterd_handle_snapshot_list (req, cli_op, dict);
+ ret = glusterd_handle_snapshot_list (req, cli_op, dict,
+ err_str, sizeof (err_str));
if (ret) {
gf_log (this->name, GF_LOG_WARNING, "Snapshot list "
"failed");
}
break;
case GF_SNAP_OPTION_TYPE_CONFIG:
+ /* TODO : Type of lock to be taken when we are setting
+ * limits system wide
+ */
+ ret = dict_get_str (dict, "volname", &volname);
+ if (!volname) {
+ ret = dict_set_int32 (dict, "hold_vol_locks",
+ _gf_false);
+ if (ret) {
+ gf_log ("cli", GF_LOG_ERROR,
+ "Unable to set hold_vol_locks value "
+ "as _gf_false");
+ goto out;
+ }
+
+ }
ret = glusterd_mgmt_v3_initiate_all_phases (req, cli_op, dict);
break;
case GF_SNAP_OPTION_TYPE_DELETE:
+ ret = glusterd_handle_snapshot_remove (req, cli_op, dict,
+ err_str,
+ sizeof (err_str));
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Snapshot delete "
+ "failed: %s", err_str);
+ }
+ break;
case GF_SNAP_OPTION_TYPE_START:
case GF_SNAP_OPTION_TYPE_STOP:
case GF_SNAP_OPTION_TYPE_STATUS:
- gf_log (this->name, GF_LOG_ERROR, "Operation (%d) not "
- "supported ", type);
-
- ret = -1; /* Failure */
+ ret = glusterd_handle_snapshot_status (req, cli_op, dict,
+ err_str,
+ sizeof (err_str));
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING, "Snapshot status "
+ "failed: %s", err_str);
+ }
break;
-
default:
gf_log (this->name, GF_LOG_ERROR, "Unkown snapshot request "
"type (%d)", type);
@@ -3343,3 +5304,287 @@ glusterd_handle_snapshot (rpcsvc_request_t *req)
{
return glusterd_big_locked_handler (req, glusterd_handle_snapshot_fn);
}
+
+static inline void
+glusterd_free_snap_op (glusterd_snap_op_t *snap_op)
+{
+ if (snap_op) {
+ if (snap_op->brick_path)
+ GF_FREE (snap_op->brick_path);
+
+ GF_FREE (snap_op);
+ }
+}
+
/* Look for duplicates and accordingly update the list.
 *
 * Merges @missed_snap_op into @missed_snapinfo's op list.  Ownership of
 * @missed_snap_op transfers to this function: it is either linked into
 * the list or freed here -- on every path the caller must NOT free it.
 *
 * Returns 0 on success (including the dedup/optimize cases), -1 only on
 * early snprintf-style failure paths (none currently reachable).
 */
int32_t
glusterd_update_missed_snap_entry (glusterd_missed_snap_info *missed_snapinfo,
                                   glusterd_snap_op_t *missed_snap_op)
{
        int32_t ret = -1;
        glusterd_snap_op_t *snap_opinfo = NULL;
        gf_boolean_t match = _gf_false;
        xlator_t *this = NULL;

        this = THIS;
        GF_ASSERT(this);
        GF_ASSERT(missed_snapinfo);
        GF_ASSERT(missed_snap_op);

        list_for_each_entry (snap_opinfo, &missed_snapinfo->snap_ops,
                             snap_ops_list) {
                /* Same brick and same op: either upgrade the status or
                 * treat as a duplicate. */
                if ((!strcmp (snap_opinfo->brick_path,
                              missed_snap_op->brick_path)) &&
                    (snap_opinfo->op == missed_snap_op->op)) {
                        /* If two entries have conflicting status
                         * GD_MISSED_SNAP_DONE takes precedence
                         */
                        if ((snap_opinfo->status == GD_MISSED_SNAP_PENDING) &&
                            (missed_snap_op->status == GD_MISSED_SNAP_DONE)) {
                                snap_opinfo->status = GD_MISSED_SNAP_DONE;
                                gf_log (this->name, GF_LOG_INFO,
                                        "Updating missed snap status "
                                        "for %s:%d:%s:%d as DONE",
                                        missed_snapinfo->node_snap_info,
                                        snap_opinfo->brick_num,
                                        snap_opinfo->brick_path,
                                        snap_opinfo->op);
                                ret = 0;
                                glusterd_free_snap_op (missed_snap_op);
                                goto out;
                        }
                        match = _gf_true;
                        break;
                } else if ((snap_opinfo->brick_num ==
                            missed_snap_op->brick_num) &&
                           (snap_opinfo->op == GF_SNAP_OPTION_TYPE_CREATE) &&
                           (missed_snap_op->op ==
                            GF_SNAP_OPTION_TYPE_DELETE)) {
                        /* Optimizing create and delete entries for the same
                         * brick and same node
                         */
                        gf_log (this->name, GF_LOG_INFO,
                                "Updating missed snap status "
                                "for %s:%d:%s:%d as DONE",
                                missed_snapinfo->node_snap_info,
                                snap_opinfo->brick_num,
                                snap_opinfo->brick_path,
                                snap_opinfo->op);
                        snap_opinfo->status = GD_MISSED_SNAP_DONE;
                        ret = 0;
                        glusterd_free_snap_op (missed_snap_op);
                        goto out;
                }
        }

        if (match == _gf_true) {
                /* Exact duplicate (same brick, op, status): drop it. */
                gf_log (this->name, GF_LOG_INFO,
                        "Duplicate entry. Not updating");
                glusterd_free_snap_op (missed_snap_op);
        } else {
                /* New (brick, op) pair: take ownership by linking in. */
                list_add_tail (&missed_snap_op->snap_ops_list,
                               &missed_snapinfo->snap_ops);
        }

        ret = 0;
out:
        gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
        return ret;
}
+
/* Add new missed snap entry to the missed_snaps list.
 *
 * Builds a glusterd_snap_op_t for (@brick_num, @brick_path, @snap_op,
 * @snap_status) and either appends it under a new per-node/per-snap
 * glusterd_missed_snap_info keyed by @missed_info ("<node-uuid>:<snap-uuid>"),
 * or merges it into an existing one via
 * glusterd_update_missed_snap_entry().
 *
 * Returns 0 on success, -1 on failure (all partially-built objects are
 * freed on the failure path).
 */
int32_t
glusterd_store_missed_snaps_list (char *missed_info, int32_t brick_num,
                                  char *brick_path, int32_t snap_op,
                                  int32_t snap_status)
{
        int32_t ret = -1;
        glusterd_missed_snap_info *missed_snapinfo = NULL;
        glusterd_snap_op_t *missed_snap_op = NULL;
        glusterd_conf_t *priv = NULL;
        gf_boolean_t match = _gf_false;
        gf_boolean_t free_missed_snap_info = _gf_false;
        xlator_t *this = NULL;

        this = THIS;
        GF_ASSERT(this);
        GF_ASSERT(missed_info);
        GF_ASSERT(brick_path);

        priv = this->private;
        GF_ASSERT (priv);

        /* Create the snap_op object consisting of the *
         * snap id and the op */
        ret = glusterd_missed_snap_op_new (&missed_snap_op);
        if (ret) {
                gf_log (this->name, GF_LOG_ERROR,
                        "Failed to create new missed snap object.");
                ret = -1;
                goto out;
        }

        missed_snap_op->brick_path = gf_strdup(brick_path);
        if (!missed_snap_op->brick_path) {
                ret = -1;
                goto out;
        }
        missed_snap_op->brick_num = brick_num;
        missed_snap_op->op = snap_op;
        missed_snap_op->status = snap_status;

        /* Look for other entries for the same node and same snap */
        list_for_each_entry (missed_snapinfo, &priv->missed_snaps_list,
                             missed_snaps) {
                if (!strcmp (missed_snapinfo->node_snap_info,
                             missed_info)) {
                        /* Found missed snapshot info for *
                         * the same node and same snap */
                        match = _gf_true;
                        break;
                }
        }

        if (match == _gf_false) {
                /* First snap op missed for the brick */
                ret = glusterd_missed_snapinfo_new (&missed_snapinfo);
                if (ret) {
                        gf_log (this->name, GF_LOG_ERROR,
                                "Failed to create missed snapinfo");
                        goto out;
                }
                /* From here on, a failure must also free the freshly
                 * allocated snapinfo (it is not yet linked into the
                 * global list). */
                free_missed_snap_info = _gf_true;
                missed_snapinfo->node_snap_info = gf_strdup(missed_info);
                if (!missed_snapinfo->node_snap_info) {
                        ret = -1;
                        goto out;
                }

                /* Link the op into the new snapinfo, and the snapinfo
                 * into the global list: ownership of both now lies with
                 * priv->missed_snaps_list. */
                list_add_tail (&missed_snap_op->snap_ops_list,
                               &missed_snapinfo->snap_ops);
                list_add_tail (&missed_snapinfo->missed_snaps,
                               &priv->missed_snaps_list);

                ret = 0;
                goto out;
        } else {
                /* Existing node+snap entry: merge/dedup.  The callee
                 * takes ownership of missed_snap_op on every path.
                 * NOTE(review): if the callee ever returned non-zero,
                 * the out: block below would double-free missed_snap_op;
                 * today it always returns 0 -- confirm before changing
                 * its error behavior. */
                ret = glusterd_update_missed_snap_entry (missed_snapinfo,
                                                         missed_snap_op);
                if (ret) {
                        gf_log (this->name, GF_LOG_ERROR,
                                "Failed to update existing missed snap entry.");
                        goto out;
                }
        }

out:
        if (ret) {
                glusterd_free_snap_op (missed_snap_op);

                if (missed_snapinfo &&
                    (free_missed_snap_info == _gf_true)) {
                        if (missed_snapinfo->node_snap_info)
                                GF_FREE (missed_snapinfo->node_snap_info);

                        GF_FREE (missed_snapinfo);
                }
        }

        gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
        return ret;
}
+
+/* Add missing snap entries to the in-memory conf->missed_snap_list */
+int32_t
+glusterd_add_missed_snaps_to_list (dict_t *dict, int32_t missed_snap_count)
+{
+ char *buf = NULL;
+ char *tmp = NULL;
+ char *save_ptr = NULL;
+ char *nodeid = NULL;
+ char *snap_uuid = NULL;
+ char *brick_path = NULL;
+ char missed_info[PATH_MAX] = "";
+ char name_buf[PATH_MAX] = "";
+ int32_t i = -1;
+ int32_t ret = -1;
+ int32_t brick_num = -1;
+ int32_t snap_op = -1;
+ int32_t snap_status = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ /* We can update the missed_snaps_list without acquiring *
+ * any additional locks as big lock will be held. */
+ for (i = 0; i < missed_snap_count; i++) {
+ snprintf (name_buf, sizeof(name_buf), "missed_snaps_%d",
+ i);
+ ret = dict_get_str (dict, name_buf, &buf);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to fetch %s", name_buf);
+ goto out;
+ }
+
+ gf_log (this->name, GF_LOG_DEBUG, "missed_snap_entry = %s",
+ buf);
+
+ /* Need to make a duplicate string coz the same dictionary *
+ * is resent to the non-originator nodes */
+ tmp = gf_strdup (buf);
+ if (!tmp) {
+ ret = -1;
+ goto out;
+ }
+
+ /* Fetch the node-id, snap-id, brick_num,
+ * brick_path, snap_op and snap status
+ */
+ nodeid = strtok_r (tmp, ":", &save_ptr);
+ snap_uuid = strtok_r (NULL, "=", &save_ptr);
+ brick_num = atoi(strtok_r (NULL, ":", &save_ptr));
+ brick_path = strtok_r (NULL, ":", &save_ptr);
+ snap_op = atoi(strtok_r (NULL, ":", &save_ptr));
+ snap_status = atoi(strtok_r (NULL, ":", &save_ptr));
+
+ if (!nodeid || !snap_uuid || !brick_path ||
+ brick_num < 1 || snap_op < 1 ||
+ snap_status < 1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Invalid missed_snap_entry");
+ ret = -1;
+ goto out;
+ }
+
+ snprintf (missed_info, sizeof(missed_info), "%s:%s",
+ nodeid, snap_uuid);
+
+ ret = glusterd_store_missed_snaps_list (missed_info,
+ brick_num,
+ brick_path,
+ snap_op,
+ snap_status);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to store missed snaps_list");
+ goto out;
+ }
+
+ GF_FREE (tmp);
+ tmp = NULL;
+ }
+
+out:
+ if (tmp)
+ GF_FREE (tmp);
+
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index f24b319c9..1c2ec58e8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -76,26 +76,6 @@ glusterd_store_create_brick_dir (glusterd_volinfo_t *volinfo)
return ret;
}
-int32_t
-glusterd_store_create_snap_brick_dir (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snapinfo)
-{
- int32_t ret = -1;
- char brickdirpath[PATH_MAX] = {0,};
- glusterd_conf_t *priv = NULL;
-
- GF_ASSERT (volinfo);
- GF_ASSERT (snapinfo);
-
- priv = THIS->private;
- GF_ASSERT (priv);
-
- GLUSTERD_GET_SNAP_BRICK_DIR (brickdirpath, volinfo, snapinfo->volname,
- priv);
- ret = gf_store_mkdir (brickdirpath);
-
- return ret;
-}
static void
glusterd_store_key_vol_brick_set (glusterd_brickinfo_t *brickinfo,
char *key_vol_brick, size_t len)
@@ -145,32 +125,6 @@ glusterd_store_brickinfopath_set (glusterd_volinfo_t *volinfo,
snprintf (brickpath, len, "%s/%s", brickdirpath, brickfname);
}
-static void
-glusterd_store_snap_brickinfopath_set (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snapinfo,
- glusterd_brickinfo_t *brickinfo,
- char *brickpath, size_t len)
-{
- char brickfname[PATH_MAX] = {0};
- char brickdirpath[PATH_MAX] = {0,};
- glusterd_conf_t *priv = NULL;
-
- GF_ASSERT (volinfo);
- GF_ASSERT (snapinfo);
- GF_ASSERT (brickpath);
- GF_ASSERT (brickinfo);
- GF_ASSERT (len >= PATH_MAX);
-
- priv = THIS->private;
- GF_ASSERT (priv);
-
- GLUSTERD_GET_SNAP_BRICK_DIR (brickdirpath, volinfo,
- snapinfo->volname, priv);
- glusterd_store_brickinfofname_set (brickinfo, brickfname,
- sizeof (brickfname));
- snprintf (brickpath, len, "%s/%s", brickdirpath, brickfname);
-}
-
gf_boolean_t
glusterd_store_is_valid_brickpath (char *volname, char *brick)
{
@@ -256,24 +210,6 @@ glusterd_store_create_brick_shandle_on_absence (glusterd_volinfo_t *volinfo,
}
int32_t
-glusterd_store_create_snap_brick_shandle_on_absence(glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snapinfo,
- glusterd_brickinfo_t *brickinfo)
-{
- char brickpath[PATH_MAX] = {0,};
- int32_t ret = 0;
-
- GF_ASSERT (volinfo);
- GF_ASSERT (snapinfo);
- GF_ASSERT (brickinfo);
-
- glusterd_store_snap_brickinfopath_set (volinfo, snapinfo, brickinfo,
- brickpath, sizeof (brickpath));
- ret = gf_store_handle_create_on_absence (&brickinfo->shandle,
- brickpath);
- return ret;
-}
-int32_t
glusterd_store_brickinfo_write (int fd, glusterd_brickinfo_t *brickinfo)
{
char value[256] = {0,};
@@ -305,6 +241,20 @@ glusterd_store_brickinfo_write (int fd, glusterd_brickinfo_t *brickinfo)
if (ret)
goto out;
+ if (strlen(brickinfo->device_path) > 0) {
+ snprintf (value, sizeof(value), "%s", brickinfo->device_path);
+ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_BRICK_DEVICE_PATH,
+ value);
+ if (ret)
+ goto out;
+ }
+
+ snprintf (value, sizeof(value), "%d", brickinfo->snap_status);
+ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_BRICK_SNAP_STATUS,
+ value);
+ if (ret)
+ goto out;
+
if (!brickinfo->vg[0])
goto out;
@@ -366,48 +316,12 @@ glusterd_store_brickinfo (glusterd_volinfo_t *volinfo,
goto out;
ret = glusterd_store_perform_brick_store (brickinfo);
-out:
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning with %d", ret);
- return ret;
-}
-
-int32_t
-glusterd_store_snap_brickinfo (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snapinfo,
- glusterd_brickinfo_t *brickinfo,
- int32_t brick_count,
- int vol_fd)
-{
- int32_t ret = -1;
-
- GF_ASSERT (volinfo);
- GF_ASSERT (snapinfo);
- GF_ASSERT (brickinfo);
-
- ret = glusterd_store_volinfo_brick_fname_write (vol_fd, brickinfo,
- brick_count);
- if (ret)
- goto out;
-
- ret = glusterd_store_create_snap_brick_dir (volinfo, snapinfo);
- if (ret)
- goto out;
-
- ret = glusterd_store_create_snap_brick_shandle_on_absence (volinfo,
- snapinfo,
- brickinfo);
- if (ret)
- goto out;
-
- ret = glusterd_store_perform_brick_store (brickinfo);
- if (ret)
- goto out;
- ret = gf_store_rename_tmppath (brickinfo->shandle);
out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning with %d", ret);
return ret;
}
+
int32_t
glusterd_store_delete_brick (glusterd_brickinfo_t *brickinfo, char *delete_path)
{
@@ -607,14 +521,13 @@ int _storeopts (dict_t *this, char *key, data_t *value, void *data)
int32_t
glusterd_volume_exclude_options_write (int fd, glusterd_volinfo_t *volinfo)
{
- char *str = NULL;
+ char *str = NULL;
+ char buf[PATH_MAX] = {0,};
+ int32_t ret = -1;
GF_ASSERT (fd > 0);
GF_ASSERT (volinfo);
- char buf[PATH_MAX] = {0,};
- int32_t ret = -1;
-
snprintf (buf, sizeof (buf), "%d", volinfo->type);
ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_TYPE, buf);
if (ret)
@@ -656,6 +569,14 @@ glusterd_volume_exclude_options_write (int fd, glusterd_volinfo_t *volinfo)
if (ret)
goto out;
+ snprintf (buf, sizeof (buf), "%s", volinfo->parent_volname);
+ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_PARENT_VOLNAME, buf);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to store "
+ GLUSTERD_STORE_KEY_PARENT_VOLNAME);
+ goto out;
+ }
+
ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_ID,
uuid_utoa (volinfo->volume_id));
if (ret)
@@ -695,6 +616,23 @@ glusterd_volume_exclude_options_write (int fd, glusterd_volinfo_t *volinfo)
goto out;
}
+ snprintf (buf, sizeof (buf), "%d", volinfo->is_volume_restored);
+ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_IS_RESTORED, buf);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Unable to write is_volume_restored");
+ goto out;
+ }
+
+ snprintf (buf, sizeof (buf), "%"PRIu64, volinfo->snap_max_hard_limit);
+ ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT,
+ buf);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR,
+ "Unable to write snap-max-hard-limit");
+ goto out;
+ }
+
out:
if (ret)
gf_log (THIS->name, GF_LOG_ERROR, "Unable to write volume "
@@ -712,8 +650,7 @@ glusterd_store_voldirpath_set (glusterd_volinfo_t *volinfo, char *voldirpath,
priv = THIS->private;
GF_ASSERT (priv);
- snprintf (voldirpath, len, "%s/%s/%s", priv->workdir,
- GLUSTERD_VOLUME_DIR_PREFIX, volinfo->volname);
+ GLUSTERD_GET_VOLUME_DIR (voldirpath, volinfo, priv);
}
static int32_t
@@ -727,98 +664,29 @@ glusterd_store_create_volume_dir (glusterd_volinfo_t *volinfo)
glusterd_store_voldirpath_set (volinfo, voldirpath,
sizeof (voldirpath));
ret = gf_store_mkdir (voldirpath);
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning with %d", ret);
- return ret;
-}
-
-static void
-glusterd_store_vol_snaps_cg_dirpath_set (char *cgdirpath, size_t len)
-{
- glusterd_conf_t *priv = NULL;
-
- priv = THIS->private;
- GF_ASSERT (priv);
-
- snprintf (cgdirpath, len, "%s/%s", priv->workdir,
- GLUSTERD_VOL_SNAP_CG_DIR_PREFIX);
-}
-static void
-glusterd_store_vol_snaps_dirpath_set (glusterd_volinfo_t *volinfo,
- char *snapdirpath, size_t len)
-{
- glusterd_conf_t *priv = NULL;
-
- GF_ASSERT (volinfo);
- priv = THIS->private;
- GF_ASSERT (priv);
- snprintf (snapdirpath, len, "%s/%s/%s/%s", priv->workdir,
- GLUSTERD_VOLUME_DIR_PREFIX, volinfo->volname,
- GLUSTERD_VOL_SNAP_DIR_PREFIX);
-}
-
-static void
-glusterd_store_snap_vol_dirpath_set (glusterd_volinfo_t *volinfo,
- char *snapdirpath, size_t len,
- char *snap_name)
-{
- glusterd_conf_t *priv = NULL;
- size_t strlen = 0;
- char path[PATH_MAX] = {0,};
-
- GF_ASSERT (volinfo);
- priv = THIS->private;
- GF_ASSERT (priv);
-
- glusterd_store_vol_snaps_dirpath_set (volinfo, path, sizeof (path));
- strlen = sizeof (path) + sizeof (*snap_name);
- snprintf (snapdirpath, strlen, "%s/%s", path, snap_name);
-}
-/* creates GLUSTERD_VOLUME_DIR_PREFIX/<volname>/snaps directory */
-static int32_t
-glusterd_store_create_snaps_dir (glusterd_volinfo_t *volinfo)
-{
- int32_t ret = -1;
- char snapdirpath[PATH_MAX] = {0,};
-
- GF_ASSERT (volinfo);
-
- glusterd_store_vol_snaps_dirpath_set (volinfo, snapdirpath,
- sizeof (snapdirpath));
- ret = gf_store_mkdir (snapdirpath);
gf_log (THIS->name, GF_LOG_DEBUG, "Returning with %d", ret);
return ret;
}
-/* creates GLUSTERD_VOLUME_DIR_PREFIX/cg directory */
static int32_t
-glusterd_store_create_snaps_cg_dir ()
+glusterd_store_create_snap_dir (glusterd_snap_t *snap)
{
- int32_t ret = -1;
- char cgdirpath[PATH_MAX] = {0,};
-
- glusterd_store_vol_snaps_cg_dirpath_set (cgdirpath,
- sizeof (cgdirpath));
- ret = gf_store_mkdir (cgdirpath);
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning with %d", ret);
- return ret;
-}
-/* creates GLUSTERD_VOLUME_DIR_PREFIX/<volname>/snaps/<snap-name> directory */
-static int32_t
-glusterd_store_create_snap_vol_dir (glusterd_volinfo_t *volinfo, char *snap_name)
-{
- int32_t ret = -1;
- char snapdirpath[PATH_MAX] = {0,};
+ int32_t ret = -1;
+ char snapdirpath[PATH_MAX] = {0,};
+ glusterd_conf_t *priv = NULL;
- GF_ASSERT (volinfo);
- GF_ASSERT (snap_name);
+ priv = THIS->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (snap);
- glusterd_store_snap_vol_dirpath_set (volinfo, snapdirpath,
- sizeof (snapdirpath),
- snap_name);
+ GLUSTERD_GET_SNAP_DIR (snapdirpath, snap, priv);
- ret = gf_store_mkdir (snapdirpath);
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning with %d", ret);
+ ret = mkdir_p (snapdirpath, 0755, _gf_true);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to create snaps dir "
+ "%s", snapdirpath);
+ }
return ret;
}
@@ -846,6 +714,49 @@ out:
return ret;
}
/* Serialize the metadata of @snap (snap id, status, restored flag,
 * optional description, creation timestamp) into a temporary copy of the
 * snap's info file obtained from its store handle.
 *
 * NOTE(review): the tmp file created by gf_store_mkstemp() is not renamed
 * into place here; presumably the caller commits it via
 * gf_store_rename_tmppath() -- confirm at the call sites.
 *
 * Returns 0 on success, -1 on failure.
 */
int32_t
glusterd_store_snapinfo_write (glusterd_snap_t *snap)
{
        int32_t ret = -1;
        int fd = 0;
        char buf[PATH_MAX] = "";

        GF_ASSERT (snap);

        fd = gf_store_mkstemp (snap->shandle);
        if (fd <= 0)
                goto out;

        ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAP_ID,
                                   uuid_utoa (snap->snap_id));
        if (ret)
                goto out;

        snprintf (buf, sizeof (buf), "%d", snap->snap_status);
        ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAP_STATUS, buf);
        if (ret)
                goto out;

        snprintf (buf, sizeof (buf), "%d", snap->snap_restored);
        ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAP_RESTORED, buf);
        if (ret)
                goto out;

        /* Description is optional; skip the key entirely when absent. */
        if (snap->description) {
                ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAP_DESC,
                                           snap->description);
                if (ret)
                        goto out;
        }

        /* NOTE(review): "%ld" assumes time_stamp is a long -- confirm the
         * field's declared type for unusual platforms. */
        snprintf (buf, sizeof (buf), "%ld", snap->time_stamp);
        ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAP_TIMESTAMP, buf);

out:
        gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
        return ret;
}
+
static void
glusterd_store_rbstatepath_set (glusterd_volinfo_t *volinfo, char *rbstatepath,
size_t len)
@@ -891,79 +802,37 @@ glusterd_store_node_state_path_set (glusterd_volinfo_t *volinfo,
}
static void
-glusterd_store_snap_list_path_set (glusterd_volinfo_t *volinfo,
- char *snap_list_path, size_t len)
+glusterd_store_missed_snaps_list_path_set (char *missed_snaps_list,
+ size_t len)
{
- char voldirpath[PATH_MAX] = {0,};
- GF_ASSERT (volinfo);
- GF_ASSERT (snap_list_path);
- GF_ASSERT (len <= PATH_MAX);
-
- glusterd_store_voldirpath_set (volinfo, voldirpath,
- sizeof (voldirpath));
- snprintf (snap_list_path, len, "%s/%s", voldirpath,
- GLUSTERD_VOL_SNAP_FILE);
-}
+ glusterd_conf_t *priv = NULL;
-static void
-glusterd_store_snap_volfpath_set (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snapinfo,
- char *volfpath, size_t len)
-{
- char voldirpath[PATH_MAX] = {0,};
- GF_ASSERT (volinfo);
- GF_ASSERT (volfpath);
+ priv = THIS->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (missed_snaps_list);
GF_ASSERT (len <= PATH_MAX);
- glusterd_store_snap_vol_dirpath_set (volinfo, voldirpath,
- sizeof (voldirpath),
- snapinfo->volname);
- snprintf (volfpath, len, "%s/%s", voldirpath, GLUSTERD_VOLUME_INFO_FILE);
-}
-
-int32_t
-glusterd_store_create_snap_vol_shandle_on_absence (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snapinfo)
-{
- char volfpath[PATH_MAX] = {0};
- int32_t ret = 0;
-
- GF_ASSERT (volinfo);
-
- glusterd_store_snap_volfpath_set (volinfo, snapinfo,
- volfpath, sizeof (volfpath));
- ret = gf_store_handle_create_on_absence (&snapinfo->shandle, volfpath);
- return ret;
+ snprintf (missed_snaps_list, len, "%s/snaps/"
+ GLUSTERD_MISSED_SNAPS_LIST_FILE, priv->workdir);
}
static void
-glusterd_store_snap_cgfpath_set (char *cg_name,
- char *cgfpath, size_t len)
+glusterd_store_snapfpath_set (glusterd_snap_t *snap, char *snap_fpath,
+ size_t len)
{
- char cgdirpath[PATH_MAX] = {0,};
- GF_ASSERT (cg_name);
- GF_ASSERT (cgfpath);
+ glusterd_conf_t *priv = NULL;
+
+ priv = THIS->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (snap);
+ GF_ASSERT (snap_fpath);
GF_ASSERT (len <= PATH_MAX);
- glusterd_store_vol_snaps_cg_dirpath_set (cgdirpath,
- sizeof (cgdirpath));
- snprintf (cgfpath, len, "%s/%s.info", cgdirpath, cg_name);
+ snprintf (snap_fpath, len, "%s/snaps/%s/%s", priv->workdir,
+ snap->snapname, GLUSTERD_SNAP_INFO_FILE);
}
int32_t
-glusterd_store_create_snap_cg_shandle_on_absence (glusterd_snap_cg_t *cg)
-{
- char cgfpath[PATH_MAX] = {0};
- int32_t ret = 0;
-
- GF_ASSERT (cg);
-
- glusterd_store_snap_cgfpath_set (cg->cg_name, cgfpath,
- sizeof (cgfpath));
- ret = gf_store_handle_create_on_absence (&cg->shandle, cgfpath);
- return ret;
-}
-int32_t
glusterd_store_create_rbstate_shandle_on_absence (glusterd_volinfo_t *volinfo)
{
char rbstatepath[PATH_MAX] = {0};
@@ -1007,47 +876,44 @@ glusterd_store_create_nodestate_sh_on_absence (glusterd_volinfo_t *volinfo)
return ret;
}
-int32_t
-glusterd_store_create_snap_list_sh_on_absence (glusterd_volinfo_t *volinfo)
+static int32_t
+glusterd_store_create_missed_snaps_list_shandle_on_absence ()
{
- char snap_list_path[PATH_MAX] = {0};
- int32_t ret = 0;
+ char missed_snaps_list[PATH_MAX] = "";
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
- GF_ASSERT (volinfo);
+ this = THIS;
+ GF_ASSERT (this);
- glusterd_store_snap_list_path_set (volinfo, snap_list_path,
- sizeof (snap_list_path));
- ret =
- gf_store_handle_create_on_absence (&volinfo->snap_list_shandle,
- snap_list_path);
+ priv = this->private;
+ GF_ASSERT (priv);
+ glusterd_store_missed_snaps_list_path_set (missed_snaps_list,
+ sizeof(missed_snaps_list));
+
+ ret = gf_store_handle_create_on_absence
+ (&priv->missed_snaps_list_shandle,
+ missed_snaps_list);
return ret;
}
int32_t
-glusterd_store_brickinfos (glusterd_volinfo_t *volinfo, int vol_fd)
+glusterd_store_create_snap_shandle_on_absence (glusterd_snap_t *snap)
{
- int32_t ret = 0;
- glusterd_brickinfo_t *brickinfo = NULL;
- int32_t brick_count = 0;
+ char snapfpath[PATH_MAX] = {0};
+ int32_t ret = 0;
- GF_ASSERT (volinfo);
+ GF_ASSERT (snap);
- list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- ret = glusterd_store_brickinfo (volinfo, brickinfo,
- brick_count, vol_fd);
- if (ret)
- goto out;
- brick_count++;
- }
-out:
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
+ glusterd_store_snapfpath_set (snap, snapfpath, sizeof (snapfpath));
+ ret = gf_store_handle_create_on_absence (&snap->shandle, snapfpath);
return ret;
}
int32_t
-glusterd_store_snap_brickinfos (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snapinfo, int vol_fd)
+glusterd_store_brickinfos (glusterd_volinfo_t *volinfo, int vol_fd)
{
int32_t ret = 0;
glusterd_brickinfo_t *brickinfo = NULL;
@@ -1055,10 +921,9 @@ glusterd_store_snap_brickinfos (glusterd_volinfo_t *volinfo,
GF_ASSERT (volinfo);
- list_for_each_entry (brickinfo, &snapinfo->bricks, brick_list) {
- ret = glusterd_store_snap_brickinfo (volinfo, snapinfo,
- brickinfo, brick_count,
- vol_fd);
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ ret = glusterd_store_brickinfo (volinfo, brickinfo,
+ brick_count, vol_fd);
if (ret)
goto out;
brick_count++;
@@ -1067,6 +932,7 @@ out:
gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
+
int32_t
glusterd_store_rbstate_write (int fd, glusterd_volinfo_t *volinfo)
{
@@ -1190,365 +1056,6 @@ out:
}
int32_t
-glusterd_store_snap_list_write (int fd, glusterd_snap_t *snap, uint64_t count)
-{
- int ret = -1;
- char key[256] = {0, };
- char buf[PATH_MAX] = {0, };
-
- GF_ASSERT (fd > 0);
- GF_ASSERT (snap);
-
- snprintf (key, sizeof (key), "%s-%"PRIu64, GLUSTERD_STORE_KEY_SNAP_NAME,
- count);
- ret = gf_store_save_value (fd, key, snap->snap_name);
- if (ret)
- goto out;
-
- snprintf (key, sizeof (key), "%s-%"PRIu64, GLUSTERD_STORE_KEY_SNAP_ID,
- count);
- ret = gf_store_save_value (fd, key, uuid_utoa(snap->snap_id));
- if (ret)
- goto out;
-
- snprintf (key, sizeof (key), "%s-%"PRIu64,
- GLUSTERD_STORE_KEY_SNAP_CG_ID, count);
- ret = gf_store_save_value (fd, key, uuid_utoa(snap->cg_id));
- if (ret)
- goto out;
-
- snprintf (buf, sizeof (buf), "%d", snap->snap_status);
- snprintf (key, sizeof (key), "%s-%"PRIu64,
- GLUSTERD_STORE_KEY_SNAP_STATUS, count);
- ret = gf_store_save_value (fd, key, buf);
- if (ret)
- goto out;
-out:
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
-}
-
-int32_t
-glusterd_store_snap_cg_write (int fd, glusterd_snap_cg_t *cg)
-{
- int ret = -1;
- char buf[PATH_MAX] = {0, };
- uint64_t count = 0;
-
- GF_ASSERT (fd > 0);
- GF_ASSERT (cg);
-
- ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAP_NAME,
- cg->cg_name);
- if (ret)
- goto out;
-
- snprintf (buf, sizeof (buf), "%"PRIu64, cg->volume_count);
- ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_CG_VOL_COUNT, buf);
- if (ret)
- goto out;
-
- ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAP_CG_ID,
- uuid_utoa(cg->cg_id));
- if (ret)
- goto out;
-
- snprintf (buf, sizeof (buf), "%d", cg->cg_status);
- ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAP_STATUS, buf);
- if (ret)
- goto out;
- while (count < cg->volume_count) {
- ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_VOL_ID,
- uuid_utoa (cg->volumes[count].volume_id));
- if (ret)
- goto out;
-
- count++;
- }
-out:
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
-}
-int32_t
-glusterd_store_perform_snap_cg_store (glusterd_snap_cg_t *cg)
-{
- int fd = -1;
- int32_t ret = -1;
- GF_ASSERT (cg);
-
- ret = glusterd_store_create_snap_cg_shandle_on_absence (cg);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to create "
- " shandle for cg %s ", cg->cg_name);
- goto out;
- }
- fd = gf_store_mkstemp (cg->shandle);
- if (fd <= 0) {
- ret = -1;
- goto out;
- }
-
- ret = glusterd_store_snap_cg_write (fd, cg);
- if (ret)
- goto out;
-out:
- if (ret && (fd > 0))
- gf_store_unlink_tmppath (cg->shandle);
- if (fd > 0)
- close (fd);
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
-}
-
-int32_t
-glusterd_store_perform_snap_volume_store (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snap_volinfo)
-{
- int fd = -1;
- int32_t ret = -1;
- GF_ASSERT (volinfo);
- GF_ASSERT (snap_volinfo);
-
- ret = glusterd_store_create_snap_vol_shandle_on_absence (volinfo,
- snap_volinfo);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to create "
- " shandle for snap %s of volume %s", volinfo->volname,
- snap_volinfo->volname);
- goto out;
- }
- fd = gf_store_mkstemp (snap_volinfo->shandle);
- if (fd <= 0) {
- ret = -1;
- goto out;
- }
-
- ret = glusterd_store_volinfo_write (fd, snap_volinfo);
- if (ret)
- goto out;
-
- ret = glusterd_store_snap_brickinfos (volinfo, snap_volinfo, fd);
- if (ret)
- goto out;
-
- ret = gf_store_rename_tmppath (snap_volinfo->shandle);
- if (ret)
- goto out;
-out:
- if (ret && (fd > 0))
- gf_store_unlink_tmppath (volinfo->shandle);
- if (fd > 0)
- close (fd);
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
-}
-int32_t
-glusterd_store_snap_volume (glusterd_volinfo_t *volinfo, glusterd_snap_t *snap)
-{
- int32_t ret = -1;
- glusterd_volinfo_t *snap_volinfo = NULL;
-
- GF_ASSERT (volinfo);
- GF_ASSERT (snap);
-
-
- LOCK (&snap->lock);
- {
- snap_volinfo = snap->snap_volume;
-
- if (!snap_volinfo)
- goto unlock;
-
- ret = glusterd_store_create_snap_vol_dir (volinfo,
- snap->snap_name);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed create snapshot dir (%s) for volume "
- "%s", snap->snap_name, volinfo->volname);
- goto unlock;
- }
-
- ret = glusterd_store_perform_snap_volume_store (volinfo,
- snap_volinfo);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed store snapshot volinfo (%s) for volume "
- "%s", snap->snap_name, volinfo->volname);
- goto unlock;
- }
- }
-unlock:
- UNLOCK (&snap->lock);
-
- return 0;
-
-}
-
-int32_t
-glusterd_store_perform_snap_list_store (glusterd_volinfo_t *volinfo)
-{
- int fd = -1;
- int32_t ret = -1;
- glusterd_snap_t *entry = NULL;
- glusterd_snap_t *tmp = NULL;
- uint64_t count = 0;
- char buf[PATH_MAX] = {0,};
-
- GF_ASSERT (volinfo);
-
- ret = glusterd_store_create_snap_list_sh_on_absence (volinfo);
- if (ret)
- goto out;
- fd = gf_store_mkstemp (volinfo->snap_list_shandle);
- if (fd <= 0) {
- ret = -1;
- goto out;
- }
-
- LOCK (&volinfo->lock);
- {
- list_for_each_entry_safe (entry, tmp, &volinfo->snaps,
- snap_list) {
- ret = glusterd_store_snap_list_write (fd, entry, count);
- if (ret)
- goto unlock;
- count++;
- }
- snprintf (buf, sizeof(buf), "%"PRIu64, count);
- ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAP_COUNT,
- buf);
- if (ret)
- goto unlock;
- }
-unlock:
- UNLOCK (&volinfo->lock);
-
- ret = gf_store_rename_tmppath (volinfo->snap_list_shandle);
- if (ret)
- goto out;
-
-out:
- if (ret && (fd > 0))
- gf_store_unlink_tmppath (volinfo->snap_list_shandle);
- if (fd > 0)
- close (fd);
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
-}
-int32_t
-glusterd_store_perform_snap_store (glusterd_volinfo_t *volinfo)
-{
- int fd = -1;
- int32_t ret = -1;
- glusterd_snap_t *entry = NULL;
- glusterd_snap_t *tmp = NULL;
- uint64_t count = 0;
- char buf[PATH_MAX] = {0,};
-
- GF_ASSERT (volinfo);
-
- ret = glusterd_store_create_snaps_dir (volinfo);
- if (ret)
- goto out;
-
- ret = glusterd_store_create_snap_list_sh_on_absence (volinfo);
- if (ret)
- goto out;
- fd = gf_store_mkstemp (volinfo->snap_list_shandle);
- if (fd <= 0) {
- ret = -1;
- goto out;
- }
-
- LOCK (&volinfo->lock);
- {
- list_for_each_entry_safe (entry, tmp, &volinfo->snaps,
- snap_list) {
- ret = glusterd_store_snap_list_write (fd, entry, count);
- if (ret)
- goto unlock;
- ret = glusterd_store_snap_volume (volinfo, entry);
- if (ret)
- goto unlock;
- count++;
- }
- snprintf (buf, sizeof(buf), "%"PRIu64, count);
- ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAP_COUNT,
- buf);
- if (ret)
- goto unlock;
- }
-unlock:
- UNLOCK (&volinfo->lock);
-
- ret = gf_store_rename_tmppath (volinfo->snap_list_shandle);
- if (ret)
- goto out;
-
-out:
- if (ret && (fd > 0))
- gf_store_unlink_tmppath (volinfo->snap_list_shandle);
- if (fd > 0)
- close (fd);
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
-}
-
-int32_t
-glusterd_store_snap_cg (glusterd_snap_cg_t *cg)
-{
- int32_t ret = -1;
-
- GF_ASSERT (cg);
-
-
- LOCK (&cg->lock);
- {
- ret = glusterd_store_perform_snap_cg_store (cg);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed store snap cg (%s)", cg->cg_name);
- goto unlock;
- }
- }
-unlock:
- UNLOCK (&cg->lock);
-
- return 0;
-
-}
-int32_t
-glusterd_store_perform_snap_cgs_store (glusterd_conf_t *priv)
-{
- int32_t ret = -1;
- glusterd_snap_cg_t *entry = NULL;
- glusterd_snap_cg_t *tmp = NULL;
-
- GF_ASSERT (priv);
-
-
- ret = glusterd_store_create_snaps_cg_dir (priv);
- if (ret)
- goto out;
-
-// LOCK (&priv->lock);
- {
- list_for_each_entry_safe (entry, tmp, &priv->snap_cg,
- cg_list) {
- ret = glusterd_store_snap_cg (entry);
- if (ret)
- goto unlock;
- }
- }
-unlock:
-// UNLOCK (&priv->lock);
-
-out:
- gf_log (THIS->name, GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
-}
-
-int32_t
glusterd_store_perform_node_state_store (glusterd_volinfo_t *volinfo)
{
int fd = -1;
@@ -1650,8 +1157,14 @@ glusterd_store_volume_cleanup_tmp (glusterd_volinfo_t *volinfo)
gf_store_unlink_tmppath (volinfo->rb_shandle);
gf_store_unlink_tmppath (volinfo->node_state_shandle);
+}
- gf_store_unlink_tmppath (volinfo->snap_list_shandle);
+void
+glusterd_store_snap_cleanup_tmp (glusterd_snap_t *snap)
+{
+ GF_ASSERT (snap);
+
+ gf_store_unlink_tmppath (snap->shandle);
}
int32_t
@@ -1680,11 +1193,7 @@ glusterd_store_volinfo_atomic_update (glusterd_volinfo_t *volinfo)
ret = gf_store_rename_tmppath (volinfo->shandle);
if (ret)
goto out;
- snprintf (buf, sizeof (buf), "%"PRIu64, volinfo->snap_max_limit);
- ret = gf_store_save_value (fd, GLUSTERD_STORE_KEY_SNAP_MAX_LIMIT,
- buf);
- if (ret)
- goto out;
+
out:
if (ret)
gf_log (THIS->name, GF_LOG_ERROR, "Couldn't rename "
@@ -1709,6 +1218,60 @@ out:
}
int32_t
+glusterd_store_snap_atomic_update (glusterd_snap_t *snap)
+{
+ int ret = -1;
+ GF_ASSERT (snap);
+
+ ret = gf_store_rename_tmppath (snap->shandle);
+ if (ret)
+ gf_log (THIS->name, GF_LOG_ERROR, "Couldn't rename "
+ "temporary file(s): Reason %s", strerror (errno));
+
+ return ret;
+}
+
+int32_t
+glusterd_store_snap (glusterd_snap_t *snap)
+{
+ int32_t ret = -1;
+
+ GF_ASSERT (snap);
+
+ ret = glusterd_store_create_snap_dir (snap);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to create snap dir");
+ goto out;
+ }
+
+ ret = glusterd_store_create_snap_shandle_on_absence (snap);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to create snap info "
+ "file");
+ goto out;
+ }
+
+ ret = glusterd_store_snapinfo_write (snap);
+ if (ret) {
+ gf_log (THIS->name, GF_LOG_ERROR, "Failed to write snap info");
+ goto out;
+ }
+
+ ret = glusterd_store_snap_atomic_update (snap);
+ if (ret) {
+                gf_log (THIS->name, GF_LOG_ERROR, "Failed to do atomic update");
+ goto out;
+ }
+
+out:
+ if (ret)
+ glusterd_store_snap_cleanup_tmp (snap);
+
+ gf_log (THIS->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
+
+int32_t
glusterd_store_volinfo (glusterd_volinfo_t *volinfo, glusterd_volinfo_ver_ac_t ac)
{
int32_t ret = -1;
@@ -1732,9 +1295,6 @@ glusterd_store_volinfo (glusterd_volinfo_t *volinfo, glusterd_volinfo_ver_ac_t a
if (ret)
goto out;
- ret = glusterd_store_create_snap_list_sh_on_absence (volinfo);
- if (ret)
- goto out;
ret = glusterd_store_perform_volume_store (volinfo);
if (ret)
goto out;
@@ -1754,12 +1314,8 @@ glusterd_store_volinfo (glusterd_volinfo_t *volinfo, glusterd_volinfo_ver_ac_t a
if (ret)
goto out;
- ret = glusterd_store_perform_snap_store (volinfo);
- if (ret)
- goto out;
-
//checksum should be computed at the end
- ret = glusterd_volume_compute_cksum (volinfo, NULL);
+ ret = glusterd_volume_compute_cksum (volinfo);
if (ret)
goto out;
@@ -1773,32 +1329,36 @@ out:
}
int32_t
-glusterd_store_delete_snap_cg (glusterd_snap_cg_t *cg)
+glusterd_store_delete_volume (glusterd_volinfo_t *volinfo)
{
char pathname[PATH_MAX] = {0,};
int32_t ret = 0;
glusterd_conf_t *priv = NULL;
+ DIR *dir = NULL;
+ struct dirent *entry = NULL;
char path[PATH_MAX] = {0,};
char delete_path[PATH_MAX] = {0,};
char trashdir[PATH_MAX] = {0,};
+ struct stat st = {0, };
xlator_t *this = NULL;
gf_boolean_t rename_fail = _gf_false;
this = THIS;
GF_ASSERT (this);
- GF_ASSERT (cg);
+ GF_ASSERT (volinfo);
priv = this->private;
GF_ASSERT (priv);
- glusterd_store_snap_cgfpath_set (cg->cg_name, pathname, sizeof (path));
+ GLUSTERD_GET_VOLUME_DIR (pathname, volinfo, priv);
+
snprintf (delete_path, sizeof (delete_path),
- "%s/"GLUSTERD_TRASH"/%s/%s.deleted", priv->workdir,
- GLUSTERD_VOL_SNAP_CG_DIR_PREFIX, cg->cg_name);
+ "%s/"GLUSTERD_TRASH"/%s.deleted", priv->workdir,
+ uuid_utoa (volinfo->volume_id));
- snprintf (trashdir, sizeof (trashdir), "%s/"GLUSTERD_TRASH"/%s",
- priv->workdir, GLUSTERD_VOL_SNAP_CG_DIR_PREFIX);
+ snprintf (trashdir, sizeof (trashdir), "%s/"GLUSTERD_TRASH,
+ priv->workdir);
ret = mkdir (trashdir, 0777);
if (ret && errno != EEXIST) {
@@ -1810,19 +1370,66 @@ glusterd_store_delete_snap_cg (glusterd_snap_cg_t *cg)
ret = rename (pathname, delete_path);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to rename %s cg "
- "info file", cg->cg_name);
+ gf_log (this->name, GF_LOG_ERROR, "Failed to rename volume "
+ "directory for volume %s", volinfo->volname);
rename_fail = _gf_true;
goto out;
}
- ret = unlink (delete_path);
+ dir = opendir (delete_path);
+ if (!dir) {
+ gf_log (this->name, GF_LOG_DEBUG, "Failed to open directory %s."
+ " Reason : %s", delete_path, strerror (errno));
+ ret = 0;
+ goto out;
+ }
+ ret = glusterd_store_remove_bricks (volinfo, delete_path);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG, "Remove bricks failed for %s",
+ volinfo->volname);
+ }
+
+ glusterd_for_each_entry (entry, dir);
+ while (entry) {
+
+ snprintf (path, PATH_MAX, "%s/%s", delete_path, entry->d_name);
+ ret = stat (path, &st);
+ if (ret == -1) {
+ gf_log (this->name, GF_LOG_DEBUG, "Failed to stat "
+ "entry %s : %s", path, strerror (errno));
+ goto stat_failed;
+ }
+
+ if (S_ISDIR (st.st_mode))
+ ret = rmdir (path);
+ else
+ ret = unlink (path);
+
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG, " Failed to remove "
+ "%s. Reason : %s", path, strerror (errno));
+ }
+ gf_log (this->name, GF_LOG_DEBUG, "%s %s",
+ ret ? "Failed to remove":"Removed",
+ entry->d_name);
+stat_failed:
+ memset (path, 0, sizeof(path));
+ glusterd_for_each_entry (entry, dir);
+ }
+
+ ret = closedir (dir);
if (ret) {
- gf_log (this->name, GF_LOG_DEBUG, " Failed to remove "
- "%s. Reason : %s", delete_path, strerror (errno));
+ gf_log (this->name, GF_LOG_DEBUG, "Failed to close dir %s. "
+                        "Reason : %s", delete_path, strerror (errno));
}
+ ret = rmdir (delete_path);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG, "Failed to rmdir: %s,err: %s",
+ delete_path, strerror (errno));
+ }
ret = rmdir (trashdir);
if (ret) {
gf_log (this->name, GF_LOG_DEBUG, "Failed to rmdir: %s, Reason:"
@@ -1830,18 +1437,20 @@ glusterd_store_delete_snap_cg (glusterd_snap_cg_t *cg)
}
out:
- if (!ret && cg->shandle) {
- gf_store_handle_destroy (cg->shandle);
- cg->shandle = NULL;
+ if (volinfo->shandle) {
+ gf_store_handle_destroy (volinfo->shandle);
+ volinfo->shandle = NULL;
}
ret = (rename_fail == _gf_true) ? -1: 0;
gf_log (this->name, GF_LOG_DEBUG, "Returning %d", ret);
return ret;
}
+
+/*TODO: cleanup the duplicate code and implement a generic function for
+ * deleting snap/volume depending on the parameter flag */
int32_t
-glusterd_store_delete_volume (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snapinfo)
+glusterd_store_delete_snap (glusterd_snap_t *snap)
{
char pathname[PATH_MAX] = {0,};
int32_t ret = 0;
@@ -1856,23 +1465,15 @@ glusterd_store_delete_volume (glusterd_volinfo_t *volinfo,
gf_boolean_t rename_fail = _gf_false;
this = THIS;
- GF_ASSERT (this);
-
- GF_ASSERT (volinfo);
priv = this->private;
-
GF_ASSERT (priv);
- if (snapinfo) {
- GLUSTERD_GET_SNAP_DIR (pathname, volinfo, snapinfo->volname,
- priv);
- } else {
- GLUSTERD_GET_VOLUME_DIR (pathname, volinfo, priv);
- }
+ GF_ASSERT (snap);
+ GLUSTERD_GET_SNAP_DIR (pathname, snap, priv);
snprintf (delete_path, sizeof (delete_path),
- "%s/"GLUSTERD_TRASH"/%s.deleted", priv->workdir,
- uuid_utoa (volinfo->volume_id));
+ "%s/"GLUSTERD_TRASH"/snap-%s.deleted", priv->workdir,
+ uuid_utoa (snap->snap_id));
snprintf (trashdir, sizeof (trashdir), "%s/"GLUSTERD_TRASH,
priv->workdir);
@@ -1887,8 +1488,8 @@ glusterd_store_delete_volume (glusterd_volinfo_t *volinfo,
ret = rename (pathname, delete_path);
if (ret) {
- gf_log (this->name, GF_LOG_ERROR, "Failed to rename volume "
- "directory for volume %s", volinfo->volname);
+ gf_log (this->name, GF_LOG_ERROR, "Failed to rename snap "
+ "directory %s to %s", snap->snapname, delete_path);
rename_fail = _gf_true;
goto out;
}
@@ -1900,16 +1501,9 @@ glusterd_store_delete_volume (glusterd_volinfo_t *volinfo,
ret = 0;
goto out;
}
- ret = glusterd_store_remove_bricks (volinfo, delete_path);
-
- if (ret) {
- gf_log (this->name, GF_LOG_DEBUG, "Remove bricks failed for %s",
- volinfo->volname);
- }
glusterd_for_each_entry (entry, dir);
while (entry) {
-
snprintf (path, PATH_MAX, "%s/%s", delete_path, entry->d_name);
ret = stat (path, &st);
if (ret == -1) {
@@ -1954,9 +1548,9 @@ stat_failed:
}
out:
- if (volinfo->shandle) {
- gf_store_handle_destroy (volinfo->shandle);
- volinfo->shandle = NULL;
+ if (snap->shandle) {
+ gf_store_handle_destroy (snap->shandle);
+ snap->shandle = NULL;
}
ret = (rename_fail == _gf_true) ? -1: 0;
@@ -1964,7 +1558,6 @@ out:
return ret;
}
-
int
glusterd_store_global_info (xlator_t *this)
{
@@ -2027,14 +1620,24 @@ glusterd_store_global_info (xlator_t *this)
goto out;
}
- snprintf (buf, sizeof (buf), "%"PRIu64, conf->snap_max_limit);
+ snprintf (buf, sizeof (buf), "%"PRIu64, conf->snap_max_hard_limit);
ret = gf_store_save_value (handle->fd,
- GLUSTERD_STORE_KEY_SNAP_MAX_LIMIT, buf);
+ GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT, buf);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
- "Storing snap-max-limit failed ret = %d", ret);
+ "Storing snap-max-hard-limit failed ret = %d", ret);
goto out;
}
+
+ snprintf (buf, sizeof (buf), "%"PRIu64, conf->snap_max_soft_limit);
+ ret = gf_store_save_value (handle->fd,
+ GLUSTERD_STORE_KEY_SNAP_MAX_SOFT_LIMIT, buf);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Storing snap-max-soft-limit failed ret = %d", ret);
+ goto out;
+ }
+
ret = gf_store_rename_tmppath (handle);
out:
if (ret && (handle->fd > 0))
@@ -2107,7 +1710,8 @@ out:
}
int
-glusterd_retrieve_sys_snap_max_limit (xlator_t *this, uint64_t *limit)
+glusterd_retrieve_sys_snap_max_limit (xlator_t *this, uint64_t *limit,
+ char *key)
{
char *limit_str = NULL;
glusterd_conf_t *priv = NULL;
@@ -2117,8 +1721,13 @@ glusterd_retrieve_sys_snap_max_limit (xlator_t *this, uint64_t *limit)
char path[PATH_MAX] = {0,};
gf_store_handle_t *handle = NULL;
+ GF_ASSERT (this);
priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (limit);
+ GF_ASSERT (key);
+
if (!priv->handle) {
snprintf (path, PATH_MAX, "%s/%s", priv->workdir,
GLUSTERD_INFO_FILE);
@@ -2134,11 +1743,11 @@ glusterd_retrieve_sys_snap_max_limit (xlator_t *this, uint64_t *limit)
}
ret = gf_store_retrieve_value (priv->handle,
- GLUSTERD_STORE_KEY_SNAP_MAX_LIMIT,
+ key,
&limit_str);
if (ret) {
gf_log (this->name, GF_LOG_DEBUG,
- "No previous snap limit present");
+ "No previous %s present", key);
goto out;
}
@@ -2160,12 +1769,34 @@ out:
static int
glusterd_restore_op_version (xlator_t *this)
{
- glusterd_conf_t *conf = NULL;
- int ret = 0;
- int op_version = 0;
+ glusterd_conf_t *conf = NULL;
+ int ret = 0;
+ int op_version = 0;
conf = this->private;
+ ret = glusterd_retrieve_sys_snap_max_limit (this,
+ &conf->snap_max_hard_limit,
+ GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Unable to retrieve system snap-max-hard-limit, "
+ "setting it to default value(%d)",
+ GLUSTERD_SNAPS_MAX_HARD_LIMIT);
+ conf->snap_max_hard_limit = GLUSTERD_SNAPS_MAX_HARD_LIMIT;
+ }
+
+ ret = glusterd_retrieve_sys_snap_max_limit (this,
+ &conf->snap_max_soft_limit,
+ GLUSTERD_STORE_KEY_SNAP_MAX_SOFT_LIMIT);
+ if (ret) {
+ gf_log (this->name, GF_LOG_WARNING,
+ "Unable to retrieve system snap-max-soft-limit, "
+ "setting it to default value(%d)",
+ GLUSTERD_SNAPS_DEF_SOFT_LIMIT_PERCENT);
+ conf->snap_max_soft_limit = GLUSTERD_SNAPS_DEF_SOFT_LIMIT_PERCENT;
+ }
+
ret = glusterd_retrieve_op_version (this, &op_version);
if (!ret) {
if ((op_version < GD_OP_VERSION_MIN) ||
@@ -2204,9 +1835,6 @@ glusterd_restore_op_version (xlator_t *this)
" op-version to minimum : %d", GD_OP_VERSION_MIN);
conf->op_version = GD_OP_VERSION_MIN;
}
- ret = glusterd_retrieve_sys_snap_max_limit (this, &conf->snap_max_limit);
- if (ret)
- conf->snap_max_limit = GLUSTERD_SNAPS_MAX_LIMIT;
ret = 0;
out:
return ret;
@@ -2255,10 +1883,8 @@ out:
int32_t
-glusterd_store_retrieve_bricks (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snap_volinfo)
+glusterd_store_retrieve_bricks (glusterd_volinfo_t *volinfo)
{
-
int32_t ret = 0;
glusterd_brickinfo_t *brickinfo = NULL;
gf_store_iter_t *iter = NULL;
@@ -2273,34 +1899,19 @@ glusterd_store_retrieve_bricks (glusterd_volinfo_t *volinfo,
char *tmpvalue = NULL;
struct pmap_registry *pmap = NULL;
gf_store_op_errno_t op_errno = GD_STORE_SUCCESS;
- glusterd_volinfo_t *tmp_volinfo = NULL;
GF_ASSERT (volinfo);
GF_ASSERT (volinfo->volname);
priv = THIS->private;
- if (snap_volinfo) {
- GLUSTERD_GET_SNAP_BRICK_DIR (brickdir, volinfo,
- snap_volinfo->volname, priv);
- } else {
- GLUSTERD_GET_BRICK_DIR (brickdir, volinfo, priv);
- }
-
- if (snap_volinfo)
- ret = gf_store_iter_new (snap_volinfo->shandle, &tmpiter);
- else
- ret = gf_store_iter_new (volinfo->shandle, &tmpiter);
+ GLUSTERD_GET_BRICK_DIR (brickdir, volinfo, priv);
+ ret = gf_store_iter_new (volinfo->shandle, &tmpiter);
if (ret)
goto out;
- if (snap_volinfo)
- tmp_volinfo = snap_volinfo;
- else
- tmp_volinfo = volinfo;
-
- while (brick_count < tmp_volinfo->brick_count) {
+ while (brick_count < volinfo->brick_count) {
ret = glusterd_brickinfo_new (&brickinfo);
if (ret)
@@ -2376,6 +1987,13 @@ glusterd_store_retrieve_bricks (glusterd_volinfo_t *volinfo,
} else if (!strncmp (key, GLUSTERD_STORE_KEY_BRICK_DECOMMISSIONED,
strlen (GLUSTERD_STORE_KEY_BRICK_DECOMMISSIONED))) {
gf_string2int (value, &brickinfo->decommissioned);
+ } else if (!strncmp (key, GLUSTERD_STORE_KEY_BRICK_DEVICE_PATH,
+ strlen (GLUSTERD_STORE_KEY_BRICK_DEVICE_PATH))) {
+ strncpy (brickinfo->device_path, value,
+ sizeof (brickinfo->device_path));
+ } else if (!strncmp (key, GLUSTERD_STORE_KEY_BRICK_SNAP_STATUS,
+ strlen (GLUSTERD_STORE_KEY_BRICK_SNAP_STATUS))) {
+ gf_string2int (value, &brickinfo->snap_status);
} else if (!strncmp (key,
GLUSTERD_STORE_KEY_BRICK_VGNAME,
strlen (GLUSTERD_STORE_KEY_BRICK_VGNAME))) {
@@ -2402,7 +2020,7 @@ glusterd_store_retrieve_bricks (glusterd_volinfo_t *volinfo,
if (ret)
goto out;
- list_add_tail (&brickinfo->brick_list, &tmp_volinfo->bricks);
+ list_add_tail (&brickinfo->brick_list, &volinfo->bricks);
brick_count++;
}
@@ -2417,10 +2035,9 @@ out:
int32_t
-glusterd_store_retrieve_rbstate (char *volname)
+glusterd_store_retrieve_rbstate (glusterd_volinfo_t *volinfo)
{
int32_t ret = -1;
- glusterd_volinfo_t *volinfo = NULL;
gf_store_iter_t *iter = NULL;
char *key = NULL;
char *value = NULL;
@@ -2428,15 +2045,13 @@ glusterd_store_retrieve_rbstate (char *volname)
glusterd_conf_t *priv = NULL;
char path[PATH_MAX] = {0,};
gf_store_op_errno_t op_errno = GD_STORE_SUCCESS;
+ xlator_t *this = NULL;
- priv = THIS->private;
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get"
- "volinfo for %s.", volname);
- goto out;
- }
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (volinfo);
GLUSTERD_GET_VOLUME_DIR(volpath, volinfo, priv);
snprintf (path, sizeof (path), "%s/%s", volpath,
@@ -2513,16 +2128,15 @@ glusterd_store_retrieve_rbstate (char *volname)
goto out;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning with %d", ret);
return ret;
}
int32_t
-glusterd_store_retrieve_node_state (char *volname)
+glusterd_store_retrieve_node_state (glusterd_volinfo_t *volinfo)
{
int32_t ret = -1;
- glusterd_volinfo_t *volinfo = NULL;
gf_store_iter_t *iter = NULL;
char *key = NULL;
char *value = NULL;
@@ -2530,15 +2144,13 @@ glusterd_store_retrieve_node_state (char *volname)
glusterd_conf_t *priv = NULL;
char path[PATH_MAX] = {0,};
gf_store_op_errno_t op_errno = GD_STORE_SUCCESS;
+ xlator_t *this = NULL;
- priv = THIS->private;
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get"
- "volinfo for %s.", volname);
- goto out;
- }
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (volinfo);
GLUSTERD_GET_VOLUME_DIR(volpath, volinfo, priv);
snprintf (path, sizeof (path), "%s/%s", volpath,
@@ -2590,60 +2202,58 @@ glusterd_store_retrieve_node_state (char *volname)
goto out;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+ gf_log (this->name, GF_LOG_TRACE, "Returning with %d", ret);
return ret;
}
-int32_t
-glusterd_store_retrieve_volume (char *volname, glusterd_snap_t *snap)
-{
- int32_t ret = -1;
- glusterd_volinfo_t *volinfo = NULL;
- gf_store_iter_t *iter = NULL;
- char *key = NULL;
- char *value = NULL;
- char volpath[PATH_MAX] = {0,};
- glusterd_conf_t *priv = NULL;
- char path[PATH_MAX] = {0,};
- int exists = 0;
- gf_store_op_errno_t op_errno = GD_STORE_SUCCESS;
- glusterd_volinfo_t *parent_vol = NULL;
- GF_ASSERT (volname);
+int
+glusterd_store_update_volinfo (glusterd_volinfo_t *volinfo)
+{
+ int ret = -1;
+ int exists = 0;
+ char *key = NULL;
+ char *value = NULL;
+ char volpath[PATH_MAX] = {0,};
+ char path[PATH_MAX] = {0,};
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ gf_store_iter_t *iter = NULL;
+ gf_store_op_errno_t op_errno = GD_STORE_SUCCESS;
- ret = glusterd_volinfo_new (&volinfo);
- if (ret)
- goto out;
+ this = THIS;
+ GF_ASSERT (this);
+ conf = THIS->private;
+ GF_ASSERT (volinfo);
- priv = THIS->private;
+ GLUSTERD_GET_VOLUME_DIR(volpath, volinfo, conf);
- if (snap) {
- ret = glusterd_volinfo_find (volname, &parent_vol);
- if (ret)
- goto out;
- strncpy (volinfo->volname, snap->snap_name, GLUSTERD_MAX_VOLUME_NAME);
- GLUSTERD_GET_SNAP_DIR (volpath, parent_vol, snap->snap_name, priv);
- } else {
- strncpy (volinfo->volname, volname, GLUSTERD_MAX_VOLUME_NAME);
- GLUSTERD_GET_VOLUME_DIR(volpath, volinfo, priv);
- }
snprintf (path, sizeof (path), "%s/%s", volpath,
GLUSTERD_VOLUME_INFO_FILE);
ret = gf_store_handle_retrieve (path, &volinfo->shandle);
- if (ret)
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "volinfo handle is NULL");
goto out;
+ }
ret = gf_store_iter_new (volinfo->shandle, &iter);
- if (ret)
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get new store "
+ "iter");
goto out;
+ }
ret = gf_store_iter_get_next (iter, &key, &value, &op_errno);
- if (ret)
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get next store "
+ "iter");
goto out;
+ }
while (!ret) {
+ gf_log ("", GF_LOG_DEBUG, "key = %s value = %s", key, value);
if (!strncmp (key, GLUSTERD_STORE_KEY_VOL_TYPE,
strlen (GLUSTERD_STORE_KEY_VOL_TYPE))) {
volinfo->type = atoi (value);
@@ -2712,9 +2322,15 @@ glusterd_store_retrieve_volume (char *volname, glusterd_snap_t *snap)
} else if (!strncmp (key, GLUSTERD_STORE_KEY_VOL_CAPS,
strlen (GLUSTERD_STORE_KEY_VOL_CAPS))) {
volinfo->caps = atoi (value);
- } else if (!strncmp (key, GLUSTERD_STORE_KEY_SNAP_MAX_LIMIT,
- strlen (GLUSTERD_STORE_KEY_SNAP_MAX_LIMIT))) {
- volinfo->snap_max_limit = (uint64_t) atoll (value);
+ } else if (!strncmp (key, GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT,
+ strlen (GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT))) {
+ volinfo->snap_max_hard_limit = (uint64_t) atoll (value);
+ } else if (!strncmp (key, GLUSTERD_STORE_KEY_VOL_IS_RESTORED,
+ strlen (GLUSTERD_STORE_KEY_VOL_IS_RESTORED))) {
+ volinfo->is_volume_restored = atoi (value);
+ } else if (!strncmp (key, GLUSTERD_STORE_KEY_PARENT_VOLNAME,
+ strlen (GLUSTERD_STORE_KEY_PARENT_VOLNAME))) {
+ strncpy (volinfo->parent_volname, value, sizeof(volinfo->parent_volname) - 1);
} else {
if (is_key_glusterd_hooks_friendly (key)) {
@@ -2802,38 +2418,80 @@ glusterd_store_retrieve_volume (char *volname, glusterd_snap_t *snap)
goto out;
ret = gf_store_iter_destroy (iter);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to destroy store "
+ "iter");
+ goto out;
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+glusterd_volinfo_t*
+glusterd_store_retrieve_volume (char *volname, glusterd_snap_t *snap)
+{
+ int32_t ret = -1;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_volinfo_t *origin_volinfo = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (volname);
+
+ ret = glusterd_volinfo_new (&volinfo);
if (ret)
goto out;
+ priv = THIS->private;
+
+ strncpy (volinfo->volname, volname, GLUSTERD_MAX_VOLUME_NAME);
+ volinfo->snapshot = snap;
if (snap)
- ret = glusterd_store_retrieve_bricks (parent_vol, volinfo);
- else
- ret = glusterd_store_retrieve_bricks (volinfo, NULL);
- if (ret)
+ volinfo->is_snap_volume = _gf_true;
+
+ ret = glusterd_store_update_volinfo (volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to update volinfo "
+ "for %s volume", volname);
goto out;
+ }
- if (snap)
- ret = glusterd_volume_compute_cksum (parent_vol, volinfo);
- else
- ret = glusterd_volume_compute_cksum (volinfo, NULL);
+ ret = glusterd_store_retrieve_bricks (volinfo);
if (ret)
goto out;
+ ret = glusterd_volume_compute_cksum (volinfo);
+ if (ret)
+ goto out;
if (!snap) {
list_add_tail (&volinfo->vol_list, &priv->volumes);
} else {
- // as of now snap volume are also added to the list of volume
- snap->snap_volume = volinfo;
- list_add_tail (&volinfo->vol_list, &priv->volumes);
+ ret = glusterd_volinfo_find (volinfo->parent_volname,
+ &origin_volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Parent volinfo "
+ "not found for %s volume", volname);
+ goto out;
+ }
+ glusterd_list_add_snapvol (origin_volinfo, volinfo);
}
-
-
out:
- gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+ if (ret) {
+ if (volinfo)
+ glusterd_volinfo_delete (volinfo);
+ volinfo = NULL;
+ }
- return ret;
+ gf_log (this->name, GF_LOG_TRACE, "Returning with %d", ret);
+
+ return volinfo;
}
inline void
@@ -2933,235 +2591,378 @@ out:
}
int32_t
-glusterd_store_retrieve_snap_cg (char *cg_name, glusterd_conf_t *priv)
+glusterd_store_retrieve_volumes (xlator_t *this, glusterd_snap_t *snap)
{
- int32_t ret = -1;
- gf_store_iter_t *iter = NULL;
- char *key = NULL;
- char *value = NULL;
- char cg_path[PATH_MAX] = {0,};
- char path[PATH_MAX] = {0,};
- gf_store_op_errno_t op_errno = GD_STORE_SUCCESS;
- glusterd_snap_cg_t *cg = NULL;
- uint64_t vol_count = 0;
- gf_store_handle_t *tmp_shandle = NULL;
- uint64_t count = 0;
- glusterd_volinfo_t *volinfo = NULL;
- glusterd_volinfo_t *tmpinfo = NULL;
- uuid_t vol_id = {0, };
+ int32_t ret = -1;
+ char path[PATH_MAX] = {0,};
+ glusterd_conf_t *priv = NULL;
+ DIR *dir = NULL;
+ struct dirent *entry = NULL;
+ glusterd_volinfo_t *volinfo = NULL;
+
+ GF_ASSERT (this);
+ priv = this->private;
- GF_ASSERT (cg_name);
GF_ASSERT (priv);
- glusterd_store_snap_cgfpath_set (cg_name, path, sizeof (path));
- ret = gf_store_handle_retrieve (path, &tmp_shandle);
- if (ret)
- goto out;
+ if (snap)
+ snprintf (path, PATH_MAX, "%s/snaps/%s", priv->workdir,
+ snap->snapname);
+ else
+ snprintf (path, PATH_MAX, "%s/%s", priv->workdir,
+ GLUSTERD_VOLUME_DIR_PREFIX);
- ret = gf_store_iter_new (tmp_shandle, &iter);
- if (ret)
- goto out;
- ret = gf_store_iter_get_matching (iter,
- GLUSTERD_STORE_KEY_CG_VOL_COUNT,
- &value);
- if (ret)
+ dir = opendir (path);
+
+ if (!dir) {
+ gf_log ("", GF_LOG_ERROR, "Unable to open dir %s", path);
goto out;
+ }
- vol_count = atoi (value);
+ glusterd_for_each_entry (entry, dir);
- GF_FREE (value);
- value = NULL;
+ while (entry) {
+ if ( entry->d_type != DT_DIR )
+ goto next;
- cg = glusterd_new_snap_cg_object (vol_count);
- if (!cg) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to create snap cg object");
- goto out;
+ volinfo = glusterd_store_retrieve_volume (entry->d_name, snap);
+ if (!volinfo) {
+ gf_log ("", GF_LOG_ERROR, "Unable to restore "
+ "volume: %s", entry->d_name);
+ ret = -1;
+ goto out;
+ }
+
+ ret = glusterd_store_retrieve_rbstate (volinfo);
+ if (ret) {
+ /* Backward compatibility */
+ gf_log ("", GF_LOG_INFO, "Creating a new rbstate "
+ "for volume: %s.", entry->d_name);
+ ret = glusterd_store_create_rbstate_shandle_on_absence (volinfo);
+ ret = glusterd_store_perform_rbstate_store (volinfo);
+ }
+
+ ret = glusterd_store_retrieve_node_state (volinfo);
+ if (ret) {
+ /* Backward compatibility */
+ gf_log ("", GF_LOG_INFO, "Creating a new node_state "
+ "for volume: %s.", entry->d_name);
+ glusterd_store_create_nodestate_sh_on_absence (volinfo);
+ ret = glusterd_store_perform_node_state_store (volinfo);
+
+ }
+
+next:
+ glusterd_for_each_entry (entry, dir);
}
- cg->shandle = tmp_shandle;
+ ret = 0;
+out:
+ if (dir)
+ closedir (dir);
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
+}
+
+int32_t
+glusterd_resolve_snap_bricks (xlator_t *this, glusterd_snap_t *snap)
+{
+ int32_t ret = -1;
+ glusterd_volinfo_t *volinfo = NULL;
+ glusterd_brickinfo_t *brickinfo = NULL;
+
+ GF_ASSERT (this);
+ GF_VALIDATE_OR_GOTO (this->name, snap, out);
+
+ list_for_each_entry (volinfo, &snap->volumes, vol_list) {
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ ret = glusterd_resolve_brick (brickinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "resolve brick failed in restore");
+ goto out;
+ }
+ }
+ }
+
+ ret = 0;
+
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning with %d", ret);
+
+ return ret;
+}
+
+int
+glusterd_store_update_snap (glusterd_snap_t *snap)
+{
+ int ret = -1;
+ char *key = NULL;
+ char *value = NULL;
+ char snappath[PATH_MAX] = {0,};
+ char path[PATH_MAX] = {0,};
+ xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
+ gf_store_iter_t *iter = NULL;
+ gf_store_op_errno_t op_errno = GD_STORE_SUCCESS;
- GLUSTERD_GET_SNAP_CG_DIR (cg_path, priv);
- snprintf (path, sizeof (path), "%s/%s", cg_path,
- cg_name);
- cg->volume_count = atoi (value);
+ this = THIS;
+ conf = this->private;
+ GF_ASSERT (snap);
+
+ GLUSTERD_GET_SNAP_DIR (snappath, snap, conf);
+
+ snprintf (path, sizeof (path), "%s/%s", snappath,
+ GLUSTERD_SNAP_INFO_FILE);
+
+ ret = gf_store_handle_retrieve (path, &snap->shandle);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "snap handle is NULL");
+ goto out;
+ }
+
+ ret = gf_store_iter_new (snap->shandle, &iter);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get new store "
+ "iter");
+ goto out;
+ }
ret = gf_store_iter_get_next (iter, &key, &value, &op_errno);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to get next store "
+ "iter");
+ goto out;
+ }
+
while (!ret) {
+ gf_log (this->name, GF_LOG_DEBUG, "key = %s value = %s",
+ key, value);
- if (!strncmp(key, GLUSTERD_STORE_KEY_SNAP_NAME,
- sizeof (*key))) {
- strncpy (cg->cg_name, value, sizeof (*value));
- } else if (!strncmp(key, GLUSTERD_STORE_KEY_SNAP_STATUS,
- sizeof (*key))) {
- cg->cg_status = atoi (value);
- } else if (!strncmp (key, GLUSTERD_STORE_KEY_SNAP_CG_ID,
- sizeof (*key))) {
- uuid_parse (value, cg->cg_id);
- } else if (!strncmp (key, GLUSTERD_STORE_KEY_VOL_ID,
- sizeof (*key))) {
- uuid_parse (key, vol_id);
- ret = glusterd_volinfo_find_by_volume_id (vol_id, &volinfo);
+ if (!strncmp (key, GLUSTERD_STORE_KEY_SNAP_ID,
+ strlen (GLUSTERD_STORE_KEY_SNAP_ID))) {
+ ret = uuid_parse (value, snap->snap_id);
if (ret)
- break;
- if (count < vol_count) {
- tmpinfo = &cg->volumes[count];
- tmpinfo = volinfo;
- count++;
- }
+ gf_log (this->name, GF_LOG_WARNING,
+ "Failed to parse uuid");
+ } else if (!strncmp (key, GLUSTERD_STORE_KEY_SNAP_RESTORED,
+ strlen (GLUSTERD_STORE_KEY_SNAP_RESTORED))) {
+ snap->snap_restored = atoi (value);
+ } else if (!strncmp (key, GLUSTERD_STORE_KEY_SNAP_STATUS,
+ strlen (GLUSTERD_STORE_KEY_SNAP_STATUS))) {
+ snap->snap_status = atoi (value);
+ } else if (!strncmp (key, GLUSTERD_STORE_KEY_SNAP_DESC,
+ strlen (GLUSTERD_STORE_KEY_SNAP_DESC))) {
+ snap->description = gf_strdup (value);
+ } else if (!strncmp (key, GLUSTERD_STORE_KEY_SNAP_TIMESTAMP,
+ strlen (GLUSTERD_STORE_KEY_SNAP_TIMESTAMP))) {
+ snap->time_stamp = atoi (value);
}
+ GF_FREE (key);
GF_FREE (value);
+ key = NULL;
value = NULL;
- }
- ret = glusterd_add_snap_cg (priv, cg);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to add %s to"
- " cg_list", cg_name);
- goto out;
+ ret = gf_store_iter_get_next (iter, &key, &value, &op_errno);
}
if (op_errno != GD_STORE_EOF)
goto out;
ret = gf_store_iter_destroy (iter);
-
- if (ret)
- goto out;
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to destroy store "
+ "iter");
+ }
out:
- gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
-
return ret;
}
+
int32_t
-glusterd_store_retrieve_snap_list (char *volname)
+glusterd_store_retrieve_snap (char *snapname)
{
- int32_t ret = -1;
- glusterd_volinfo_t *volinfo = NULL;
- gf_store_iter_t *iter = NULL;
- char key[256] = {0, };
- char *value = NULL;
- char volpath[PATH_MAX] = {0,};
- glusterd_conf_t *priv = NULL;
- char path[PATH_MAX] = {0,};
- gf_store_op_errno_t op_errno = GD_STORE_SUCCESS;
- glusterd_snap_t *snap = NULL;
- uint64_t count = 0;
- gf_store_handle_t *shandle = NULL;
+ int32_t ret = -1;
+ dict_t *dict = NULL;
+ glusterd_snap_t *snap = NULL;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
- priv = THIS->private;
+ this = THIS;
+ priv = this->private;
+ GF_ASSERT (priv);
+ GF_ASSERT (snapname);
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Couldn't get"
- "volinfo for %s.", volname);
+ dict = dict_new();
+ if (!dict) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to create dict");
+ ret = -1;
goto out;
}
- GLUSTERD_GET_VOLUME_DIR(volpath, volinfo, priv);
- snprintf (path, sizeof (path), "%s/%s", volpath,
- GLUSTERD_VOL_SNAP_FILE);
-
- ret = gf_store_handle_retrieve (path, &volinfo->snap_list_shandle);
- if (ret)
+ snap = glusterd_new_snap_object ();
+ if (!snap) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to create "
+ " snap object");
goto out;
+ }
- ret = gf_store_iter_new (volinfo->snap_list_shandle, &iter);
- if (ret)
+ strncpy (snap->snapname, snapname, strlen(snapname));
+ ret = glusterd_store_update_snap (snap);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to update snapshot "
+ "for %s snap", snapname);
goto out;
+ }
- ret = gf_store_iter_get_matching (iter, GLUSTERD_STORE_KEY_SNAP_COUNT,
- &value);
- if(ret)
+ ret = glusterd_store_retrieve_volumes (this, snap);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to retrieve "
+ "snap volumes for snap %s", snapname);
+ goto out;
+ }
+
+ /* Unlike bricks of normal volumes which are resolved at the end of
+ the glusterd restore, the bricks belonging to the snap volumes of
+ each snap should be resolved as part of snapshot restore itself.
+ Because if the snapshot has to be removed, then resolving bricks
+ helps glusterd in understanding what all bricks have its own uuid
+ and killing those bricks.
+ */
+ ret = glusterd_resolve_snap_bricks (this, snap);
+ if (ret)
+ gf_log (this->name, GF_LOG_WARNING, "resolving the snap bricks"
+ " failed (snap: %s)", snap?snap->snapname:"");
+
+ /* When the snapshot command from cli is received, the on disk and
+ in memory structures for the snapshot are created (with the status)
+ being marked as GD_SNAP_STATUS_INIT. Once the backend snapshot is
+ taken, the status is changed to GD_SNAP_STATUS_IN_USE. If glusterd
+ dies after taking the backend snapshot, but before updating the
+ status, then when glusterd comes up, it should treat that snapshot
+ as a failed snapshot and clean it up.
+ */
+ if (snap->snap_status != GD_SNAP_STATUS_IN_USE) {
+ ret = glusterd_snap_remove (dict, snap, _gf_true, _gf_true);
+ if (ret)
+ gf_log (this->name, GF_LOG_WARNING, "failed to remove"
+ " the snapshot %s", snap->snapname);
goto out;
+ }
- volinfo->snap_count = atol(value);
- GF_FREE (value);
- value = NULL;
+ /* TODO: list_add_order can do 'N-square' comparisions and
+ is not efficient. Find a better solution to store the snap
+ in order */
+ list_add_order (&snap->snap_list, &priv->snapshots,
+ glusterd_compare_snap_time);
- shandle = volinfo->snap_list_shandle;
- while (count <= volinfo->snap_count) {
- snap = glusterd_new_snap_object ();
- if (!snap) {
- gf_log (THIS->name, GF_LOG_ERROR,
- "Failed to create snap object");
- goto out;
- }
-
- snprintf (key, sizeof (key), "%s-%"PRIu64,
- GLUSTERD_STORE_KEY_SNAP_NAME, count);
- ret = gf_store_retrieve_value (shandle, key, &value);
- if (ret)
- goto out;
+out:
+ if (dict)
+ dict_unref (dict);
- strncpy (snap->snap_name, value, strlen(value));
- GF_FREE (value);
- value = NULL;
+ gf_log (this->name, GF_LOG_TRACE, "Returning with %d", ret);
+ return ret;
+}
- snprintf (key, sizeof (key), "%s-%"PRIu64,
- GLUSTERD_STORE_KEY_SNAP_STATUS, count);
- ret = gf_store_retrieve_value (shandle, key,
- &value);
- if (ret)
- goto out;
+/* Read the missed_snap_list and update the in-memory structs */
+int32_t
+glusterd_store_retrieve_missed_snaps_list (xlator_t *this)
+{
+ char buf[PATH_MAX] = "";
+ char path[PATH_MAX] = "";
+ char *missed_node_info = NULL;
+ char *brick_path = NULL;
+ char *value = NULL;
+ char *save_ptr = NULL;
+ FILE *fp = NULL;
+ int32_t brick_num = -1;
+ int32_t snap_op = -1;
+ int32_t snap_status = -1;
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ gf_store_op_errno_t store_errno = GD_STORE_SUCCESS;
- snap->snap_status = atoi (value);
- GF_FREE (value);
- value = NULL;
+ GF_ASSERT (this);
+ priv = this->private;
+ GF_ASSERT (priv);
- snprintf (key, sizeof (key), "%s-%"PRIu64,
- GLUSTERD_STORE_KEY_SNAP_ID, count);
- ret = gf_store_retrieve_value (shandle, key, &value);
- if (ret)
- goto out;
+ /* Get the path of the missed_snap_list */
+ glusterd_store_missed_snaps_list_path_set (path, sizeof(path));
- uuid_parse (value, snap->snap_id);
- GF_FREE (value);
- value = NULL;
+ fp = fopen (path, "r");
+ if (!fp) {
+ /* If errno is ENOENT then there are no missed snaps yet */
+ if (errno != ENOENT) {
+ ret = -1;
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to open %s. "
+ "Error: %s", path, strerror(errno));
+ } else {
+ gf_log (this->name, GF_LOG_INFO,
+ "No missed snaps list.");
+ ret = 0;
+ }
+ goto out;
+ }
- snprintf (key, sizeof (key), "%s-%"PRIu64,
- GLUSTERD_STORE_KEY_SNAP_CG_ID, count);
- ret = gf_store_retrieve_value (shandle, key, &value);
- if (ret)
+ do {
+ ret = gf_store_read_and_tokenize (fp, buf,
+ &missed_node_info, &value,
+ &store_errno);
+ if (ret) {
+ if (store_errno == GD_STORE_EOF) {
+ gf_log (this->name,
+ GF_LOG_DEBUG,
+ "EOF for missed_snap_list");
+ ret = 0;
+ break;
+ }
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to fetch data from "
+ "missed_snaps_list. Error: %s",
+ gf_store_strerror (store_errno));
goto out;
+ }
- uuid_parse (value, snap->cg_id);
- GF_FREE (value);
- value = NULL;
+ /* Fetch the brick_num, brick_path, snap_op and snap status */
+ brick_num = atoi(strtok_r (value, ":", &save_ptr));
+ brick_path = strtok_r (NULL, ":", &save_ptr);
+ snap_op = atoi(strtok_r (NULL, ":", &save_ptr));
+ snap_status = atoi(strtok_r (NULL, ":", &save_ptr));
- ret = glusterd_add_snap (volinfo, snap);
- if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to add %s to"
- " snap_list", value);
+ if (!missed_node_info || !brick_path ||
+ brick_num < 1 || snap_op < 1 ||
+ snap_status < 1) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Invalid missed_snap_entry");
+ ret = -1;
goto out;
}
- ret = glusterd_store_retrieve_volume (volname, snap);
+
+ ret = glusterd_store_missed_snaps_list (missed_node_info,
+ brick_num,
+ brick_path,
+ snap_op,
+ snap_status);
if (ret) {
- gf_log (THIS->name, GF_LOG_ERROR, "Failed to retrieve "
- "snap %s for volume %s", snap->snap_name, volname);
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to store missed snaps_list");
goto out;
}
- count++;
- }
-
- if (op_errno != GD_STORE_EOF)
- goto out;
- ret = gf_store_iter_destroy (iter);
-
- if (ret)
- goto out;
+ } while (store_errno == GD_STORE_SUCCESS);
+ ret = 0;
out:
- gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
-
+ gf_log (this->name, GF_LOG_TRACE, "Returning with %d", ret);
return ret;
}
int32_t
-glusterd_store_retrieve_snap_cgs (xlator_t *this)
+glusterd_store_retrieve_snaps (xlator_t *this)
{
int32_t ret = 0;
char path[PATH_MAX] = {0,};
@@ -3174,106 +2975,169 @@ glusterd_store_retrieve_snap_cgs (xlator_t *this)
GF_ASSERT (priv);
- snprintf (path, PATH_MAX, "%s/%s", priv->workdir,
- GLUSTERD_VOL_SNAP_CG_DIR_PREFIX);
+ snprintf (path, PATH_MAX, "%s/snaps", priv->workdir);
dir = opendir (path);
if (!dir) {
- gf_log ("", GF_LOG_ERROR, "Unable to open dir %s", path);
- ret = -1;
+ /* If snaps dir doesn't exists ignore the error for
+ backward compatibility */
+ if (errno != ENOENT) {
+ ret = -1;
+ gf_log ("", GF_LOG_ERROR, "Unable to open dir %s", path);
+ }
goto out;
}
glusterd_for_each_entry (entry, dir);
while (entry) {
- ret = glusterd_store_retrieve_snap_cg (entry->d_name, priv);
- if (ret)
- goto out;
+ if (entry->d_type == DT_DIR) {
+ ret = glusterd_store_retrieve_snap (entry->d_name);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Unable to restore snapshot: %s",
+ entry->d_name);
+ goto out;
+ }
+ }
+
glusterd_for_each_entry (entry, dir);
}
+
+ /* Retrieve missed_snaps_list */
+ ret = glusterd_store_retrieve_missed_snaps_list (this);
+ if (ret) {
+ gf_log (this->name, GF_LOG_DEBUG,
+ "Failed to retrieve missed_snaps_list");
+ goto out;
+ }
+
out:
- return 0;
+ if (dir)
+ closedir (dir);
+ gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+
+ return ret;
}
+/* Writes all the contents of conf->missed_snap_list */
int32_t
-glusterd_store_retrieve_volumes (xlator_t *this)
+glusterd_store_write_missed_snapinfo (int32_t fd)
{
- int32_t ret = 0;
- char path[PATH_MAX] = {0,};
- glusterd_conf_t *priv = NULL;
- DIR *dir = NULL;
- struct dirent *entry = NULL;
- glusterd_volinfo_t *volinfo = NULL;
+ char value[PATH_MAX] = "";
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ glusterd_missed_snap_info *missed_snapinfo = NULL;
+ glusterd_snap_op_t *snap_opinfo = NULL;
+ xlator_t *this = NULL;
- GF_ASSERT (this);
- priv = this->private;
+ this = THIS;
+ GF_ASSERT(this);
+ priv = this->private;
GF_ASSERT (priv);
- snprintf (path, PATH_MAX, "%s/%s", priv->workdir,
- GLUSTERD_VOLUME_DIR_PREFIX);
+ /* Write the missed_snap_entry */
+ list_for_each_entry (missed_snapinfo, &priv->missed_snaps_list,
+ missed_snaps) {
+ list_for_each_entry (snap_opinfo,
+ &missed_snapinfo->snap_ops,
+ snap_ops_list) {
+ snprintf (value, sizeof(value), "%d:%s:%d:%d",
+ snap_opinfo->brick_num,
+ snap_opinfo->brick_path,
+ snap_opinfo->op, snap_opinfo->status);
+ ret = gf_store_save_value
+ (fd,
+ missed_snapinfo->node_snap_info,
+ value);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to write missed snapinfo");
+ goto out;
+ }
+ }
+ }
- dir = opendir (path);
+ ret = 0;
+out:
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
+ return ret;
+}
- if (!dir) {
- gf_log ("", GF_LOG_ERROR, "Unable to open dir %s", path);
- ret = -1;
+/* Adds the missed snap entries to the in-memory conf->missed_snap_list *
+ * and writes them to disk */
+int32_t
+glusterd_store_update_missed_snaps (dict_t *dict, int32_t missed_snap_count)
+{
+ int32_t fd = -1;
+ int32_t ret = -1;
+ glusterd_conf_t *priv = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+ GF_ASSERT(dict);
+
+ priv = this->private;
+ GF_ASSERT (priv);
+
+ if (missed_snap_count < 1) {
+ gf_log (this->name, GF_LOG_DEBUG, "No missed snaps");
+ ret = 0;
goto out;
}
- glusterd_for_each_entry (entry, dir);
+ ret = glusterd_store_create_missed_snaps_list_shandle_on_absence ();
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Unable to obtain "
+ "missed_snaps_list store handle.");
+ goto out;
+ }
- while (entry) {
- ret = glusterd_store_retrieve_volume (entry->d_name, NULL);
- if (ret) {
- gf_log ("", GF_LOG_ERROR, "Unable to restore "
- "volume: %s", entry->d_name);
- goto out;
- }
+ fd = gf_store_mkstemp (priv->missed_snaps_list_shandle);
+ if (fd <= 0) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to create tmp file");
+ ret = -1;
+ goto out;
+ }
- ret = glusterd_store_retrieve_rbstate (entry->d_name);
- if (ret) {
- /* Backward compatibility */
- gf_log ("", GF_LOG_INFO, "Creating a new rbstate "
- "for volume: %s.", entry->d_name);
- ret = glusterd_volinfo_find (entry->d_name, &volinfo);
- ret = glusterd_store_create_rbstate_shandle_on_absence (volinfo);
- ret = glusterd_store_perform_rbstate_store (volinfo);
- }
+ ret = glusterd_add_missed_snaps_to_list (dict, missed_snap_count);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to add missed snaps to list");
+ goto out;
+ }
- ret = glusterd_store_retrieve_node_state (entry->d_name);
- if (ret) {
- /* Backward compatibility */
- gf_log ("", GF_LOG_INFO, "Creating a new node_state "
- "for volume: %s.", entry->d_name);
- ret = glusterd_volinfo_find (entry->d_name, &volinfo);
- ret =
- glusterd_store_create_nodestate_sh_on_absence (volinfo);
- ret = glusterd_store_perform_node_state_store (volinfo);
+ ret = glusterd_store_write_missed_snapinfo (fd);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to write missed snaps to disk");
+ goto out;
+ }
- }
- ret = glusterd_store_retrieve_snap_list (entry->d_name);
+ ret = gf_store_rename_tmppath (priv->missed_snaps_list_shandle);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to rename the tmp file");
+ goto out;
+ }
+out:
+ if (ret && (fd > 0)) {
+ ret = gf_store_unlink_tmppath (priv->missed_snaps_list_shandle);
if (ret) {
- /* Backward compatibility */
- gf_log ("", GF_LOG_INFO, "Creating a new snap_list "
- "for volume: %s.", entry->d_name);
- ret = glusterd_volinfo_find (entry->d_name, &volinfo);
- ret =
- glusterd_store_create_snap_list_sh_on_absence (volinfo);
- ret = glusterd_store_perform_snap_store (volinfo);
- //ret = glusterd_store_perform_snap_list_store (volinfo); TODO: Fix this
-
+ gf_log (this->name, GF_LOG_ERROR,
+ "Failed to unlink the tmp file");
}
- glusterd_for_each_entry (entry, dir);
+ ret = -1;
}
-out:
- if (dir)
- closedir (dir);
- gf_log ("", GF_LOG_DEBUG, "Returning with %d", ret);
+ if (fd > 0)
+ close (fd);
+ gf_log (this->name, GF_LOG_TRACE, "Returning %d", ret);
return ret;
}
@@ -3678,11 +3542,11 @@ glusterd_restore ()
goto out;
}
- ret = glusterd_store_retrieve_volumes (this);
+ ret = glusterd_store_retrieve_volumes (this, NULL);
if (ret)
goto out;
- ret = glusterd_store_retrieve_snap_cgs (this);
+ ret = glusterd_store_retrieve_snaps (this);
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h
index 927ca8284..1b5cebc0c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.h
+++ b/xlators/mgmt/glusterd/src/glusterd-store.h
@@ -35,52 +35,55 @@ typedef enum glusterd_store_ver_ac_{
} glusterd_volinfo_ver_ac_t;
-#define GLUSTERD_STORE_UUID_KEY "UUID"
-
-#define GLUSTERD_STORE_KEY_VOL_TYPE "type"
-#define GLUSTERD_STORE_KEY_VOL_COUNT "count"
-#define GLUSTERD_STORE_KEY_VOL_STATUS "status"
-#define GLUSTERD_STORE_KEY_VOL_PORT "port"
-#define GLUSTERD_STORE_KEY_VOL_SUB_COUNT "sub_count"
-#define GLUSTERD_STORE_KEY_VOL_STRIPE_CNT "stripe_count"
-#define GLUSTERD_STORE_KEY_VOL_REPLICA_CNT "replica_count"
-#define GLUSTERD_STORE_KEY_VOL_BRICK "brick"
-#define GLUSTERD_STORE_KEY_VOL_VERSION "version"
-#define GLUSTERD_STORE_KEY_VOL_TRANSPORT "transport-type"
-#define GLUSTERD_STORE_KEY_VOL_ID "volume-id"
-#define GLUSTERD_STORE_KEY_RB_STATUS "rb_status"
-#define GLUSTERD_STORE_KEY_RB_SRC_BRICK "rb_src"
-#define GLUSTERD_STORE_KEY_RB_DST_BRICK "rb_dst"
-#define GLUSTERD_STORE_KEY_RB_DST_PORT "rb_port"
-#define GLUSTERD_STORE_KEY_VOL_DEFRAG "rebalance_status"
-#define GLUSTERD_STORE_KEY_DEFRAG_OP "rebalance_op"
-#define GLUSTERD_STORE_KEY_USERNAME "username"
-#define GLUSTERD_STORE_KEY_PASSWORD "password"
-#define GLUSTERD_STORE_KEY_VOL_OP_VERSION "op-version"
+#define GLUSTERD_STORE_UUID_KEY "UUID"
+
+#define GLUSTERD_STORE_KEY_VOL_TYPE "type"
+#define GLUSTERD_STORE_KEY_VOL_COUNT "count"
+#define GLUSTERD_STORE_KEY_VOL_STATUS "status"
+#define GLUSTERD_STORE_KEY_VOL_PORT "port"
+#define GLUSTERD_STORE_KEY_VOL_SUB_COUNT "sub_count"
+#define GLUSTERD_STORE_KEY_VOL_STRIPE_CNT "stripe_count"
+#define GLUSTERD_STORE_KEY_VOL_REPLICA_CNT "replica_count"
+#define GLUSTERD_STORE_KEY_VOL_BRICK "brick"
+#define GLUSTERD_STORE_KEY_VOL_VERSION "version"
+#define GLUSTERD_STORE_KEY_VOL_TRANSPORT "transport-type"
+#define GLUSTERD_STORE_KEY_VOL_ID "volume-id"
+#define GLUSTERD_STORE_KEY_VOL_IS_RESTORED "is-volume-restored"
+#define GLUSTERD_STORE_KEY_RB_STATUS "rb_status"
+#define GLUSTERD_STORE_KEY_RB_SRC_BRICK "rb_src"
+#define GLUSTERD_STORE_KEY_RB_DST_BRICK "rb_dst"
+#define GLUSTERD_STORE_KEY_RB_DST_PORT "rb_port"
+#define GLUSTERD_STORE_KEY_VOL_DEFRAG "rebalance_status"
+#define GLUSTERD_STORE_KEY_DEFRAG_OP "rebalance_op"
+#define GLUSTERD_STORE_KEY_USERNAME "username"
+#define GLUSTERD_STORE_KEY_PASSWORD "password"
+#define GLUSTERD_STORE_KEY_PARENT_VOLNAME "parent_volname"
+#define GLUSTERD_STORE_KEY_VOL_OP_VERSION "op-version"
#define GLUSTERD_STORE_KEY_VOL_CLIENT_OP_VERSION "client-op-version"
-#define GLUSTERD_STORE_KEY_SNAP_NAME "name"
-#define GLUSTERD_STORE_KEY_SNAP_ID "snap-id"
-#define GLUSTERD_STORE_KEY_SNAP_CG_ID "cg-id"
-#define GLUSTERD_STORE_KEY_SNAP_DESC "desc"
-#define GLUSTERD_STORE_KEY_SNAP_TIMESTAMP "time-stamp"
-#define GLUSTERD_STORE_KEY_SNAP_STATUS "status"
-#define GLUSTERD_STORE_KEY_SNAP_COUNT "count"
-#define GLUSTERD_STORE_KEY_CG_VOL_COUNT "count"
-#define GLUSTERD_STORE_KEY_SNAP_MAX_LIMIT "snap-max-limit"
-
-#define GLUSTERD_STORE_KEY_BRICK_HOSTNAME "hostname"
-#define GLUSTERD_STORE_KEY_BRICK_PATH "path"
-#define GLUSTERD_STORE_KEY_BRICK_PORT "listen-port"
-#define GLUSTERD_STORE_KEY_BRICK_RDMA_PORT "rdma.listen-port"
+#define GLUSTERD_STORE_KEY_SNAP_NAME "name"
+#define GLUSTERD_STORE_KEY_SNAP_ID "snap-id"
+#define GLUSTERD_STORE_KEY_SNAP_DESC "desc"
+#define GLUSTERD_STORE_KEY_SNAP_TIMESTAMP "time-stamp"
+#define GLUSTERD_STORE_KEY_SNAP_STATUS "status"
+#define GLUSTERD_STORE_KEY_SNAP_RESTORED "snap-restored"
+#define GLUSTERD_STORE_KEY_SNAP_MAX_HARD_LIMIT "snap-max-hard-limit"
+#define GLUSTERD_STORE_KEY_SNAP_MAX_SOFT_LIMIT "snap-max-soft-limit"
+
+#define GLUSTERD_STORE_KEY_BRICK_HOSTNAME "hostname"
+#define GLUSTERD_STORE_KEY_BRICK_PATH "path"
+#define GLUSTERD_STORE_KEY_BRICK_PORT "listen-port"
+#define GLUSTERD_STORE_KEY_BRICK_RDMA_PORT "rdma.listen-port"
#define GLUSTERD_STORE_KEY_BRICK_DECOMMISSIONED "decommissioned"
-#define GLUSTERD_STORE_KEY_BRICK_VGNAME "vg"
+#define GLUSTERD_STORE_KEY_BRICK_VGNAME "vg"
+#define GLUSTERD_STORE_KEY_BRICK_DEVICE_PATH "device_path"
+#define GLUSTERD_STORE_KEY_BRICK_SNAP_STATUS "snap-status"
-#define GLUSTERD_STORE_KEY_PEER_UUID "uuid"
-#define GLUSTERD_STORE_KEY_PEER_HOSTNAME "hostname"
-#define GLUSTERD_STORE_KEY_PEER_STATE "state"
+#define GLUSTERD_STORE_KEY_PEER_UUID "uuid"
+#define GLUSTERD_STORE_KEY_PEER_HOSTNAME "hostname"
+#define GLUSTERD_STORE_KEY_PEER_STATE "state"
-#define GLUSTERD_STORE_KEY_VOL_CAPS "caps"
+#define GLUSTERD_STORE_KEY_VOL_CAPS "caps"
#define glusterd_for_each_entry(entry, dir) \
do {\
@@ -100,8 +103,10 @@ int32_t
glusterd_store_volinfo (glusterd_volinfo_t *volinfo, glusterd_volinfo_ver_ac_t ac);
int32_t
-glusterd_store_delete_volume (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t *snapinfo);
+glusterd_store_delete_volume (glusterd_volinfo_t *volinfo);
+
+int32_t
+glusterd_store_delete_snap (glusterd_snap_t *snap);
int32_t
glusterd_retrieve_uuid ();
@@ -138,14 +143,22 @@ int32_t
glusterd_store_retrieve_options (xlator_t *this);
int32_t
+glusterd_store_retrieve_bricks (glusterd_volinfo_t *volinfo);
+
+int32_t
glusterd_store_options (xlator_t *this, dict_t *opts);
void
glusterd_replace_slash_with_hyphen (char *str);
+
int32_t
glusterd_store_perform_volume_store (glusterd_volinfo_t *volinfo);
+
int32_t
-glusterd_store_perform_snap_store (glusterd_volinfo_t *volinfo);
+glusterd_store_snap (glusterd_snap_t *snap);
+
int32_t
-glusterd_store_perform_snap_list_store (glusterd_volinfo_t *volinfo);
+glusterd_store_update_missed_snaps (dict_t *dict,
+ int32_t missed_snap_count);
+
#endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 953b9f701..438df8266 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -59,17 +59,17 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
}
switch (op_code){
- case GLUSTERD_MGMT_V3_VOLUME_LOCK:
+ case GLUSTERD_MGMT_V3_LOCK:
{
len = snprintf (op_err, sizeof(op_err) - 1,
- "Locking volume failed "
+ "Locking failed "
"on %s. %s", peer_str, err_str);
break;
}
- case GLUSTERD_MGMT_V3_VOLUME_UNLOCK:
+ case GLUSTERD_MGMT_V3_UNLOCK:
{
len = snprintf (op_err, sizeof(op_err) - 1,
- "Unlocking volume failed "
+ "Unlocking failed "
"on %s. %s", peer_str, err_str);
break;
}
@@ -162,7 +162,7 @@ gd_collate_errors (struct syncargs *args, int op_ret, int op_errno,
return;
}
-static void
+void
gd_syncargs_init (struct syncargs *args, dict_t *op_ctx)
{
args->dict = op_ctx;
@@ -266,7 +266,7 @@ extern struct rpc_clnt_program gd_mgmt_prog;
extern struct rpc_clnt_program gd_brick_prog;
extern struct rpc_clnt_program gd_mgmt_v3_prog;
-static int
+int
glusterd_syncop_aggr_rsp_dict (glusterd_op_t op, dict_t *aggr, dict_t *rsp)
{
int ret = 0;
@@ -333,6 +333,12 @@ glusterd_syncop_aggr_rsp_dict (glusterd_op_t op, dict_t *aggr, dict_t *rsp)
goto out;
break;
+ case GD_OP_SNAP:
+ ret = glusterd_snap_use_rsp_dict (aggr, rsp);
+ if (ret)
+ goto out;
+ break;
+
default:
break;
}
@@ -411,13 +417,13 @@ gd_syncop_mgmt_lock (glusterd_peerinfo_t *peerinfo, struct syncargs *args,
}
int32_t
-_gd_syncop_mgmt_volume_lock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_syncop_mgmt_v3_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
int ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
- gd1_mgmt_v3_vol_lock_rsp rsp = {{0},};
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
call_frame_t *frame = NULL;
int op_ret = -1;
int op_errno = -1;
@@ -438,7 +444,7 @@ _gd_syncop_mgmt_volume_lock_cbk (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
if (ret < 0)
goto out;
@@ -448,7 +454,7 @@ _gd_syncop_mgmt_volume_lock_cbk (struct rpc_req *req, struct iovec *iov,
op_errno = rsp.op_errno;
out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_VOLUME_LOCK,
+ GLUSTERD_MGMT_V3_LOCK,
peerinfo, rsp.uuid);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
@@ -456,21 +462,21 @@ out:
}
int32_t
-gd_syncop_mgmt_volume_lock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_syncop_mgmt_v3_lock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- _gd_syncop_mgmt_volume_lock_cbk);
+ gd_syncop_mgmt_v3_lock_cbk_fn);
}
int
-gd_syncop_mgmt_volume_lock (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid, uuid_t txn_id)
+gd_syncop_mgmt_v3_lock (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid, uuid_t txn_id)
{
int ret = -1;
- gd1_mgmt_v3_vol_lock_req req = {{0},};
+ gd1_mgmt_v3_lock_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
GF_ASSERT(op_ctx);
@@ -489,9 +495,9 @@ gd_syncop_mgmt_volume_lock (glusterd_op_t op, dict_t *op_ctx,
synclock_unlock (&conf->big_lock);
ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
&gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_VOLUME_LOCK,
- gd_syncop_mgmt_volume_lock_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_vol_lock_req);
+ GLUSTERD_MGMT_V3_LOCK,
+ gd_syncop_mgmt_v3_lock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_lock_req);
synclock_lock (&conf->big_lock);
out:
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
@@ -499,13 +505,13 @@ out:
}
int32_t
-_gd_syncop_mgmt_volume_unlock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_syncop_mgmt_v3_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
int ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
- gd1_mgmt_v3_vol_unlock_rsp rsp = {{0},};
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
call_frame_t *frame = NULL;
int op_ret = -1;
int op_errno = -1;
@@ -526,7 +532,7 @@ _gd_syncop_mgmt_volume_unlock_cbk (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
if (ret < 0)
goto out;
@@ -539,7 +545,7 @@ _gd_syncop_mgmt_volume_unlock_cbk (struct rpc_req *req, struct iovec *iov,
op_errno = rsp.op_errno;
out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_VOLUME_UNLOCK,
+ GLUSTERD_MGMT_V3_UNLOCK,
peerinfo, rsp.uuid);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
@@ -547,20 +553,20 @@ out:
}
int32_t
-gd_syncop_mgmt_volume_unlock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_syncop_mgmt_v3_unlock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- _gd_syncop_mgmt_volume_unlock_cbk);
+ gd_syncop_mgmt_v3_unlock_cbk_fn);
}
int
-gd_syncop_mgmt_volume_unlock (dict_t *op_ctx, glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid, uuid_t txn_id)
+gd_syncop_mgmt_v3_unlock (dict_t *op_ctx, glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid, uuid_t txn_id)
{
int ret = -1;
- gd1_mgmt_v3_vol_unlock_req req = {{0},};
+ gd1_mgmt_v3_unlock_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
GF_ASSERT(op_ctx);
@@ -578,9 +584,9 @@ gd_syncop_mgmt_volume_unlock (dict_t *op_ctx, glusterd_peerinfo_t *peerinfo,
synclock_unlock (&conf->big_lock);
ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
&gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_VOLUME_UNLOCK,
- gd_syncop_mgmt_volume_unlock_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_vol_unlock_req);
+ GLUSTERD_MGMT_V3_UNLOCK,
+ gd_syncop_mgmt_v3_unlock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_unlock_req);
synclock_lock (&conf->big_lock);
out:
gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
@@ -876,10 +882,12 @@ gd_syncop_mgmt_brick_op (struct rpc_clnt *rpc, glusterd_pending_node_t *pnode,
GD_SYNCOP (rpc, (&args), NULL, gd_syncop_brick_op_cbk, req,
&gd_brick_prog, req->op, xdr_gd1_mgmt_brick_op_req);
- if (args.errstr && errstr)
- *errstr = args.errstr;
- else
- GF_FREE (args.errstr);
+ if (args.errstr) {
+ if ((strlen(args.errstr) > 0) && errstr)
+ *errstr = args.errstr;
+ else
+ GF_FREE (args.errstr);
+ }
if (GD_OP_STATUS_VOLUME == op) {
ret = dict_set_int32 (args.dict, "index", pnode->index);
@@ -1077,8 +1085,8 @@ gd_lock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
gd_syncop_mgmt_lock (peerinfo, &args,
MY_UUID, peer_uuid);
} else
- gd_syncop_mgmt_volume_lock (op, op_ctx, peerinfo, &args,
- MY_UUID, peer_uuid, txn_id);
+ gd_syncop_mgmt_v3_lock (op, op_ctx, peerinfo, &args,
+ MY_UUID, peer_uuid, txn_id);
peer_cnt++;
}
gd_synctask_barrier_wait((&args), peer_cnt);
@@ -1306,9 +1314,9 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int op_ret,
gd_syncop_mgmt_unlock (peerinfo, &args,
MY_UUID, tmp_uuid);
} else
- gd_syncop_mgmt_volume_unlock (op_ctx, peerinfo,
- &args, MY_UUID,
- tmp_uuid, txn_id);
+ gd_syncop_mgmt_v3_unlock (op_ctx, peerinfo,
+ &args, MY_UUID,
+ tmp_uuid, txn_id);
peer_cnt++;
list_del_init (&peerinfo->op_peers_list);
}
@@ -1323,11 +1331,13 @@ out:
glusterd_op_send_cli_response (op, op_ret, 0, req, op_ctx, op_errstr);
glusterd_op_clear_op (op);
if (is_acquired) {
- /* Based on the op-version, we release the cluster or volume lock */
+ /* Based on the op-version, we release *
+ * the cluster or mgmt_v3 lock */
if (conf->op_version < 3)
glusterd_unlock (MY_UUID);
else {
- ret = glusterd_volume_unlock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID,
+ "vol");
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release lock for %s",
@@ -1454,7 +1464,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
uuid_generate (*txn_id);
ret = dict_set_bin (op_ctx, "transaction_id",
- txn_id, sizeof (uuid_t));
+ txn_id, sizeof(*txn_id));
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Failed to set transaction id.");
@@ -1489,7 +1499,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
op = tmp_op;
- /* Based on the op_version, acquire a cluster or volume lock */
+ /* Based on the op_version, acquire a cluster or mgmt_v3 lock */
if (conf->op_version < 3) {
ret = glusterd_lock (MY_UUID);
if (ret) {
@@ -1518,7 +1528,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
goto out;
}
- ret = glusterd_volume_lock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_lock (volname, MY_UUID, "vol");
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire lock for %s", volname);
@@ -1535,7 +1545,7 @@ gd_sync_task_begin (dict_t *op_ctx, rpcsvc_request_t * req)
local_locking_done:
/* Save opinfo for this transaction with the transaction id */
- txn_opinfo.op = op;
+ glusterd_txn_opinfo_init (&txn_opinfo, NULL, &op, NULL, NULL);
ret = glusterd_set_txn_opinfo (txn_id, &txn_opinfo);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.h b/xlators/mgmt/glusterd/src/glusterd-syncop.h
index 35215a78a..e83ea2f4c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.h
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.h
@@ -62,4 +62,10 @@ gd_build_peers_list (struct list_head *peers, struct list_head *xact_peers,
int
gd_brick_op_phase (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
char **op_errstr);
+
+int
+glusterd_syncop_aggr_rsp_dict (glusterd_op_t op, dict_t *aggr, dict_t *rsp);
+
+void
+gd_syncargs_init (struct syncargs *args, dict_t *op_ctx);
#endif /* __RPC_SYNCOP_H */
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 1f5cc741b..e8ae05851 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -427,12 +427,6 @@ glusterd_volinfo_new (glusterd_volinfo_t **volinfo)
{
glusterd_volinfo_t *new_volinfo = NULL;
int32_t ret = -1;
- xlator_t *this = NULL;
- glusterd_conf_t *conf = NULL;
-
- this = THIS;
-
- conf = this->private;
GF_ASSERT (volinfo);
@@ -444,8 +438,9 @@ glusterd_volinfo_new (glusterd_volinfo_t **volinfo)
LOCK_INIT (&new_volinfo->lock);
INIT_LIST_HEAD (&new_volinfo->vol_list);
+ INIT_LIST_HEAD (&new_volinfo->snapvol_list);
INIT_LIST_HEAD (&new_volinfo->bricks);
- INIT_LIST_HEAD (&new_volinfo->snaps);
+ INIT_LIST_HEAD (&new_volinfo->snap_volumes);
new_volinfo->dict = dict_new ();
if (!new_volinfo->dict) {
@@ -461,10 +456,9 @@ glusterd_volinfo_new (glusterd_volinfo_t **volinfo)
goto out;
}
- if (conf)
- new_volinfo->snap_max_limit = conf->snap_max_limit;
- else
- new_volinfo->snap_max_limit = GLUSTERD_SNAPS_MAX_LIMIT;
+ snprintf (new_volinfo->parent_volname, GLUSTERD_MAX_VOLUME_NAME, "N/A");
+
+ new_volinfo->snap_max_hard_limit = GLUSTERD_SNAPS_MAX_HARD_LIMIT;
new_volinfo->xl = THIS;
@@ -477,16 +471,28 @@ out:
return ret;
}
+/* This function will create a new volinfo and then
+ * dup the entries from volinfo to the new_volinfo.
+ *
+ * @param volinfo volinfo which will be duplicated
+ * @param dup_volinfo new volinfo which will be created
+ * @param set_userauth if this true then auth info is also set
+ *
+ * @return 0 on success else -1
+ */
int32_t
glusterd_volinfo_dup (glusterd_volinfo_t *volinfo,
- glusterd_volinfo_t **dup_volinfo)
+ glusterd_volinfo_t **dup_volinfo,
+ gf_boolean_t set_userauth)
{
- int32_t ret = -1;
- glusterd_volinfo_t *new_volinfo = NULL;
- xlator_t *this = NULL;
+ int32_t ret = -1;
+ xlator_t *this = NULL;
+ glusterd_volinfo_t *new_volinfo = NULL;
this = THIS;
GF_ASSERT (this);
+ GF_VALIDATE_OR_GOTO (this->name, volinfo, out);