Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-mgmt.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-mgmt.c  126
1 file changed, 62 insertions(+), 64 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 424bcca8a..7359169dd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -49,10 +49,10 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
}
switch (op_code){
- case GLUSTERD_MGMT_V3_VOLUME_LOCK:
+ case GLUSTERD_MGMT_V3_LOCK:
{
len = snprintf (op_err, sizeof(op_err) - 1,
- "Locking volume failed "
+ "Locking failed "
"on %s. %s", peer_str, err_str);
break;
}
@@ -84,10 +84,10 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
"on %s. %s", peer_str, err_str);
break;
}
- case GLUSTERD_MGMT_V3_VOLUME_UNLOCK:
+ case GLUSTERD_MGMT_V3_UNLOCK:
{
len = snprintf (op_err, sizeof(op_err) - 1,
- "Unlocking volume failed "
+ "Unlocking failed "
"on %s. %s", peer_str, err_str);
break;
}
@@ -199,13 +199,13 @@ gd_mgmt_v3_post_validate_fn (glusterd_op_t op, dict_t *dict,
}
int32_t
-gd_mgmt_v3_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
int ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
- gd1_mgmt_v3_vol_lock_rsp rsp = {{0},};
+ gd1_mgmt_v3_lock_rsp rsp = {{0},};
call_frame_t *frame = NULL;
int op_ret = -1;
int op_errno = -1;
@@ -226,7 +226,7 @@ gd_mgmt_v3_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_lock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_lock_rsp);
if (ret < 0)
goto out;
@@ -236,29 +236,29 @@ gd_mgmt_v3_vol_lock_cbk_fn (struct rpc_req *req, struct iovec *iov,
op_errno = rsp.op_errno;
out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_VOLUME_LOCK,
- peerinfo, rsp.uuid);
+ GLUSTERD_MGMT_V3_LOCK,
+ peerinfo, rsp.uuid);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
}
int32_t
-gd_mgmt_v3_vol_lock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_lock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- gd_mgmt_v3_vol_lock_cbk_fn);
+ gd_mgmt_v3_lock_cbk_fn);
}
int
-gd_mgmt_v3_vol_lock (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_lock (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
{
int ret = -1;
- gd1_mgmt_v3_vol_lock_req req = {{0},};
+ gd1_mgmt_v3_lock_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
GF_ASSERT(op_ctx);
@@ -276,9 +276,9 @@ gd_mgmt_v3_vol_lock (glusterd_op_t op, dict_t *op_ctx,
synclock_unlock (&conf->big_lock);
ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
&gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_VOLUME_LOCK,
- gd_mgmt_v3_vol_lock_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_vol_lock_req);
+ GLUSTERD_MGMT_V3_LOCK,
+ gd_mgmt_v3_lock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_lock_req);
synclock_lock (&conf->big_lock);
out:
gf_log ("", GF_LOG_TRACE, "Returning %d", ret);
@@ -302,18 +302,18 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
this = THIS;
peers = &conf->xaction_peers;
- /* Volume(s) lock on local node */
+ /* mgmt_v3 lock on local node */
ret = dict_get_str (dict, "volname", &volname);
if (ret) {
- /* Trying to acquire volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_lock (dict, MY_UUID);
+ /* Trying to acquire multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_lock (dict, MY_UUID);
if (ret) {
gf_log ("", GF_LOG_ERROR,
- "Failed to acquire volume locks on localhost");
+ "Failed to acquire mgmt_v3 locks on localhost");
goto out;
}
} else {
- ret = glusterd_volume_lock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_lock (volname, MY_UUID);
if (ret) {
gf_log (this->name, GF_LOG_ERROR,
"Unable to acquire local lock for %s", volname);
@@ -328,13 +328,13 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_conf_t *conf, glusterd_op_t op,
goto out;
}
- /* Sending Volume lock req to other nodes in the cluster */
+ /* Sending mgmt_v3 lock req to other nodes in the cluster */
gd_syncargs_init (&args, NULL);
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_vol_lock (op, dict, peerinfo, &args,
- MY_UUID, peer_uuid);
+ gd_mgmt_v3_lock (op, dict, peerinfo, &args,
+ MY_UUID, peer_uuid);
peer_cnt++;
}
gd_synctask_barrier_wait((&args), peer_cnt);
@@ -1189,13 +1189,13 @@ out:
}
int32_t
-gd_mgmt_v3_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
int ret = -1;
struct syncargs *args = NULL;
glusterd_peerinfo_t *peerinfo = NULL;
- gd1_mgmt_v3_vol_unlock_rsp rsp = {{0},};
+ gd1_mgmt_v3_unlock_rsp rsp = {{0},};
call_frame_t *frame = NULL;
int op_ret = -1;
int op_errno = -1;
@@ -1216,7 +1216,7 @@ gd_mgmt_v3_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
}
ret = xdr_to_generic (*iov, &rsp,
- (xdrproc_t)xdr_gd1_mgmt_v3_vol_unlock_rsp);
+ (xdrproc_t)xdr_gd1_mgmt_v3_unlock_rsp);
if (ret < 0)
goto out;
@@ -1226,29 +1226,29 @@ gd_mgmt_v3_vol_unlock_cbk_fn (struct rpc_req *req, struct iovec *iov,
op_errno = rsp.op_errno;
out:
gd_mgmt_v3_collate_errors (args, op_ret, op_errno, NULL,
- GLUSTERD_MGMT_V3_VOLUME_UNLOCK,
- peerinfo, rsp.uuid);
+ GLUSTERD_MGMT_V3_UNLOCK,
+ peerinfo, rsp.uuid);
STACK_DESTROY (frame->root);
synctask_barrier_wake(args);
return 0;
}
int32_t
-gd_mgmt_v3_vol_unlock_cbk (struct rpc_req *req, struct iovec *iov,
- int count, void *myframe)
+gd_mgmt_v3_unlock_cbk (struct rpc_req *req, struct iovec *iov,
+ int count, void *myframe)
{
return glusterd_big_locked_cbk (req, iov, count, myframe,
- gd_mgmt_v3_vol_unlock_cbk_fn);
+ gd_mgmt_v3_unlock_cbk_fn);
}
int
-gd_mgmt_v3_vol_unlock (glusterd_op_t op, dict_t *op_ctx,
- glusterd_peerinfo_t *peerinfo,
- struct syncargs *args, uuid_t my_uuid,
- uuid_t recv_uuid)
+gd_mgmt_v3_unlock (glusterd_op_t op, dict_t *op_ctx,
+ glusterd_peerinfo_t *peerinfo,
+ struct syncargs *args, uuid_t my_uuid,
+ uuid_t recv_uuid)
{
int ret = -1;
- gd1_mgmt_v3_vol_unlock_req req = {{0},};
+ gd1_mgmt_v3_unlock_req req = {{0},};
glusterd_conf_t *conf = THIS->private;
GF_ASSERT(op_ctx);
@@ -1266,9 +1266,9 @@ gd_mgmt_v3_vol_unlock (glusterd_op_t op, dict_t *op_ctx,
synclock_unlock (&conf->big_lock);
ret = gd_syncop_submit_request (peerinfo->rpc, &req, args, peerinfo,
&gd_mgmt_v3_prog,
- GLUSTERD_MGMT_V3_VOLUME_UNLOCK,
- gd_mgmt_v3_vol_unlock_cbk,
- (xdrproc_t) xdr_gd1_mgmt_v3_vol_unlock_req);
+ GLUSTERD_MGMT_V3_UNLOCK,
+ gd_mgmt_v3_unlock_cbk,
+ (xdrproc_t) xdr_gd1_mgmt_v3_unlock_req);
synclock_lock (&conf->big_lock);
out:
gf_log ("", GF_LOG_TRACE, "Returning %d", ret);
@@ -1301,13 +1301,13 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_conf_t *conf, glusterd_op_t op,
goto out;
}
- /* Sending Volume unlock req to other nodes in the cluster */
+ /* Sending mgmt_v3 unlock req to other nodes in the cluster */
gd_syncargs_init (&args, NULL);
synctask_barrier_init((&args));
peer_cnt = 0;
list_for_each_entry (peerinfo, peers, op_peers_list) {
- gd_mgmt_v3_vol_unlock (op, dict, peerinfo, &args,
- MY_UUID, peer_uuid);
+ gd_mgmt_v3_unlock (op, dict, peerinfo, &args,
+ MY_UUID, peer_uuid);
peer_cnt++;
}
gd_synctask_barrier_wait((&args), peer_cnt);
@@ -1389,12 +1389,11 @@ glusterd_mgmt_v3_initiate_all_phases (rpcsvc_request_t *req, glusterd_op_t op,
INIT_LIST_HEAD (&conf->xaction_peers);
npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
- /* LOCKDOWN PHASE - Based on the number of volumes either single
- * or multiple volume locks is acquired */
+ /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
- npeers, &is_acquired);
+ npeers, &is_acquired);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Volume lockdown failed.");
+ gf_log ("", GF_LOG_ERROR, "mgmt_v3 lockdown failed.");
goto out;
}
@@ -1447,13 +1446,13 @@ out:
ret = dict_get_str (tmp_dict, "volname", &volname);
if (ret) {
- /* Trying to release volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_unlock (tmp_dict, MY_UUID);
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
if (ret)
gf_log ("", GF_LOG_ERROR,
- "Failed to release volume locks on localhost");
+ "Failed to release mgmt_v3 locks on localhost");
} else {
- ret = glusterd_volume_unlock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release local lock for %s", volname);
@@ -1536,12 +1535,11 @@ glusterd_mgmt_v3_initiate_snap_phases (rpcsvc_request_t *req, glusterd_op_t op,
INIT_LIST_HEAD (&conf->xaction_peers);
npeers = gd_build_peers_list (&conf->peers, &conf->xaction_peers, op);
- /* LOCKDOWN PHASE - Based on the number of volumes either single
- * or multiple volume locks is acquired */
+ /* LOCKDOWN PHASE - Acquire mgmt_v3 locks */
ret = glusterd_mgmt_v3_initiate_lockdown (conf, op, dict, &op_errstr,
npeers, &is_acquired);
if (ret) {
- gf_log ("", GF_LOG_ERROR, "Volume lockdown failed.");
+ gf_log ("", GF_LOG_ERROR, "mgmt_v3 lockdown failed.");
goto out;
}
@@ -1642,13 +1640,13 @@ out:
ret = dict_get_str (tmp_dict, "volname", &volname);
if (ret) {
- /* Trying to release volume locks on multiple volumes */
- ret = glusterd_multiple_volumes_unlock (tmp_dict, MY_UUID);
+ /* Trying to release multiple mgmt_v3 locks */
+ ret = glusterd_multiple_mgmt_v3_unlock (tmp_dict, MY_UUID);
if (ret)
gf_log ("", GF_LOG_ERROR,
- "Failed to release volume locks on localhost");
+ "Failed to release mgmt_v3 locks on localhost");
} else {
- ret = glusterd_volume_unlock (volname, MY_UUID);
+ ret = glusterd_mgmt_v3_unlock (volname, MY_UUID);
if (ret)
gf_log (this->name, GF_LOG_ERROR,
"Unable to release local lock for %s", volname);