Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-handler.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c  1237
1 file changed, 633 insertions(+), 604 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index ef1df3c3788..1b21c40596d 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -9,24 +9,25 @@
*/
#include <inttypes.h>
-#include "glusterfs.h"
-#include "compat.h"
-#include "dict.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/dict.h>
#include "protocol-common.h"
-#include "xlator.h"
-#include "logging.h"
-#include "syscall.h"
-#include "timer.h"
-#include "defaults.h"
-#include "compat.h"
-#include "compat-errno.h"
-#include "statedump.h"
-#include "run.h"
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/syscall.h>
+#include <glusterfs/timer.h>
+#include <glusterfs/defaults.h>
+#include <glusterfs/compat.h>
+#include <glusterfs/compat-errno.h>
+#include <glusterfs/statedump.h>
+#include <glusterfs/run.h>
#include "glusterd-mem-types.h"
#include "glusterd.h"
#include "glusterd-sm.h"
#include "glusterd-op-sm.h"
#include "glusterd-utils.h"
+#include "glusterd-mgmt.h"
#include "glusterd-server-quorum.h"
#include "glusterd-store.h"
#include "glusterd-locks.h"
@@ -45,15 +46,11 @@
#include <sys/resource.h>
#include <inttypes.h>
-#include "common-utils.h"
+#include <glusterfs/common-utils.h>
#include "glusterd-syncop.h"
#include "glusterd-messages.h"
-#ifdef HAVE_BD_XLATOR
-#include <lvm2app.h>
-#endif
-
extern glusterd_op_info_t opinfo;
static int volcount;
@@ -94,16 +91,17 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
glusterd_friend_sm_event_t *event = NULL;
glusterd_friend_req_ctx_t *ctx = NULL;
char rhost[UNIX_PATH_MAX + 1] = {0};
- uuid_t friend_uuid = {0};
dict_t *dict = NULL;
- gf_uuid_parse(uuid_utoa(uuid), friend_uuid);
if (!port)
port = GF_DEFAULT_BASE_PORT;
ret = glusterd_remote_hostname_get(req, rhost, sizeof(rhost));
- rcu_read_lock();
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
+ dict = dict_new();
+
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, rhost);
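Throughout this change, direct rcu_read_lock()/rcu_read_unlock() calls are
replaced by RCU_READ_LOCK/RCU_READ_UNLOCK macros. A minimal sketch of such
wrappers, assuming they simply delegate to liburcu (the authoritative
definitions live in glusterd-rcu.h and may add debugging or cleanup
serialization on top):

    #include <urcu.h>   /* liburcu read-side primitives */

    /* Hypothetical minimal definitions -- the real macros in
     * glusterd-rcu.h are the source of truth. */
    #define RCU_READ_LOCK rcu_read_lock()
    #define RCU_READ_UNLOCK rcu_read_unlock()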
@@ -129,8 +127,6 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
event->peername = gf_strdup(peerinfo->hostname);
gf_uuid_copy(event->peerid, peerinfo->uuid);
- ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
-
if (!ctx) {
gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
"Unable to allocate memory");
@@ -143,8 +139,8 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ctx->hostname = gf_strdup(hostname);
ctx->req = req;
- dict = dict_new();
if (!dict) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -152,9 +148,11 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ret = dict_unserialize(friend_req->vols.vols_val, friend_req->vols.vols_len,
&dict);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
goto out;
- else
+ } else
dict->extra_stdfree = friend_req->vols.vols_val;
ctx->vols = dict;
@@ -174,7 +172,7 @@ glusterd_handle_friend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ret = GLUSTERD_CONNECTION_AWAITED;
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret && (ret != GLUSTERD_CONNECTION_AWAITED)) {
if (ctx && ctx->hostname)
@@ -207,11 +205,14 @@ glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
if (!port)
port = GF_DEFAULT_BASE_PORT;
- rcu_read_lock();
+ ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
+
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, hostname);
if (peerinfo == NULL) {
+ RCU_READ_UNLOCK;
gf_msg("glusterd", GF_LOG_CRITICAL, 0, GD_MSG_REQ_FROM_UNKNOWN_PEER,
"Received remove-friend from unknown peer %s", hostname);
ret = glusterd_xfer_friend_remove_resp(req, hostname, port);
@@ -222,6 +223,7 @@ glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
&event);
if (ret) {
+ RCU_READ_UNLOCK;
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_NEW_GET_FAIL,
"event generation failed: %d", ret);
goto out;
@@ -232,12 +234,11 @@ glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
gf_uuid_copy(event->peerid, uuid);
- ctx = GF_CALLOC(1, sizeof(*ctx), gf_gld_mt_friend_req_ctx_t);
-
if (!ctx) {
+ RCU_READ_UNLOCK;
+ ret = -1;
gf_msg("glusterd", GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
"Unable to allocate memory");
- ret = -1;
goto out;
}
@@ -251,6 +252,7 @@ glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
ret = glusterd_friend_sm_inject_event(event);
if (ret) {
+ RCU_READ_UNLOCK;
gf_msg("glusterd", GF_LOG_ERROR, 0, GD_MSG_EVENT_INJECT_FAIL,
"Unable to inject event %d, "
"ret = %d",
@@ -258,10 +260,11 @@ glusterd_handle_unfriend_req(rpcsvc_request_t *req, uuid_t uuid, char *hostname,
goto out;
}
- ret = 0;
+ RCU_READ_UNLOCK;
+
+ return 0;
out:
- rcu_read_unlock();
if (0 != ret) {
if (ctx && ctx->hostname)
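The net effect of this hunk is that the RCU read-side critical section is
now closed on each exit path before jumping to out:, rather than once at the
label. A condensed sketch of the resulting control flow (a hypothetical
simplification of glusterd_handle_unfriend_req):

    RCU_READ_LOCK;
    peerinfo = glusterd_peerinfo_find(uuid, hostname);
    if (peerinfo == NULL) {
        RCU_READ_UNLOCK; /* drop the lock before the error path */
        ret = glusterd_xfer_friend_remove_resp(req, hostname, port);
        goto out;
    }
    /* ... event creation, ctx setup, event injection ... */
    RCU_READ_UNLOCK; /* the success path unlocks as well */
    return 0;
    out:
    /* no unlock here any more: every path above already dropped it */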
@@ -326,81 +329,6 @@ _build_option_key(dict_t *d, char *k, data_t *v, void *tmp)
}
int
-glusterd_add_tier_volume_detail_to_dict(glusterd_volinfo_t *volinfo,
- dict_t *dict, int count)
-{
- int ret = -1;
- char key[64] = {
- 0,
- };
- int keylen;
-
- GF_ASSERT(volinfo);
- GF_ASSERT(dict);
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_type", count);
- ret = dict_set_int32n(dict, key, keylen, volinfo->tier_info.cold_type);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_brick_count", count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.cold_brick_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_dist_count", count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.cold_dist_leaf_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_replica_count", count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.cold_replica_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_arbiter_count", count);
- ret = dict_set_int32n(dict, key, keylen, volinfo->arbiter_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_disperse_count", count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.cold_disperse_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.cold_redundancy_count",
- count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.cold_redundancy_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.hot_type", count);
- ret = dict_set_int32n(dict, key, keylen, volinfo->tier_info.hot_type);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.hot_brick_count", count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.hot_brick_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.hot_replica_count", count);
- ret = dict_set_int32n(dict, key, keylen,
- volinfo->tier_info.hot_replica_count);
- if (ret)
- goto out;
-
-out:
- return ret;
-}
-
-int
glusterd_add_arbiter_info_to_bricks(glusterd_volinfo_t *volinfo,
dict_t *volumes, int count)
{
@@ -409,41 +337,18 @@ glusterd_add_arbiter_info_to_bricks(glusterd_volinfo_t *volinfo,
};
int keylen;
int i = 0;
- int start_index = 0;
int ret = 0;
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- /*TODO: Add info for hot tier once attach tier of arbiter
- * volumes is supported. */
-
- /* cold tier */
- if (volinfo->tier_info.cold_replica_count == 1 ||
- volinfo->arbiter_count != 1)
- return 0;
-
- i = start_index = volinfo->tier_info.hot_brick_count + 1;
- for (; i <= volinfo->brick_count; i++) {
- if ((i - start_index + 1) % volinfo->tier_info.cold_replica_count !=
- 0)
- continue;
- keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter",
- count, i);
- ret = dict_set_int32n(volumes, key, keylen, 1);
- if (ret)
- return ret;
- }
- } else {
- if (volinfo->replica_count == 1 || volinfo->arbiter_count != 1)
- return 0;
- for (i = 1; i <= volinfo->brick_count; i++) {
- if (i % volinfo->replica_count != 0)
- continue;
- keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter",
- count, i);
- ret = dict_set_int32n(volumes, key, keylen, 1);
- if (ret)
- return ret;
- }
+ if (volinfo->replica_count == 1 || volinfo->arbiter_count != 1)
+ return 0;
+ for (i = 1; i <= volinfo->brick_count; i++) {
+ if (i % volinfo->replica_count != 0)
+ continue;
+ keylen = snprintf(key, sizeof(key), "volume%d.brick%d.isArbiter", count,
+ i);
+ ret = dict_set_int32n(volumes, key, keylen, 1);
+ if (ret)
+ return ret;
}
return 0;
}
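With the tier branch removed, arbiter detection reduces to modular
arithmetic: in a replica-N, arbiter-1 volume the last brick of every replica
set is the arbiter. A self-contained illustration with hypothetical values
(replica 3, six bricks):

    #include <stdio.h>

    int
    main(void)
    {
        int replica_count = 3; /* replica-3, arbiter-1 volume */
        int brick_count = 6;
        int i;

        for (i = 1; i <= brick_count; i++)
            if (i % replica_count == 0) /* last brick of each replica set */
                printf("volume0.brick%d.isArbiter: 1\n", i); /* i = 3, 6 */
        return 0;
    }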
@@ -458,6 +363,7 @@ glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
};
int keylen;
glusterd_brickinfo_t *brickinfo = NULL;
+ glusterd_brickinfo_t *ta_brickinfo = NULL;
char *buf = NULL;
int i = 1;
dict_t *dict = NULL;
@@ -467,9 +373,12 @@ glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
0,
};
xlator_t *this = NULL;
- GF_UNUSED int caps = 0;
int32_t len = 0;
+ char ta_brick[4096] = {
+ 0,
+ };
+
GF_ASSERT(volinfo);
GF_ASSERT(volumes);
@@ -480,172 +389,129 @@ glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
keylen = snprintf(key, sizeof(key), "volume%d.name", count);
ret = dict_set_strn(volumes, key, keylen, volinfo->volname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.type", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->type);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.status", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->status);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.brick_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->brick_count);
- if (ret)
- goto out;
-
- keylen = snprintf(key, sizeof(key), "volume%d.hot_brick_count", count);
- ret = dict_set_int32n(volumes, key, keylen,
- volinfo->tier_info.hot_brick_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
-
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_add_tier_volume_detail_to_dict(volinfo, volumes, count);
- if (ret)
- goto out;
}
keylen = snprintf(key, sizeof(key), "volume%d.dist_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->dist_leaf_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.stripe_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->stripe_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.replica_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->replica_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.disperse_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->disperse_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.redundancy_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->redundancy_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.arbiter_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->arbiter_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.transport", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->transport_type);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
+
+ keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_count", count);
+ ret = dict_set_int32n(volumes, key, keylen, volinfo->thin_arbiter_count);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
+ goto out;
+ }
volume_id_str = gf_strdup(uuid_utoa(volinfo->volume_id));
- if (!volume_id_str)
+ if (!volume_id_str) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.volume_id", count);
ret = dict_set_dynstrn(volumes, key, keylen, volume_id_str);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.rebalance", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->rebal.defrag_cmd);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.snap_count", count);
ret = dict_set_int32n(volumes, key, keylen, volinfo->snap_count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
-
-#ifdef HAVE_BD_XLATOR
- if (volinfo->caps) {
- caps = 0;
- keylen = snprintf(key, sizeof(key), "volume%d.xlator0", count);
- buf = GF_MALLOC(256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- if (volinfo->caps & CAPS_BD)
- snprintf(buf, 256, "BD");
- ret = dict_set_dynstrn(volumes, key, keylen, buf);
- if (ret) {
- GF_FREE(buf);
- goto out;
- }
-
- if (volinfo->caps & CAPS_THIN) {
- snprintf(key, sizeof(key), "volume%d.xlator0.caps%d", count,
- caps++);
- buf = GF_MALLOC(256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- snprintf(buf, 256, "thin");
- ret = dict_set_dynstr(volumes, key, buf);
- if (ret) {
- GF_FREE(buf);
- goto out;
- }
- }
-
- if (volinfo->caps & CAPS_OFFLOAD_COPY) {
- snprintf(key, sizeof(key), "volume%d.xlator0.caps%d", count,
- caps++);
- buf = GF_MALLOC(256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- snprintf(buf, 256, "offload_copy");
- ret = dict_set_dynstr(volumes, key, buf);
- if (ret) {
- GF_FREE(buf);
- goto out;
- }
- }
-
- if (volinfo->caps & CAPS_OFFLOAD_SNAPSHOT) {
- snprintf(key, sizeof(key), "volume%d.xlator0.caps%d", count,
- caps++);
- buf = GF_MALLOC(256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- snprintf(buf, 256, "offload_snapshot");
- ret = dict_set_dynstr(volumes, key, buf);
- if (ret) {
- GF_FREE(buf);
- goto out;
- }
- }
-
- if (volinfo->caps & CAPS_OFFLOAD_ZERO) {
- snprintf(key, sizeof(key), "volume%d.xlator0.caps%d", count,
- caps++);
- buf = GF_MALLOC(256, gf_common_mt_char);
- if (!buf) {
- ret = ENOMEM;
- goto out;
- }
- snprintf(buf, 256, "offload_zerofill");
- ret = dict_set_dynstr(volumes, key, buf);
- if (ret) {
- GF_FREE(buf);
- goto out;
- }
- }
}
-#endif
cds_list_for_each_entry(brickinfo, &volinfo->bricks, brick_list)
{
@@ -658,42 +524,67 @@ glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
len = snprintf(brick, sizeof(brick), "%s:%s", brickinfo->hostname,
brickinfo->path);
if ((len < 0) || (len >= sizeof(brick))) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
ret = -1;
goto out;
}
buf = gf_strdup(brick);
keylen = snprintf(key, sizeof(key), "volume%d.brick%d", count, i);
ret = dict_set_dynstrn(volumes, key, keylen, buf);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "volume%d.brick%d.uuid", count, i);
snprintf(brick_uuid, sizeof(brick_uuid), "%s",
uuid_utoa(brickinfo->uuid));
buf = gf_strdup(brick_uuid);
- if (!buf)
+ if (!buf) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "brick_uuid=%s", brick_uuid, NULL);
goto out;
+ }
ret = dict_set_dynstrn(volumes, key, keylen, buf);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
-
-#ifdef HAVE_BD_XLATOR
- if (volinfo->caps & CAPS_BD) {
- snprintf(key, sizeof(key), "volume%d.vg%d", count, i);
- snprintf(brick, sizeof(brick), "%s", brickinfo->vg);
- buf = gf_strdup(brick);
- ret = dict_set_dynstr(volumes, key, buf);
- if (ret)
- goto out;
}
-#endif
+
i++;
}
+ if (volinfo->thin_arbiter_count == 1) {
+ ta_brickinfo = list_first_entry(&volinfo->ta_bricks,
+ glusterd_brickinfo_t, brick_list);
+ len = snprintf(ta_brick, sizeof(ta_brick), "%s:%s",
+ ta_brickinfo->hostname, ta_brickinfo->path);
+ if ((len < 0) || (len >= sizeof(ta_brick))) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_COPY_FAIL, NULL);
+ ret = -1;
+ goto out;
+ }
+ buf = gf_strdup(ta_brick);
+ keylen = snprintf(key, sizeof(key), "volume%d.thin_arbiter_brick",
+ count);
+ ret = dict_set_dynstrn(volumes, key, keylen, buf);
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
+ goto out;
+ }
+ }
+
ret = glusterd_add_arbiter_info_to_bricks(volinfo, volumes, count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_ARBITER_BRICK_SET_INFO_FAIL, NULL);
goto out;
+ }
dict = volinfo->dict;
if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = 0;
goto out;
}
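The thin-arbiter branch above publishes two extra keys per volume. A sketch
of how a consumer might read them back, assuming a dict populated by
glusterd_add_volume_detail_to_dict() for volume index 0 (key names taken
from the hunk above):

    int32_t ta_count = 0;
    char *ta_brick_str = NULL;

    if (dict_get_int32(volumes, "volume0.thin_arbiter_count", &ta_count) == 0 &&
        ta_count == 1) {
        /* "host:/path" of the single thin-arbiter brick */
        dict_get_str(volumes, "volume0.thin_arbiter_brick", &ta_brick_str);
    }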
@@ -904,9 +795,9 @@ __glusterd_handle_cluster_lock(rpcsvc_request_t *req)
gf_msg_debug(this->name, 0, "Received LOCK from uuid: %s",
uuid_utoa(lock_req.uuid));
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid(lock_req.uuid) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"%s doesn't "
@@ -929,6 +820,7 @@ __glusterd_handle_cluster_lock(rpcsvc_request_t *req)
op_ctx = dict_new();
if (!op_ctx) {
+ ret = -1;
gf_msg(this->name, GF_LOG_ERROR, ENOMEM, GD_MSG_DICT_CREATE_FAIL,
"Unable to set new dict");
goto out;
@@ -955,6 +847,9 @@ out:
glusterd_friend_sm();
glusterd_op_sm();
+ if (ret)
+ GF_FREE(ctx);
+
return ret;
}
@@ -985,11 +880,14 @@ glusterd_req_ctx_create(rpcsvc_request_t *rpc_req, int op, uuid_t uuid,
gf_msg_debug(this->name, 0, "Received op from uuid %s", str);
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
req_ctx = GF_CALLOC(1, sizeof(*req_ctx), mem_type);
if (!req_ctx) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
}
@@ -997,8 +895,8 @@ glusterd_req_ctx_create(rpcsvc_request_t *rpc_req, int op, uuid_t uuid,
req_ctx->op = op;
ret = dict_unserialize(buf_val, buf_len, &dict);
if (ret) {
- gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
- "failed to unserialize the dictionary");
+ gf_smsg(this->name, GF_LOG_WARNING, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ NULL);
goto out;
}
@@ -1063,9 +961,9 @@ __glusterd_handle_stage_op(rpcsvc_request_t *req)
ret = dict_get_bin(req_ctx->dict, "transaction_id", (void **)&txn_id);
gf_msg_debug(this->name, 0, "transaction ID = %s", uuid_utoa(*txn_id));
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"%s doesn't "
@@ -1077,7 +975,11 @@ __glusterd_handle_stage_op(rpcsvc_request_t *req)
/* In cases where there is no volname, the receivers won't have a
* transaction opinfo created, as for those operations, the locking
- * phase where the transaction opinfos are created, won't be called. */
+ * phase where the transaction opinfos are created, won't be called.
+ * skip_locking will be true for all such transactions, and we clear
+ * the txn_opinfo after the staging phase, except for geo-replication
+ * operations, which need to access the txn_opinfo in later phases too.
+ */
ret = glusterd_get_txn_opinfo(txn_id, &txn_op_info);
if (ret) {
gf_msg_debug(this->name, 0, "No transaction's opinfo set");
@@ -1086,7 +988,8 @@ __glusterd_handle_stage_op(rpcsvc_request_t *req)
glusterd_txn_opinfo_init(&txn_op_info, &state, &op_req.op,
req_ctx->dict, req);
- txn_op_info.skip_locking = _gf_true;
+ if (req_ctx->op != GD_OP_GSYNC_SET)
+ txn_op_info.skip_locking = _gf_true;
ret = glusterd_set_txn_opinfo(txn_id, &txn_op_info);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_TRANS_OPINFO_SET_FAIL,
@@ -1144,9 +1047,9 @@ __glusterd_handle_commit_op(rpcsvc_request_t *req)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid(op_req.uuid) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"%s doesn't "
@@ -1267,12 +1170,12 @@ __glusterd_handle_cli_probe(rpcsvc_request_t *req)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_hostname(hostname);
ret = (peerinfo && gd_peer_has_address(peerinfo, hostname));
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg_debug("glusterd", 0,
@@ -1336,6 +1239,7 @@ __glusterd_handle_cli_deprobe(rpcsvc_request_t *req)
glusterd_volinfo_t *tmp = NULL;
glusterd_snap_t *snapinfo = NULL;
glusterd_snap_t *tmpsnap = NULL;
+ gf_boolean_t need_free = _gf_false;
this = THIS;
GF_ASSERT(this);
@@ -1356,6 +1260,13 @@ __glusterd_handle_cli_deprobe(rpcsvc_request_t *req)
if (cli_req.dict.dict_len) {
dict = dict_new();
+ if (dict) {
+ need_free = _gf_true;
+ } else {
+ ret = -1;
+ goto out;
+ }
+
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
&dict);
if (ret < 0) {
@@ -1451,12 +1362,17 @@ __glusterd_handle_cli_deprobe(rpcsvc_request_t *req)
&op_errno);
}
+ need_free = _gf_false;
+
out:
free(cli_req.dict.dict_val);
if (ret) {
ret = glusterd_xfer_cli_deprobe_resp(req, ret, op_errno, NULL, hostname,
dict);
+ if (need_free) {
+ dict_unref(dict);
+ }
}
glusterd_friend_sm();
@@ -1554,7 +1470,7 @@ __glusterd_handle_cli_get_volume(rpcsvc_request_t *req)
goto out;
}
- gf_msg(this->name, GF_LOG_INFO, 0, GD_MSG_GET_VOL_REQ_RCVD,
+ gf_msg(this->name, GF_LOG_DEBUG, 0, GD_MSG_GET_VOL_REQ_RCVD,
"Received get vol req");
if (cli_req.dict.dict_len) {
@@ -1756,6 +1672,8 @@ __glusterd_handle_cli_uuid_get(rpcsvc_request_t *req)
if (cli_req.dict.dict_len) {
dict = dict_new();
if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
ret = -1;
goto out;
}
@@ -1778,6 +1696,7 @@ __glusterd_handle_cli_uuid_get(rpcsvc_request_t *req)
rsp_dict = dict_new();
if (!rsp_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -1794,9 +1713,8 @@ __glusterd_handle_cli_uuid_get(rpcsvc_request_t *req)
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "Failed to serialize "
- "dictionary.");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
}
ret = 0;
@@ -1815,6 +1733,10 @@ out:
glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);
+ if (rsp_dict)
+ dict_unref(rsp_dict);
+ GF_FREE(rsp.dict.dict_val);
+
return 0;
}
int
@@ -1845,8 +1767,10 @@ __glusterd_handle_cli_list_volume(rpcsvc_request_t *req)
GF_ASSERT(priv);
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
cds_list_for_each_entry(volinfo, &priv->volumes, vol_list)
{
@@ -1858,8 +1782,11 @@ __glusterd_handle_cli_list_volume(rpcsvc_request_t *req)
}
ret = dict_set_int32n(dict, "count", SLEN("count"), count);
- if (ret)
+ if (ret) {
+ gf_smsg("glusterd", GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=count", NULL);
goto out;
+ }
ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
@@ -1881,6 +1808,8 @@ out:
if (dict)
dict_unref(dict);
+ GF_FREE(rsp.dict.dict_val);
+
glusterd_friend_sm();
glusterd_op_sm();
@@ -1904,6 +1833,85 @@ glusterd_op_begin(rpcsvc_request_t *req, glusterd_op_t op, void *ctx,
return ret;
}
+int
+__glusterd_handle_ganesha_cmd(rpcsvc_request_t *req)
+{
+ int32_t ret = -1;
+ gf_cli_req cli_req = {{
+ 0,
+ }};
+ dict_t *dict = NULL;
+ glusterd_op_t cli_op = GD_OP_GANESHA;
+ char *op_errstr = NULL;
+ char err_str[2048] = {
+ 0,
+ };
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT(this);
+
+ GF_ASSERT(req);
+
+ ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
+ if (ret < 0) {
+ snprintf(err_str, sizeof(err_str),
+ "Failed to decode "
+ "request received from cli");
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_REQ_DECODE_FAIL, "%s",
+ err_str);
+ req->rpc_err = GARBAGE_ARGS;
+ goto out;
+ }
+
+ if (cli_req.dict.dict_len) {
+ /* Unserialize the dictionary */
+ dict = dict_new();
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
+ ret = -1;
+ goto out;
+ }
+
+ ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
+ &dict);
+ if (ret < 0) {
+ gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_UNSERIALIZE_FAIL,
+ "failed to "
+ "unserialize req-buffer to dictionary");
+ snprintf(err_str, sizeof(err_str),
+ "Unable to decode "
+ "the command");
+ goto out;
+ } else {
+ dict->extra_stdfree = cli_req.dict.dict_val;
+ }
+ }
+
+ gf_msg_trace(this->name, 0, "Received global option request");
+
+ ret = glusterd_op_begin_synctask(req, GD_OP_GANESHA, dict);
+out:
+ if (ret) {
+ if (err_str[0] == '\0')
+ snprintf(err_str, sizeof(err_str), "Operation failed");
+ ret = glusterd_op_send_cli_response(cli_op, ret, 0, req, dict, err_str);
+ }
+ if (op_errstr)
+ GF_FREE(op_errstr);
+ if (dict)
+ dict_unref(dict);
+
+ return ret;
+}
+
+int
+glusterd_handle_ganesha_cmd(rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler(req, __glusterd_handle_ganesha_cmd);
+}
+
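As with every CLI verb in this file, the public entry point funnels the
worker through glusterd_big_locked_handler(), which serializes handlers
under glusterd's big lock. A sketch of that wrapper, inferred from its call
sites in this file (the in-tree definition is authoritative):

    int
    glusterd_big_locked_handler(rpcsvc_request_t *req, rpcsvc_actor actor_fn)
    {
        glusterd_conf_t *priv = THIS->private;
        int ret = -1;

        synclock_lock(&priv->big_lock); /* one handler at a time */
        ret = actor_fn(req);
        synclock_unlock(&priv->big_lock);

        return ret;
    }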
static int
__glusterd_handle_reset_volume(rpcsvc_request_t *req)
{
@@ -2230,9 +2238,8 @@ glusterd_fsm_log_send_resp(rpcsvc_request_t *req, int op_ret, char *op_errstr,
ret = dict_allocate_and_serialize(dict, &rsp.fsm_log.fsm_log_val,
&rsp.fsm_log.fsm_log_len);
if (ret < 0) {
- gf_msg("glusterd", GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
+ gf_smsg("glusterd", GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
return ret;
}
}
@@ -2278,6 +2285,7 @@ __glusterd_handle_fsm_log(rpcsvc_request_t *req)
dict = dict_new();
if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -2286,17 +2294,17 @@ __glusterd_handle_fsm_log(rpcsvc_request_t *req)
conf = this->private;
ret = glusterd_sm_tr_log_add_to_dict(dict, &conf->op_sm_log);
} else {
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_hostname(cli_req.name);
if (!peerinfo) {
+ RCU_READ_UNLOCK;
ret = -1;
snprintf(msg, sizeof(msg), "%s is not a peer", cli_req.name);
} else {
ret = glusterd_sm_tr_log_add_to_dict(dict, &peerinfo->sm_log);
+ RCU_READ_UNLOCK;
}
-
- rcu_read_unlock();
}
out:
@@ -2440,9 +2448,9 @@ __glusterd_handle_cluster_unlock(rpcsvc_request_t *req)
gf_msg_debug(this->name, 0, "Received UNLOCK from uuid: %s",
uuid_utoa(unlock_req.uuid));
- rcu_read_lock();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid(unlock_req.uuid) == NULL);
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_WARNING, 0, GD_MSG_PEER_NOT_FOUND,
"%s doesn't "
@@ -2504,8 +2512,8 @@ glusterd_op_stage_send_resp(rpcsvc_request_t *req, int32_t op, int32_t status,
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
return ret;
}
@@ -2544,9 +2552,8 @@ glusterd_op_commit_send_resp(rpcsvc_request_t *req, int32_t op, int32_t status,
ret = dict_allocate_and_serialize(rsp_dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
if (ret < 0) {
- gf_msg(this->name, GF_LOG_ERROR, 0,
- GD_MSG_DICT_SERL_LENGTH_GET_FAIL,
- "failed to get serialized length of dict");
+ gf_smsg(this->name, GF_LOG_ERROR, errno,
+ GD_MSG_DICT_ALLOC_AND_SERL_LENGTH_GET_FAIL, NULL);
goto out;
}
}
@@ -2721,7 +2728,7 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
{0},
};
dict_t *dict = NULL;
- char key[100] = {
+ char key[32] = {
0,
};
int keylen;
@@ -2753,11 +2760,11 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
}
ret = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
if (glusterd_peerinfo_find(friend_req.uuid, NULL) == NULL) {
ret = -1;
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg(this->name, GF_LOG_CRITICAL, 0, GD_MSG_REQ_FROM_UNKNOWN_PEER,
"Received friend update request "
@@ -2787,12 +2794,18 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
}
ret = dict_get_int32n(dict, "count", SLEN("count"), &count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=count", NULL);
goto out;
+ }
ret = dict_get_int32n(dict, "op", SLEN("op"), &op);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_GET_FAILED,
+ "Key=op", NULL);
goto out;
+ }
if (GD_FRIEND_UPDATE_DEL == op) {
(void)glusterd_handle_friend_update_delete(dict);
@@ -2816,7 +2829,7 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
snprintf(key, sizeof(key), "friend%d", i);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, NULL);
if (peerinfo == NULL) {
/* Create a new peer and add it to the list as there is
@@ -2861,7 +2874,7 @@ __glusterd_handle_friend_update(rpcsvc_request_t *req)
}
}
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret)
break;
@@ -2964,7 +2977,7 @@ __glusterd_handle_probe_query(rpcsvc_request_t *req)
goto out;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(probe_req.uuid, remote_hostname);
if ((peerinfo == NULL) && (!cds_list_empty(&conf->peers))) {
rsp.op_ret = -1;
@@ -2984,7 +2997,7 @@ __glusterd_handle_probe_query(rpcsvc_request_t *req)
rsp.op_errno = GF_PROBE_ADD_FAILED;
}
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
respond:
gf_uuid_copy(rsp.uuid, MY_UUID);
@@ -3031,10 +3044,13 @@ __glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
0,
};
xlator_t *this = NULL;
+ glusterd_conf_t *conf = NULL;
GF_ASSERT(req);
this = THIS;
GF_ASSERT(this);
+ conf = this->private;
+ GF_VALIDATE_OR_GOTO(this->name, conf, out);
ret = xdr_to_generic(req->msg[0], &cli_req, (xdrproc_t)xdr_gf_cli_req);
if (ret < 0) {
@@ -3048,8 +3064,11 @@ __glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
if (cli_req.dict.dict_len > 0) {
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
goto out;
+ }
dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len, &dict);
}
@@ -3075,12 +3094,21 @@ __glusterd_handle_cli_profile_volume(rpcsvc_request_t *req)
goto out;
}
- ret = glusterd_op_begin(req, cli_op, dict, err_str, sizeof(err_str));
+ if (conf->op_version < GD_OP_VERSION_6_0) {
+ gf_msg_debug(this->name, 0,
+ "The cluster is operating at "
+ "version less than %d. Falling back "
+ "to op-sm framework.",
+ GD_OP_VERSION_6_0);
+ ret = glusterd_op_begin(req, cli_op, dict, err_str, sizeof(err_str));
+ glusterd_friend_sm();
+ glusterd_op_sm();
+ } else {
+ ret = glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(
+ req, cli_op, dict);
+ }
out:
- glusterd_friend_sm();
- glusterd_op_sm();
-
free(cli_req.dict.dict_val);
if (ret) {
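The dispatch above is a standard op-version compatibility gate: the mgmt-v3
path with a brick-op phase is used only when the whole cluster runs at
op-version >= GD_OP_VERSION_6_0, presumably because older peers do not
implement those phases; otherwise the request falls back to the legacy
op-sm machinery. Condensed, the decision is:

    if (conf->op_version < GD_OP_VERSION_6_0) {
        /* mixed cluster: older peers only speak the op-sm transaction */
        ret = glusterd_op_begin(req, cli_op, dict, err_str, sizeof(err_str));
        glusterd_friend_sm();
        glusterd_op_sm();
    } else {
        /* homogeneous >= 6.0 cluster: mgmt-v3 with brick-op phase */
        ret = glusterd_mgmt_v3_initiate_all_phases_with_brickop_phase(
            req, cli_op, dict);
    }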
@@ -3267,6 +3295,7 @@ __glusterd_handle_umount(rpcsvc_request_t *req)
/* check if it is allowed to umount path */
path = gf_strdup(umnt_req.path);
if (!path) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED, NULL);
rsp.op_errno = ENOMEM;
goto out;
}
@@ -3334,25 +3363,26 @@ glusterd_friend_remove(uuid_t uuid, char *hostname)
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, hostname);
if (peerinfo == NULL) {
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
goto out;
}
ret = glusterd_friend_remove_cleanup_vols(peerinfo->uuid);
+ RCU_READ_UNLOCK;
if (ret)
gf_msg(THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_CLEANUP_FAIL,
"Volumes cleanup failed");
- rcu_read_unlock();
/* Giving up the critical section here as glusterd_peerinfo_cleanup must
* be called from outside a critical section
*/
ret = glusterd_peerinfo_cleanup(peerinfo);
out:
gf_msg_debug(THIS->name, 0, "returning %d", ret);
+ /* coverity[LOCK] */
return ret;
}
@@ -3369,6 +3399,7 @@ glusterd_rpc_create(struct rpc_clnt **rpc, dict_t *options,
GF_ASSERT(this);
GF_ASSERT(options);
+ GF_VALIDATE_OR_GOTO(this->name, rpc, out);
if (force && rpc && *rpc) {
(void)rpc_clnt_unref(*rpc);
@@ -3381,7 +3412,6 @@ glusterd_rpc_create(struct rpc_clnt **rpc, dict_t *options,
goto out;
ret = rpc_clnt_register_notify(new_rpc, notify_fn, notify_data);
- *rpc = new_rpc;
if (ret)
goto out;
ret = rpc_clnt_start(new_rpc);
@@ -3390,6 +3420,8 @@ out:
if (new_rpc) {
(void)rpc_clnt_unref(new_rpc);
}
+ } else {
+ *rpc = new_rpc;
}
gf_msg_debug(this->name, 0, "returning %d", ret);
@@ -3397,11 +3429,10 @@ out:
}
int
-glusterd_transport_inet_options_build(dict_t **options, const char *hostname,
- int port)
+glusterd_transport_inet_options_build(dict_t *dict, const char *hostname,
+ int port, char *af)
{
xlator_t *this = NULL;
- dict_t *dict = NULL;
int32_t interval = -1;
int32_t time = -1;
int32_t timeout = -1;
@@ -3409,14 +3440,14 @@ glusterd_transport_inet_options_build(dict_t **options, const char *hostname,
this = THIS;
GF_ASSERT(this);
- GF_ASSERT(options);
+ GF_ASSERT(dict);
GF_ASSERT(hostname);
if (!port)
port = GLUSTERD_DEFAULT_PORT;
/* Build default transport options */
- ret = rpc_transport_inet_options_build(&dict, hostname, port);
+ ret = rpc_transport_inet_options_build(dict, hostname, port, af);
if (ret)
goto out;
@@ -3456,7 +3487,6 @@ glusterd_transport_inet_options_build(dict_t **options, const char *hostname,
if ((interval > 0) || (time > 0))
ret = rpc_transport_keepalive_options_set(dict, interval, time,
timeout);
- *options = dict;
out:
gf_msg_debug("glusterd", 0, "Returning %d", ret);
return ret;
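The signature change moves dict ownership to the caller:
glusterd_transport_inet_options_build() now fills a caller-allocated dict
instead of allocating one and returning it through an out-parameter. A
sketch of the new call pattern (mirroring what glusterd_friend_rpc_create()
does below):

    dict_t *options = dict_new();
    if (!options)
        goto out;

    ret = glusterd_transport_inet_options_build(options, peerinfo->hostname,
                                                peerinfo->port, af);
    if (ret)
        goto out;
    /* ... hand options to glusterd_rpc_create(), which keeps its own ref ... */
    out:
    if (options)
        dict_unref(options); /* the caller drops its reference either way */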
@@ -3470,10 +3500,19 @@ glusterd_friend_rpc_create(xlator_t *this, glusterd_peerinfo_t *peerinfo,
int ret = -1;
glusterd_peerctx_t *peerctx = NULL;
data_t *data = NULL;
+ char *af = NULL;
peerctx = GF_CALLOC(1, sizeof(*peerctx), gf_gld_mt_peerctx_t);
- if (!peerctx)
+ if (!peerctx) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_NO_MEMORY, NULL);
goto out;
+ }
+
+ options = dict_new();
+ if (!options) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
+ goto out;
+ }
if (args)
peerctx->args = *args;
@@ -3485,8 +3524,12 @@ glusterd_friend_rpc_create(xlator_t *this, glusterd_peerinfo_t *peerinfo,
uniquely identify a
peerinfo */
- ret = glusterd_transport_inet_options_build(&options, peerinfo->hostname,
- peerinfo->port);
+ ret = dict_get_str(this->options, "transport.address-family", &af);
+ if (ret)
+ gf_log(this->name, GF_LOG_TRACE,
+ "option transport.address-family is not set in xlator options");
+ ret = glusterd_transport_inet_options_build(options, peerinfo->hostname,
+ peerinfo->port, af);
if (ret)
goto out;
@@ -3495,6 +3538,7 @@ glusterd_friend_rpc_create(xlator_t *this, glusterd_peerinfo_t *peerinfo,
* create our RPC endpoint with the same address that the peer would
* use to reach us.
*/
+
if (this->options) {
data = dict_getn(this->options, "transport.socket.bind-address",
SLEN("transport.socket.bind-address"));
@@ -3536,6 +3580,9 @@ glusterd_friend_rpc_create(xlator_t *this, glusterd_peerinfo_t *peerinfo,
peerctx = NULL;
ret = 0;
out:
+ if (options)
+ dict_unref(options);
+
GF_FREE(peerctx);
return ret;
}
@@ -3559,6 +3606,7 @@ glusterd_friend_add(const char *hoststr, int port,
*friend = glusterd_peerinfo_new(state, uuid, hoststr, port);
if (*friend == NULL) {
ret = -1;
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_PEER_ADD_FAIL, NULL);
goto out;
}
@@ -3657,7 +3705,7 @@ glusterd_probe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
GF_ASSERT(hoststr);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(NULL, hoststr);
if (peerinfo == NULL) {
@@ -3702,7 +3750,7 @@ glusterd_probe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
gf_msg_debug("glusterd", 0, "returning %d", ret);
return ret;
}
@@ -3719,7 +3767,7 @@ glusterd_deprobe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
GF_ASSERT(hoststr);
GF_ASSERT(req);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find(uuid, hoststr);
if (peerinfo == NULL) {
@@ -3780,7 +3828,7 @@ glusterd_deprobe_begin(rpcsvc_request_t *req, const char *hoststr, int port,
peerinfo->detaching = _gf_true;
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
return ret;
}
@@ -4027,8 +4075,11 @@ set_deprobe_error_str(int op_ret, int op_errno, char *op_errstr, char *errstr,
case GF_DEPROBE_BRICK_EXIST:
snprintf(errstr, len,
- "Brick(s) with the peer "
- "%s exist in cluster",
+ "Peer %s hosts one or more bricks. If the peer is in "
+ "not recoverable state then use either replace-brick "
+ "or remove-brick command with force to remove all "
+ "bricks from the peer and attempt the peer detach "
+ "again.",
hostname);
break;
@@ -4133,19 +4184,21 @@ glusterd_list_friends(rpcsvc_request_t *req, dict_t *dict, int32_t flags)
};
int keylen;
- priv = THIS->private;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
+
+ priv = this->private;
GF_ASSERT(priv);
friends = dict_new();
if (!friends) {
- gf_msg(THIS->name, GF_LOG_ERROR, ENOMEM, GD_MSG_NO_MEMORY,
- "Out of Memory");
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
}
/* Reset ret to 0, needed to prevent failure in case no peers exist */
ret = 0;
- rcu_read_lock();
+ RCU_READ_LOCK;
if (!cds_list_empty(&priv->peers)) {
cds_list_for_each_entry_rcu(entry, &priv->peers, uuid_list)
{
@@ -4156,7 +4209,7 @@ glusterd_list_friends(rpcsvc_request_t *req, dict_t *dict, int32_t flags)
}
}
unlock:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
if (ret)
goto out;
@@ -4165,24 +4218,36 @@ unlock:
keylen = snprintf(key, sizeof(key), "friend%d.uuid", count);
uuid_utoa_r(MY_UUID, my_uuid_str);
ret = dict_set_strn(friends, key, keylen, my_uuid_str);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "friend%d.hostname", count);
ret = dict_set_nstrn(friends, key, keylen, "localhost",
SLEN("localhost"));
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
keylen = snprintf(key, sizeof(key), "friend%d.connected", count);
ret = dict_set_int32n(friends, key, keylen, 1);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=%s", key, NULL);
goto out;
+ }
}
ret = dict_set_int32n(friends, "count", SLEN("count"), count);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=count", NULL);
goto out;
+ }
ret = dict_allocate_and_serialize(friends, &rsp.friends.friends_val,
&rsp.friends.friends_len);
@@ -4354,8 +4419,11 @@ __glusterd_handle_status_volume(rpcsvc_request_t *req)
if (cli_req.dict.dict_len > 0) {
dict = dict_new();
- if (!dict)
+ if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL,
+ NULL);
goto out;
+ }
ret = dict_unserialize(cli_req.dict.dict_val, cli_req.dict.dict_len,
&dict);
if (ret < 0) {
@@ -4429,17 +4497,6 @@ __glusterd_handle_status_volume(rpcsvc_request_t *req)
goto out;
}
- if ((cmd & GF_CLI_STATUS_TIERD) &&
- (conf->op_version < GD_OP_VERSION_3_10_0)) {
- snprintf(err_str, sizeof(err_str),
- "The cluster is operating "
- "at a lesser version than %d. Getting the status of "
- "tierd is not allowed in this state",
- GD_OP_VERSION_3_6_0);
- ret = -1;
- goto out;
- }
-
if ((cmd & GF_CLI_STATUS_SCRUB) &&
(conf->op_version < GD_OP_VERSION_3_7_0)) {
snprintf(err_str, sizeof(err_str),
@@ -4634,6 +4691,7 @@ __glusterd_handle_barrier(rpcsvc_request_t *req)
dict = dict_new();
if (!dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
ret = -1;
goto out;
}
@@ -4986,6 +5044,7 @@ out:
&rsp.dict.dict_len);
glusterd_submit_reply(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp);
+ GF_FREE(rsp.dict.dict_val);
GF_FREE(key_fixed);
return ret;
}
@@ -5167,12 +5226,17 @@ glusterd_print_gsync_status_by_vol(FILE *fp, glusterd_volinfo_t *volinfo)
0,
};
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
+
GF_VALIDATE_OR_GOTO(THIS->name, volinfo, out);
GF_VALIDATE_OR_GOTO(THIS->name, fp, out);
gsync_rsp_dict = dict_new();
- if (!gsync_rsp_dict)
+ if (!gsync_rsp_dict) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_CREATE_FAIL, NULL);
goto out;
+ }
ret = gethostname(my_hostname, sizeof(my_hostname));
if (ret) {
@@ -5199,7 +5263,7 @@ glusterd_print_snapinfo_by_vol(FILE *fp, glusterd_volinfo_t *volinfo,
glusterd_volinfo_t *tmp_vol = NULL;
glusterd_snap_t *snapinfo = NULL;
int snapcount = 0;
- char timestr[64] = {
+ char timestr[GF_TIMESTR_SIZE] = {
0,
};
char snap_status_str[STATUS_STRLEN] = {
@@ -5312,19 +5376,30 @@ glusterd_print_client_details(FILE *fp, dict_t *dict,
brick_req->op = GLUSTERD_BRICK_STATUS;
brick_req->name = "";
+ brick_req->dict.dict_val = NULL;
+ brick_req->dict.dict_len = 0;
ret = dict_set_strn(dict, "brick-name", SLEN("brick-name"),
brickinfo->path);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=brick-name", NULL);
goto out;
+ }
ret = dict_set_int32n(dict, "cmd", SLEN("cmd"), GF_CLI_STATUS_CLIENTS);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=cmd", NULL);
goto out;
+ }
ret = dict_set_strn(dict, "volname", SLEN("volname"), volinfo->volname);
- if (ret)
+ if (ret) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_DICT_SET_FAILED,
+ "Key=volname", NULL);
goto out;
+ }
ret = dict_allocate_and_serialize(dict, &brick_req->input.input_val,
&brick_req->input.input_len);
@@ -5455,14 +5530,11 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
uint32_t get_state_cmd = 0;
uint64_t memtotal = 0;
uint64_t memfree = 0;
- int start_index = 0;
char id_str[64] = {
0,
};
char *vol_type_str = NULL;
- char *hot_tier_type_str = NULL;
- char *cold_tier_type_str = NULL;
char transport_type_str[STATUS_STRLEN] = {
0,
@@ -5476,7 +5548,9 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
char vol_status_str[STATUS_STRLEN] = {
0,
};
-
+ char brick_status_str[STATUS_STRLEN] = {
+ 0,
+ };
this = THIS;
GF_VALIDATE_OR_GOTO(THIS->name, this, out);
@@ -5519,7 +5593,7 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
ret = dict_get_strn(dict, "filename", SLEN("filename"), &tmp_str);
if (ret) {
- now = time(NULL);
+ now = gf_time();
strftime(timestamp, sizeof(timestamp), "%Y%m%d_%H%M%S",
localtime(&now));
gf_asprintf(&filename, "%s_%s", "glusterd_state", timestamp);
@@ -5530,10 +5604,9 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
gf_asprintf(&filename, "%s", tmp_str);
}
- if (odir[odirlen - 1] != '/')
- strcat(odir, "/");
+ ret = gf_asprintf(&ofilepath, "%s%s%s", odir,
+ ((odir[odirlen - 1] != '/') ? "/" : ""), filename);
- ret = gf_asprintf(&ofilepath, "%s%s", odir, filename);
if (ret < 0) {
GF_FREE(odir);
GF_FREE(filename);
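The old code strcat()ed a trailing '/' into odir in place before the final
concatenation; building the whole path in one gf_asprintf() with a
conditional separator avoids mutating (and potentially overflowing) the
odir buffer. Illustrative values:

    /* odir = "/var/run/gluster"  -> "/var/run/gluster/glusterd_state_x"
     * odir = "/var/run/gluster/" -> "/var/run/gluster/glusterd_state_x" */
    ret = gf_asprintf(&ofilepath, "%s%s%s", odir,
                      (odir[odirlen - 1] != '/') ? "/" : "", filename);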
@@ -5585,6 +5658,8 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
"fetch the value of all volume options "
"for volume %s",
volinfo->volname);
+ if (vol_all_opts)
+ dict_unref(vol_all_opts);
continue;
}
@@ -5609,8 +5684,8 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
if (priv->opts)
dict_foreach(priv->opts, glusterd_print_global_options, fp);
- rcu_read_lock();
fprintf(fp, "\n[Peers]\n");
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu(peerinfo, &priv->peers, uuid_list)
{
@@ -5639,7 +5714,7 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
count_bkp = 0;
fprintf(fp, "\n");
}
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
count = 0;
fprintf(fp, "\n[Volumes]\n");
@@ -5708,26 +5783,11 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
brickinfo->hostname);
/* Determine which one is the arbiter brick */
if (volinfo->arbiter_count == 1) {
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- if (volinfo->tier_info.cold_replica_count != 1) {
- start_index = volinfo->tier_info.hot_brick_count + 1;
- if (count >= start_index &&
- ((count - start_index + 1) %
- volinfo->tier_info.cold_replica_count ==
- 0)) {
- fprintf(fp,
- "Volume%d.Brick%d."
- "is_arbiter: 1\n",
- count_bkp, count);
- }
- }
- } else {
- if (count % volinfo->replica_count == 0) {
- fprintf(fp,
- "Volume%d.Brick%d."
- "is_arbiter: 1\n",
- count_bkp, count);
- }
+ if (count % volinfo->replica_count == 0) {
+ fprintf(fp,
+ "Volume%d.Brick%d."
+ "is_arbiter: 1\n",
+ count_bkp, count);
}
}
/* Add following information only for bricks
@@ -5740,27 +5800,21 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
brickinfo->rdma_port);
fprintf(fp, "Volume%d.Brick%d.port_registered: %d\n", count_bkp,
count, brickinfo->port_registered);
+ glusterd_brick_get_status_str(brickinfo, brick_status_str);
fprintf(fp, "Volume%d.Brick%d.status: %s\n", count_bkp, count,
- brickinfo->status ? "Started" : "Stopped");
-
- /*FIXME: This is a hacky way of figuring out whether a
- * brick belongs to the hot or cold tier */
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- fprintf(fp, "Volume%d.Brick%d.tier: %s\n", count_bkp, count,
- count <= volinfo->tier_info.hot_brick_count ? "Hot"
- : "Cold");
- }
+ brick_status_str);
ret = sys_statvfs(brickinfo->path, &brickstat);
if (ret) {
gf_msg(this->name, GF_LOG_ERROR, errno, GD_MSG_FILE_OP_FAILED,
"statfs error: %s ", strerror(errno));
- goto out;
+ memfree = 0;
+ memtotal = 0;
+ } else {
+ memfree = brickstat.f_bfree * brickstat.f_bsize;
+ memtotal = brickstat.f_blocks * brickstat.f_bsize;
}
- memfree = brickstat.f_bfree * brickstat.f_bsize;
- memtotal = brickstat.f_blocks * brickstat.f_bsize;
-
fprintf(fp, "Volume%d.Brick%d.spacefree: %" PRIu64 "Bytes\n",
count_bkp, count, memfree);
fprintf(fp, "Volume%d.Brick%d.spacetotal: %" PRIu64 "Bytes\n",
@@ -5826,50 +5880,10 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
GF_FREE(rebal_data);
- if (volinfo->type == GF_CLUSTER_TYPE_TIER) {
- ret = glusterd_volume_get_hot_tier_type_str(volinfo,
- &hot_tier_type_str);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
- "Failed to get hot tier type for "
- "volume: %s",
- volinfo->volname);
- goto out;
- }
-
- ret = glusterd_volume_get_cold_tier_type_str(volinfo,
- &cold_tier_type_str);
- if (ret) {
- gf_msg(this->name, GF_LOG_ERROR, 0, GD_MSG_STATE_STR_GET_FAILED,
- "Failed to get cold tier type for "
- "volume: %s",
- volinfo->volname);
- goto out;
- }
-
- fprintf(fp, "Volume%d.tier_info.cold_tier_type: %s\n", count,
- cold_tier_type_str);
- fprintf(fp, "Volume%d.tier_info.cold_brick_count: %d\n", count,
- volinfo->tier_info.cold_brick_count);
- fprintf(fp, "Volume%d.tier_info.cold_replica_count: %d\n", count,
- volinfo->tier_info.cold_replica_count);
- fprintf(fp, "Volume%d.tier_info.cold_disperse_count: %d\n", count,
- volinfo->tier_info.cold_disperse_count);
- fprintf(fp, "Volume%d.tier_info.cold_dist_leaf_count: %d\n", count,
- volinfo->tier_info.cold_dist_leaf_count);
- fprintf(fp, "Volume%d.tier_info.cold_redundancy_count: %d\n", count,
- volinfo->tier_info.cold_redundancy_count);
- fprintf(fp, "Volume%d.tier_info.hot_tier_type: %s\n", count,
- hot_tier_type_str);
- fprintf(fp, "Volume%d.tier_info.hot_brick_count: %d\n", count,
- volinfo->tier_info.hot_brick_count);
- fprintf(fp, "Volume%d.tier_info.hot_replica_count: %d\n", count,
- volinfo->tier_info.hot_replica_count);
- fprintf(fp, "Volume%d.tier_info.promoted: %d\n", count,
- volinfo->tier_info.promoted);
- fprintf(fp, "Volume%d.tier_info.demoted: %d\n", count,
- volinfo->tier_info.demoted);
- }
+ fprintf(fp, "Volume%d.shd_svc.online_status: %s\n", count,
+ volinfo->shd.svc.online ? "Online" : "Offline");
+ fprintf(fp, "Volume%d.shd_svc.inited: %s\n", count,
+ volinfo->shd.svc.inited ? "True" : "False");
if (volinfo->rep_brick.src_brick && volinfo->rep_brick.dst_brick) {
fprintf(fp, "Volume%d.replace_brick.src: %s:%s\n", count,
@@ -5894,19 +5908,13 @@ glusterd_get_state(rpcsvc_request_t *req, dict_t *dict)
count = 0;
fprintf(fp, "\n[Services]\n");
-
- if (priv->shd_svc.inited) {
- fprintf(fp, "svc%d.name: %s\n", ++count, priv->shd_svc.name);
- fprintf(fp, "svc%d.online_status: %s\n\n", count,
- priv->shd_svc.online ? "Online" : "Offline");
- }
-
+#ifdef BUILD_GNFS
if (priv->nfs_svc.inited) {
fprintf(fp, "svc%d.name: %s\n", ++count, priv->nfs_svc.name);
fprintf(fp, "svc%d.online_status: %s\n\n", count,
priv->nfs_svc.online ? "Online" : "Offline");
}
-
+#endif
if (priv->bitd_svc.inited) {
fprintf(fp, "svc%d.name: %s\n", ++count, priv->bitd_svc.name);
fprintf(fp, "svc%d.online_status: %s\n\n", count,
@@ -5942,6 +5950,7 @@ out:
ret = dict_allocate_and_serialize(dict, &rsp.dict.dict_val,
&rsp.dict.dict_len);
glusterd_to_cli(req, &rsp, NULL, 0, NULL, (xdrproc_t)xdr_gf_cli_rsp, dict);
+ GF_FREE(rsp.dict.dict_val);
return ret;
}
@@ -6028,14 +6037,27 @@ get_brickinfo_from_brickid(char *brickid, glusterd_brickinfo_t **brickinfo)
uuid_t volid = {0};
int ret = -1;
+ xlator_t *this = THIS;
+ GF_ASSERT(this);
+
brickid_dup = gf_strdup(brickid);
- if (!brickid_dup)
+ if (!brickid_dup) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRDUP_FAILED,
+ "brick_id=%s", brickid, NULL);
goto out;
+ }
volid_str = brickid_dup;
brick = strchr(brickid_dup, ':');
- if (!volid_str || !brick)
+ if (!volid_str) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
goto out;
+ }
+
+ if (!brick) {
+ gf_smsg(this->name, GF_LOG_ERROR, errno, GD_MSG_STRCHR_FAIL, NULL);
+ goto out;
+ }
*brick = '\0';
brick++;
@@ -6253,7 +6275,7 @@ glusterd_friend_remove_notify(glusterd_peerctx_t *peerctx, int32_t op_errno)
GF_ASSERT(peerctx);
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
gf_msg_debug(THIS->name, 0,
@@ -6293,7 +6315,7 @@ glusterd_friend_remove_notify(glusterd_peerctx_t *peerctx, int32_t op_errno)
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
return ret;
}
@@ -6340,7 +6362,7 @@ __glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
event, peerctx->peername);
return 0;
}
- rcu_read_lock();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation(peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -6453,7 +6475,7 @@ __glusterd_peer_rpc_notify(struct rpc_clnt *rpc, void *mydata,
}
out:
- rcu_read_unlock();
+ RCU_READ_UNLOCK;
glusterd_friend_sm();
glusterd_op_sm();
@@ -6476,20 +6498,26 @@ glusterd_null(rpcsvc_request_t *req)
return 0;
}
-rpcsvc_actor_t gd_svc_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
- [GLUSTERD_MGMT_NULL] = {"NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL, 0,
- DRC_NA},
- [GLUSTERD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK", GLUSTERD_MGMT_CLUSTER_LOCK,
- glusterd_handle_cluster_lock, NULL, 0,
- DRC_NA},
+static rpcsvc_actor_t gd_svc_mgmt_actors[GLUSTERD_MGMT_MAXVALUE] = {
+ [GLUSTERD_MGMT_NULL] = {"NULL", glusterd_null, NULL, GLUSTERD_MGMT_NULL,
+ DRC_NA, 0},
+ [GLUSTERD_MGMT_CLUSTER_LOCK] = {"CLUSTER_LOCK",
+ glusterd_handle_cluster_lock, NULL,
+ GLUSTERD_MGMT_CLUSTER_LOCK, DRC_NA, 0},
[GLUSTERD_MGMT_CLUSTER_UNLOCK] = {"CLUSTER_UNLOCK",
- GLUSTERD_MGMT_CLUSTER_UNLOCK,
- glusterd_handle_cluster_unlock, NULL, 0,
- DRC_NA},
- [GLUSTERD_MGMT_STAGE_OP] = {"STAGE_OP", GLUSTERD_MGMT_STAGE_OP,
- glusterd_handle_stage_op, NULL, 0, DRC_NA},
- [GLUSTERD_MGMT_COMMIT_OP] = {"COMMIT_OP", GLUSTERD_MGMT_COMMIT_OP,
- glusterd_handle_commit_op, NULL, 0, DRC_NA},
+ glusterd_handle_cluster_unlock, NULL,
+ GLUSTERD_MGMT_CLUSTER_UNLOCK, DRC_NA, 0},
+ [GLUSTERD_MGMT_STAGE_OP] = {"STAGE_OP", glusterd_handle_stage_op, NULL,
+ GLUSTERD_MGMT_STAGE_OP, DRC_NA, 0},
+ [GLUSTERD_MGMT_COMMIT_OP] =
+ {
+ "COMMIT_OP",
+ glusterd_handle_commit_op,
+ NULL,
+ GLUSTERD_MGMT_COMMIT_OP,
+ DRC_NA,
+ 0,
+ },
};
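The actor tables are rewritten because the field order of rpcsvc_actor_t
changed. Comparing old and new initializers, the layout moved from
(procname, procnum, actor, vector_sizer, unprivileged, op_type) to roughly
the following -- an inference from the initializers, not the authoritative
rpcsvc.h definition:

    typedef struct rpcsvc_actor_desc {
        char procname[RPCSVC_NAME_MAX];
        rpcsvc_actor actor;             /* handler callback */
        rpcsvc_vector_sizer vector_sizer;
        int procnum;                    /* e.g. GLUSTERD_MGMT_STAGE_OP */
        drc_op_type_t op_type;          /* DRC_NA everywhere here */
        int unprivileged;
    } rpcsvc_actor_t;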
struct rpcsvc_program gd_svc_mgmt_prog = {
@@ -6501,19 +6529,18 @@ struct rpcsvc_program gd_svc_mgmt_prog = {
.synctask = _gf_true,
};
-rpcsvc_actor_t gd_svc_peer_actors[GLUSTERD_FRIEND_MAXVALUE] = {
- [GLUSTERD_FRIEND_NULL] = {"NULL", GLUSTERD_MGMT_NULL, glusterd_null, NULL,
- 0, DRC_NA},
- [GLUSTERD_PROBE_QUERY] = {"PROBE_QUERY", GLUSTERD_PROBE_QUERY,
- glusterd_handle_probe_query, NULL, 0, DRC_NA},
- [GLUSTERD_FRIEND_ADD] = {"FRIEND_ADD", GLUSTERD_FRIEND_ADD,
- glusterd_handle_incoming_friend_req, NULL, 0,
- DRC_NA},
- [GLUSTERD_FRIEND_REMOVE] = {"FRIEND_REMOVE", GLUSTERD_FRIEND_REMOVE,
- glusterd_handle_incoming_unfriend_req, NULL, 0,
- DRC_NA},
- [GLUSTERD_FRIEND_UPDATE] = {"FRIEND_UPDATE", GLUSTERD_FRIEND_UPDATE,
- glusterd_handle_friend_update, NULL, 0, DRC_NA},
+static rpcsvc_actor_t gd_svc_peer_actors[GLUSTERD_FRIEND_MAXVALUE] = {
+ [GLUSTERD_FRIEND_NULL] = {"NULL", glusterd_null, NULL, GLUSTERD_MGMT_NULL,
+ DRC_NA, 0},
+ [GLUSTERD_PROBE_QUERY] = {"PROBE_QUERY", glusterd_handle_probe_query, NULL,
+ GLUSTERD_PROBE_QUERY, DRC_NA, 0},
+ [GLUSTERD_FRIEND_ADD] = {"FRIEND_ADD", glusterd_handle_incoming_friend_req,
+ NULL, GLUSTERD_FRIEND_ADD, DRC_NA, 0},
+ [GLUSTERD_FRIEND_REMOVE] = {"FRIEND_REMOVE",
+ glusterd_handle_incoming_unfriend_req, NULL,
+ GLUSTERD_FRIEND_REMOVE, DRC_NA, 0},
+ [GLUSTERD_FRIEND_UPDATE] = {"FRIEND_UPDATE", glusterd_handle_friend_update,
+ NULL, GLUSTERD_FRIEND_UPDATE, DRC_NA, 0},
};
struct rpcsvc_program gd_svc_peer_prog = {
@@ -6525,116 +6552,109 @@ struct rpcsvc_program gd_svc_peer_prog = {
.synctask = _gf_false,
};
-rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
- [GLUSTER_CLI_PROBE] = {"CLI_PROBE", GLUSTER_CLI_PROBE,
- glusterd_handle_cli_probe, NULL, 0, DRC_NA},
+static rpcsvc_actor_t gd_svc_cli_actors[GLUSTER_CLI_MAXVALUE] = {
+ [GLUSTER_CLI_PROBE] = {"CLI_PROBE", glusterd_handle_cli_probe, NULL,
+ GLUSTER_CLI_PROBE, DRC_NA, 0},
[GLUSTER_CLI_CREATE_VOLUME] = {"CLI_CREATE_VOLUME",
- GLUSTER_CLI_CREATE_VOLUME,
- glusterd_handle_create_volume, NULL, 0,
- DRC_NA},
+ glusterd_handle_create_volume, NULL,
+ GLUSTER_CLI_CREATE_VOLUME, DRC_NA, 0},
[GLUSTER_CLI_DEFRAG_VOLUME] = {"CLI_DEFRAG_VOLUME",
- GLUSTER_CLI_DEFRAG_VOLUME,
- glusterd_handle_defrag_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_DEPROBE] = {"FRIEND_REMOVE", GLUSTER_CLI_DEPROBE,
- glusterd_handle_cli_deprobe, NULL, 0, DRC_NA},
- [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS,
- glusterd_handle_cli_list_friends, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_UUID_RESET] = {"UUID_RESET", GLUSTER_CLI_UUID_RESET,
- glusterd_handle_cli_uuid_reset, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_UUID_GET] = {"UUID_GET", GLUSTER_CLI_UUID_GET,
- glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
- [GLUSTER_CLI_START_VOLUME] = {"START_VOLUME", GLUSTER_CLI_START_VOLUME,
- glusterd_handle_cli_start_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_STOP_VOLUME] = {"STOP_VOLUME", GLUSTER_CLI_STOP_VOLUME,
- glusterd_handle_cli_stop_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME", GLUSTER_CLI_DELETE_VOLUME,
- glusterd_handle_cli_delete_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", GLUSTER_CLI_GET_VOLUME,
- glusterd_handle_cli_get_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_ADD_BRICK] = {"ADD_BRICK", GLUSTER_CLI_ADD_BRICK,
- glusterd_handle_add_brick, NULL, 0, DRC_NA},
- [GLUSTER_CLI_ATTACH_TIER] = {"ATTACH_TIER", GLUSTER_CLI_ATTACH_TIER,
- glusterd_handle_attach_tier, NULL, 0, DRC_NA},
- [GLUSTER_CLI_REPLACE_BRICK] = {"REPLACE_BRICK", GLUSTER_CLI_REPLACE_BRICK,
- glusterd_handle_replace_brick, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", GLUSTER_CLI_REMOVE_BRICK,
- glusterd_handle_remove_brick, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_LOG_ROTATE] = {"LOG FILENAME", GLUSTER_CLI_LOG_ROTATE,
- glusterd_handle_log_rotate, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SET_VOLUME] = {"SET_VOLUME", GLUSTER_CLI_SET_VOLUME,
- glusterd_handle_set_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", GLUSTER_CLI_SYNC_VOLUME,
- glusterd_handle_sync_volume, NULL, 0, DRC_NA},
- [GLUSTER_CLI_RESET_VOLUME] = {"RESET_VOLUME", GLUSTER_CLI_RESET_VOLUME,
- glusterd_handle_reset_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_FSM_LOG] = {"FSM_LOG", GLUSTER_CLI_FSM_LOG,
- glusterd_handle_fsm_log, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GSYNC_SET] = {"GSYNC_SET", GLUSTER_CLI_GSYNC_SET,
- glusterd_handle_gsync_set, NULL, 0, DRC_NA},
- [GLUSTER_CLI_PROFILE_VOLUME] = {"STATS_VOLUME", GLUSTER_CLI_PROFILE_VOLUME,
- glusterd_handle_cli_profile_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_QUOTA] = {"QUOTA", GLUSTER_CLI_QUOTA, glusterd_handle_quota,
- NULL, 0, DRC_NA},
- [GLUSTER_CLI_GETWD] = {"GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd,
- NULL, 1, DRC_NA},
- [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME,
- glusterd_handle_status_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_MOUNT] = {"MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount,
- NULL, 1, DRC_NA},
- [GLUSTER_CLI_UMOUNT] = {"UMOUNT", GLUSTER_CLI_UMOUNT,
- glusterd_handle_umount, NULL, 1, DRC_NA},
- [GLUSTER_CLI_HEAL_VOLUME] = {"HEAL_VOLUME", GLUSTER_CLI_HEAL_VOLUME,
- glusterd_handle_cli_heal_volume, NULL, 0,
- DRC_NA},
+ glusterd_handle_defrag_volume, NULL,
+ GLUSTER_CLI_DEFRAG_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_DEPROBE] = {"FRIEND_REMOVE", glusterd_handle_cli_deprobe, NULL,
+ GLUSTER_CLI_DEPROBE, DRC_NA, 0},
+ [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS",
+ glusterd_handle_cli_list_friends, NULL,
+ GLUSTER_CLI_LIST_FRIENDS, DRC_NA, 0},
+ [GLUSTER_CLI_UUID_RESET] = {"UUID_RESET", glusterd_handle_cli_uuid_reset,
+ NULL, GLUSTER_CLI_UUID_RESET, DRC_NA, 0},
+ [GLUSTER_CLI_UUID_GET] = {"UUID_GET", glusterd_handle_cli_uuid_get, NULL,
+ GLUSTER_CLI_UUID_GET, DRC_NA, 0},
+ [GLUSTER_CLI_START_VOLUME] = {"START_VOLUME",
+ glusterd_handle_cli_start_volume, NULL,
+ GLUSTER_CLI_START_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_STOP_VOLUME] = {"STOP_VOLUME", glusterd_handle_cli_stop_volume,
+ NULL, GLUSTER_CLI_STOP_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_DELETE_VOLUME] = {"DELETE_VOLUME",
+ glusterd_handle_cli_delete_volume, NULL,
+ GLUSTER_CLI_DELETE_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", glusterd_handle_cli_get_volume,
+ NULL, GLUSTER_CLI_GET_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_ADD_BRICK] = {"ADD_BRICK", glusterd_handle_add_brick, NULL,
+ GLUSTER_CLI_ADD_BRICK, DRC_NA, 0},
+ [GLUSTER_CLI_ATTACH_TIER] = {"ATTACH_TIER", glusterd_handle_attach_tier,
+ NULL, GLUSTER_CLI_ATTACH_TIER, DRC_NA, 0},
+ [GLUSTER_CLI_REPLACE_BRICK] = {"REPLACE_BRICK",
+ glusterd_handle_replace_brick, NULL,
+ GLUSTER_CLI_REPLACE_BRICK, DRC_NA, 0},
+ [GLUSTER_CLI_REMOVE_BRICK] = {"REMOVE_BRICK", glusterd_handle_remove_brick,
+ NULL, GLUSTER_CLI_REMOVE_BRICK, DRC_NA, 0},
+ [GLUSTER_CLI_LOG_ROTATE] = {"LOG FILENAME", glusterd_handle_log_rotate,
+ NULL, GLUSTER_CLI_LOG_ROTATE, DRC_NA, 0},
+ [GLUSTER_CLI_SET_VOLUME] = {"SET_VOLUME", glusterd_handle_set_volume, NULL,
+ GLUSTER_CLI_SET_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_SYNC_VOLUME] = {"SYNC_VOLUME", glusterd_handle_sync_volume,
+ NULL, GLUSTER_CLI_SYNC_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_RESET_VOLUME] = {"RESET_VOLUME", glusterd_handle_reset_volume,
+ NULL, GLUSTER_CLI_RESET_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_FSM_LOG] = {"FSM_LOG", glusterd_handle_fsm_log, NULL,
+ GLUSTER_CLI_FSM_LOG, DRC_NA, 0},
+ [GLUSTER_CLI_GSYNC_SET] = {"GSYNC_SET", glusterd_handle_gsync_set, NULL,
+ GLUSTER_CLI_GSYNC_SET, DRC_NA, 0},
+ [GLUSTER_CLI_PROFILE_VOLUME] = {"STATS_VOLUME",
+ glusterd_handle_cli_profile_volume, NULL,
+ GLUSTER_CLI_PROFILE_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_QUOTA] = {"QUOTA", glusterd_handle_quota, NULL,
+ GLUSTER_CLI_QUOTA, DRC_NA, 0},
+ [GLUSTER_CLI_GETWD] = {"GETWD", glusterd_handle_getwd, NULL,
+ GLUSTER_CLI_GETWD, DRC_NA, 1},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME",
+ glusterd_handle_status_volume, NULL,
+ GLUSTER_CLI_STATUS_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_MOUNT] = {"MOUNT", glusterd_handle_mount, NULL,
+ GLUSTER_CLI_MOUNT, DRC_NA, 1},
+ [GLUSTER_CLI_UMOUNT] = {"UMOUNT", glusterd_handle_umount, NULL,
+ GLUSTER_CLI_UMOUNT, DRC_NA, 1},
+ [GLUSTER_CLI_HEAL_VOLUME] = {"HEAL_VOLUME", glusterd_handle_cli_heal_volume,
+ NULL, GLUSTER_CLI_HEAL_VOLUME, DRC_NA, 0},
[GLUSTER_CLI_STATEDUMP_VOLUME] = {"STATEDUMP_VOLUME",
- GLUSTER_CLI_STATEDUMP_VOLUME,
glusterd_handle_cli_statedump_volume,
- NULL, 0, DRC_NA},
- [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME,
- glusterd_handle_cli_list_volume, NULL, 0,
- DRC_NA},
+ NULL, GLUSTER_CLI_STATEDUMP_VOLUME,
+ DRC_NA, 0},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", glusterd_handle_cli_list_volume,
+ NULL, GLUSTER_CLI_LIST_VOLUME, DRC_NA, 0},
[GLUSTER_CLI_CLRLOCKS_VOLUME] = {"CLEARLOCKS_VOLUME",
- GLUSTER_CLI_CLRLOCKS_VOLUME,
glusterd_handle_cli_clearlocks_volume,
- NULL, 0, DRC_NA},
- [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", GLUSTER_CLI_COPY_FILE,
- glusterd_handle_copy_file, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", GLUSTER_CLI_SYS_EXEC,
- glusterd_handle_sys_exec, NULL, 0, DRC_NA},
- [GLUSTER_CLI_SNAP] = {"SNAP", GLUSTER_CLI_SNAP, glusterd_handle_snapshot,
- NULL, 0, DRC_NA},
- [GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER_VOLUME",
- GLUSTER_CLI_BARRIER_VOLUME,
- glusterd_handle_barrier, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", GLUSTER_CLI_GET_VOL_OPT,
- glusterd_handle_get_vol_opt, NULL, 0, DRC_NA},
- [GLUSTER_CLI_BITROT] = {"BITROT", GLUSTER_CLI_BITROT,
- glusterd_handle_bitrot, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GET_STATE] = {"GET_STATE", GLUSTER_CLI_GET_STATE,
- glusterd_handle_get_state, NULL, 0, DRC_NA},
- [GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", GLUSTER_CLI_RESET_BRICK,
- glusterd_handle_reset_brick, NULL, 0, DRC_NA},
- [GLUSTER_CLI_TIER] = {"TIER", GLUSTER_CLI_TIER, glusterd_handle_tier, NULL,
- 0, DRC_NA},
+ NULL, GLUSTER_CLI_CLRLOCKS_VOLUME, DRC_NA,
+ 0},
+ [GLUSTER_CLI_COPY_FILE] = {"COPY_FILE", glusterd_handle_copy_file, NULL,
+ GLUSTER_CLI_COPY_FILE, DRC_NA, 0},
+ [GLUSTER_CLI_SYS_EXEC] = {"SYS_EXEC", glusterd_handle_sys_exec, NULL,
+ GLUSTER_CLI_SYS_EXEC, DRC_NA, 0},
+ [GLUSTER_CLI_SNAP] = {"SNAP", glusterd_handle_snapshot, NULL,
+ GLUSTER_CLI_SNAP, DRC_NA, 0},
+ [GLUSTER_CLI_BARRIER_VOLUME] = {"BARRIER_VOLUME", glusterd_handle_barrier,
+ NULL, GLUSTER_CLI_BARRIER_VOLUME, DRC_NA,
+ 0},
+ [GLUSTER_CLI_GANESHA] = {"GANESHA", glusterd_handle_ganesha_cmd, NULL,
+ GLUSTER_CLI_GANESHA, DRC_NA, 0},
+    [GLUSTER_CLI_GET_VOL_OPT] = {"GET_VOL_OPT", glusterd_handle_get_vol_opt,
+                                 NULL, GLUSTER_CLI_GET_VOL_OPT, DRC_NA, 0},
+ [GLUSTER_CLI_BITROT] = {"BITROT", glusterd_handle_bitrot, NULL,
+ GLUSTER_CLI_BITROT, DRC_NA, 0},
+ [GLUSTER_CLI_GET_STATE] = {"GET_STATE", glusterd_handle_get_state, NULL,
+ GLUSTER_CLI_GET_STATE, DRC_NA, 0},
+ [GLUSTER_CLI_RESET_BRICK] = {"RESET_BRICK", glusterd_handle_reset_brick,
+ NULL, GLUSTER_CLI_RESET_BRICK, DRC_NA, 0},
+ [GLUSTER_CLI_TIER] = {"TIER", glusterd_handle_tier, NULL, GLUSTER_CLI_TIER,
+ DRC_NA, 0},
[GLUSTER_CLI_REMOVE_TIER_BRICK] = {"REMOVE_TIER_BRICK",
- GLUSTER_CLI_REMOVE_TIER_BRICK,
- glusterd_handle_tier, NULL, 0, DRC_NA},
+ glusterd_handle_tier, NULL,
+ GLUSTER_CLI_REMOVE_TIER_BRICK, DRC_NA,
+ 0},
[GLUSTER_CLI_ADD_TIER_BRICK] = {"ADD_TIER_BRICK",
- GLUSTER_CLI_ADD_TIER_BRICK,
- glusterd_handle_add_tier_brick, NULL, 0,
- DRC_NA},
+ glusterd_handle_add_tier_brick, NULL,
+ GLUSTER_CLI_ADD_TIER_BRICK, DRC_NA, 0},
};
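
Both actor tables are sized by their MAXVALUE enumerator and filled with designated initializers, so any procedure not listed is zero-filled and surfaces as a NULL handler at lookup time. This is also why every entry must spell out all six fields: C silently zero-fills the trailing members of a short initializer instead of failing to compile. A small self-contained illustration:

    #include <stdio.h>

    enum { PROC_NULL, PROC_PING, PROC_MAX };

    typedef struct {
        const char *name;
        int (*handler)(void);
        int procnum;
    } mini_actor_t;

    static int
    ping_actor(void)
    {
        return 0;
    }

    /* PROC_NULL is never listed, so actors[PROC_NULL] stays all-zero. */
    static mini_actor_t actors[PROC_MAX] = {
        [PROC_PING] = {"PING", ping_actor, PROC_PING},
    };

    int
    main(void)
    {
        int i;
        for (i = 0; i < PROC_MAX; i++)
            printf("proc %d -> %s\n", i,
                   actors[i].handler ? actors[i].name : "unregistered");
        return 0;
    }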
struct rpcsvc_program gd_svc_cli_prog = {
@@ -6651,27 +6671,25 @@ struct rpcsvc_program gd_svc_cli_prog = {
* read only queries, the only exception being MOUNT/UMOUNT which is required
* by geo-replication to support unprivileged master -> slave sessions.
*/
-rpcsvc_actor_t gd_svc_cli_trusted_actors[GLUSTER_CLI_MAXVALUE] = {
- [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS", GLUSTER_CLI_LIST_FRIENDS,
- glusterd_handle_cli_list_friends, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_UUID_GET] = {"UUID_GET", GLUSTER_CLI_UUID_GET,
- glusterd_handle_cli_uuid_get, NULL, 0, DRC_NA},
- [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", GLUSTER_CLI_GET_VOLUME,
- glusterd_handle_cli_get_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_GETWD] = {"GETWD", GLUSTER_CLI_GETWD, glusterd_handle_getwd,
- NULL, 1, DRC_NA},
- [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME", GLUSTER_CLI_STATUS_VOLUME,
- glusterd_handle_status_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", GLUSTER_CLI_LIST_VOLUME,
- glusterd_handle_cli_list_volume, NULL, 0,
- DRC_NA},
- [GLUSTER_CLI_MOUNT] = {"MOUNT", GLUSTER_CLI_MOUNT, glusterd_handle_mount,
- NULL, 1, DRC_NA},
- [GLUSTER_CLI_UMOUNT] = {"UMOUNT", GLUSTER_CLI_UMOUNT,
- glusterd_handle_umount, NULL, 1, DRC_NA},
+static rpcsvc_actor_t gd_svc_cli_trusted_actors[GLUSTER_CLI_MAXVALUE] = {
+ [GLUSTER_CLI_LIST_FRIENDS] = {"LIST_FRIENDS",
+ glusterd_handle_cli_list_friends, NULL,
+ GLUSTER_CLI_LIST_FRIENDS, DRC_NA, 0},
+ [GLUSTER_CLI_UUID_GET] = {"UUID_GET", glusterd_handle_cli_uuid_get, NULL,
+ GLUSTER_CLI_UUID_GET, DRC_NA, 0},
+ [GLUSTER_CLI_GET_VOLUME] = {"GET_VOLUME", glusterd_handle_cli_get_volume,
+ NULL, GLUSTER_CLI_GET_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_GETWD] = {"GETWD", glusterd_handle_getwd, NULL,
+ GLUSTER_CLI_GETWD, DRC_NA, 1},
+ [GLUSTER_CLI_STATUS_VOLUME] = {"STATUS_VOLUME",
+ glusterd_handle_status_volume, NULL,
+ GLUSTER_CLI_STATUS_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_LIST_VOLUME] = {"LIST_VOLUME", glusterd_handle_cli_list_volume,
+ NULL, GLUSTER_CLI_LIST_VOLUME, DRC_NA, 0},
+ [GLUSTER_CLI_MOUNT] = {"MOUNT", glusterd_handle_mount, NULL,
+ GLUSTER_CLI_MOUNT, DRC_NA, 1},
+ [GLUSTER_CLI_UMOUNT] = {"UMOUNT", glusterd_handle_umount, NULL,
+ GLUSTER_CLI_UMOUNT, DRC_NA, 1},
};
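
In line with the comment above this table, only GETWD, MOUNT and UMOUNT carry a trailing 1 (unprivileged), keeping them reachable from non-privileged source ports for geo-replication's unprivileged master -> slave sessions. A hedged sketch of the kind of gate the RPC layer applies to everything else (illustrative only, not the rpcsvc code):

    /* Assumption: POSIX reserves ports below 1024 for root, so rejecting
     * peers on high ports approximates a privileged-caller check. */
    #define SKETCH_PRIV_PORT_CEILING 1024

    static int
    sketch_actor_allowed(int peer_port, int actor_unprivileged)
    {
        if (actor_unprivileged)
            return 1; /* e.g. GETWD/MOUNT/UMOUNT: any caller */
        return peer_port < SKETCH_PRIV_PORT_CEILING;
    }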
struct rpcsvc_program gd_svc_cli_trusted_progs = {
@@ -6682,3 +6700,14 @@ struct rpcsvc_program gd_svc_cli_trusted_progs = {
.actors = gd_svc_cli_trusted_actors,
.synctask = _gf_true,
};
+
+/* As we can't remove the handlers, the tier-based handlers move
+ * into this file, since gluster-tier.c and the other tier source
+ * files no longer exist.
+ */
+
+int
+glusterd_handle_tier(rpcsvc_request_t *req)
+{
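+    /* The tier source files are gone, but GLUSTER_CLI_TIER and
+     * GLUSTER_CLI_REMOVE_TIER_BRICK still map here, so this stub
+     * treats the request as a successful no-op. */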
+ return 0;
+}