summary | refs | log | tree | commit | diff | stats
path: root/xlators/mgmt/glusterd/src/glusterd.h
diff options
context:
space:
mode:
Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd.h')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h | 507
1 file changed, 193 insertions, 314 deletions
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index b0a7d9a448d..cc4f98ecf47 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -15,30 +15,32 @@
#include <pthread.h>
#include <libgen.h>
-#include "compat-uuid.h"
+#include <glusterfs/compat-uuid.h>
#include "rpc-clnt.h"
-#include "glusterfs.h"
-#include "xlator.h"
-#include "logging.h"
-#include "call-stub.h"
-#include "byte-order.h"
+#include <glusterfs/glusterfs.h>
+#include <glusterfs/xlator.h>
+#include <glusterfs/logging.h>
+#include <glusterfs/call-stub.h>
+#include <glusterfs/byte-order.h>
#include "glusterd-mem-types.h"
#include "rpcsvc.h"
#include "glusterd-sm.h"
#include "glusterd-snapd-svc.h"
-#include "glusterd-tierd-svc.h"
+#include "glusterd-shd-svc.h"
#include "glusterd-bitd-svc.h"
#include "glusterd1-xdr.h"
#include "protocol-common.h"
#include "glusterd-pmap.h"
#include "cli1-xdr.h"
-#include "syncop.h"
-#include "store.h"
+#include <glusterfs/syncop.h>
+#include <glusterfs/store.h>
#include "glusterd-rcu.h"
-#include "events.h"
+#include <glusterfs/events.h>
#include "glusterd-gfproxyd-svc.h"
+#include "gd-common-utils.h"
+
#define GLUSTERD_TR_LOG_SIZE 50
#define GLUSTERD_QUORUM_TYPE_KEY "cluster.server-quorum-type"
#define GLUSTERD_QUORUM_RATIO_KEY "cluster.server-quorum-ratio"
@@ -56,10 +58,16 @@
#define GLUSTER_SHARED_STORAGE "gluster_shared_storage"
#define GLUSTERD_SHARED_STORAGE_KEY "cluster.enable-shared-storage"
#define GLUSTERD_BRICK_MULTIPLEX_KEY "cluster.brick-multiplex"
+#define GLUSTERD_VOL_CNT_PER_THRD "glusterd.vol_count_per_thread"
#define GLUSTERD_BRICKMUX_LIMIT_KEY "cluster.max-bricks-per-process"
+#define GLUSTERD_BRICKMUX_LIMIT_DFLT_VALUE "250"
+#define GLUSTERD_VOL_CNT_PER_THRD_DEFAULT_VALUE "100"
#define GLUSTERD_LOCALTIME_LOGGING_KEY "cluster.localtime-logging"
#define GLUSTERD_DAEMON_LOG_LEVEL_KEY "cluster.daemon-log-level"
+#define GANESHA_HA_CONF CONFDIR "/ganesha-ha.conf"
+#define GANESHA_EXPORT_DIRECTORY CONFDIR "/exports"
+
#define GLUSTERD_SNAPS_MAX_HARD_LIMIT 256
#define GLUSTERD_SNAPS_DEF_SOFT_LIMIT_PERCENT 90
#define GLUSTERD_SNAPS_MAX_SOFT_LIMIT_PERCENT 100
@@ -160,13 +168,8 @@ typedef struct {
struct _volfile_ctx *volfile;
pthread_mutex_t mutex;
struct cds_list_head peers;
- gf_boolean_t verify_volfile_checksum;
- gf_boolean_t trace;
uuid_t uuid;
- char workdir[VALID_GLUSTERD_PATHMAX];
- char rundir[VALID_GLUSTERD_PATHMAX];
rpcsvc_t *rpc;
- glusterd_svc_t shd_svc;
glusterd_svc_t nfs_svc;
glusterd_svc_t bitd_svc;
glusterd_svc_t scrub_svc;
@@ -175,6 +178,7 @@ typedef struct {
struct cds_list_head volumes;
struct cds_list_head snapshots; /*List of snap volumes */
struct cds_list_head brick_procs; /* List of brick processes */
+ struct cds_list_head shd_procs; /* List of shd processes */
pthread_mutex_t xprt_lock;
struct list_head xprt_list;
pthread_mutex_t import_volumes;
@@ -196,13 +200,18 @@ typedef struct {
pthread_t brick_thread;
void *hooks_priv;
+ xlator_t *xl; /* Should be set to 'THIS' before creating thread */
/* need for proper handshake_t */
int op_version; /* Starts with 1 for 3.3.0 */
- xlator_t *xl; /* Should be set to 'THIS' before creating thread */
gf_boolean_t pending_quorum_action;
+ gf_boolean_t verify_volfile_checksum;
+ gf_boolean_t trace;
+ gf_boolean_t restart_done;
dict_t *opts;
synclock_t big_lock;
- gf_boolean_t restart_done;
+ synccond_t cond_restart_bricks;
+ synccond_t cond_restart_shd;
+ synccond_t cond_blockers;
rpcsvc_t *uds_rpc; /* RPCSVC for the unix domain socket */
uint32_t base_port;
uint32_t max_port;
@@ -212,11 +221,35 @@ typedef struct {
int ping_timeout;
uint32_t generation;
int32_t workers;
- gf_atomic_t blockers;
uint32_t mgmt_v3_lock_timeout;
+ gf_atomic_t blockers;
+ pthread_mutex_t attach_lock; /* Lock can be per process or a common one */
+ pthread_mutex_t volume_lock; /* We release the big_lock from lot of places
+ which might lead the modification of volinfo
+ list.
+ */
+ gf_atomic_t thread_count;
gf_boolean_t restart_bricks;
+ gf_boolean_t restart_shd; /* This flag prevents running two shd manager
+ simultaneously
+ */
+ char workdir[VALID_GLUSTERD_PATHMAX];
+ char rundir[VALID_GLUSTERD_PATHMAX];
+ char logdir[VALID_GLUSTERD_PATHMAX];
} glusterd_conf_t;
+typedef struct glusterd_add_dict_args {
+ xlator_t *this;
+ dict_t *voldict;
+ int start;
+ int end;
+} glusterd_add_dict_args_t;
+
+typedef struct glusterd_friend_synctask_args {
+ char *dict_buf;
+ u_int dictlen;
+} glusterd_friend_synctask_args_t;
+
typedef enum gf_brick_status {
GF_BRICK_STOPPED,
GF_BRICK_STARTED,
@@ -236,25 +269,15 @@ struct glusterd_brick_proc {
typedef struct glusterd_brick_proc glusterd_brick_proc_t;
struct glusterd_brickinfo {
- char hostname[NAME_MAX];
- char path[VALID_GLUSTERD_PATHMAX];
- char real_path[VALID_GLUSTERD_PATHMAX];
- char device_path[VALID_GLUSTERD_PATHMAX];
- char mount_dir[VALID_GLUSTERD_PATHMAX];
- char brick_id[1024]; /*Client xlator name, AFR changelog name*/
- char fstype[NAME_MAX]; /* Brick file-system type */
- char mnt_opts[1024]; /* Brick mount options */
struct cds_list_head brick_list;
uuid_t uuid;
int port;
int rdma_port;
char *logfile;
gf_store_handle_t *shandle;
- gf_brick_status_t status;
struct rpc_clnt *rpc;
int decommissioned;
- char vg[PATH_MAX]; /* FIXME: Use max size for length of vg */
- int caps; /* Capability */
+ gf_brick_status_t status;
int32_t snap_status;
/*
* The group is used to identify which bricks are part of the same
@@ -264,22 +287,30 @@ struct glusterd_brickinfo {
* a replica 3 volume with arbiter enabled.
*/
uint16_t group;
- uuid_t jbr_uuid;
+ gf_boolean_t port_registered;
+ gf_boolean_t start_triggered;
/* Below are used for handling the case of multiple bricks sharing
the backend filesystem */
uint64_t statfs_fsid;
- uint32_t fs_share_count;
- gf_boolean_t port_registered;
- gf_boolean_t start_triggered;
pthread_mutex_t restart_mutex;
glusterd_brick_proc_t *brick_proc; /* Information regarding mux bricks */
struct cds_list_head mux_bricks; /* List to store the bricks in brick_proc*/
+ uint32_t fs_share_count;
+ char hostname[NAME_MAX];
+ char path[VALID_GLUSTERD_PATHMAX];
+ char real_path[VALID_GLUSTERD_PATHMAX];
+ char device_path[VALID_GLUSTERD_PATHMAX];
+ char mount_dir[VALID_GLUSTERD_PATHMAX];
+ char brick_id[1024]; /*Client xlator name, AFR changelog name*/
+ char fstype[NAME_MAX]; /* Brick file-system type */
+ char mnt_opts[1024]; /* Brick mount options */
+ char vg[PATH_MAX]; /* FIXME: Use max size for length of vg */
};
struct glusterd_gfproxyd_info {
- short port;
char *logfile;
+ short port;
};
struct gf_defrag_brickinfo_ {
@@ -298,14 +329,13 @@ struct glusterd_defrag_info_ {
uint64_t total_failures;
gf_lock_t lock;
int cmd;
+ uint32_t connected;
pthread_t th;
- gf_defrag_status_t defrag_status;
struct rpc_clnt *rpc;
- uint32_t connected;
- char mount[1024];
struct gf_defrag_brickinfo_ *bricks; /* volinfo->brick_count */
-
defrag_cbk_fn_t cbk_fn;
+ gf_defrag_status_t defrag_status;
+ char mount[1024];
};
typedef struct glusterd_defrag_info_ glusterd_defrag_info_t;
@@ -350,20 +380,20 @@ struct glusterd_bitrot_scrub_ {
typedef struct glusterd_bitrot_scrub_ glusterd_bitrot_scrub_t;
struct glusterd_rebalance_ {
- gf_defrag_status_t defrag_status;
uint64_t rebalance_files;
uint64_t rebalance_data;
uint64_t lookedup_files;
uint64_t skipped_files;
+ uint64_t rebalance_failures;
glusterd_defrag_info_t *defrag;
gf_cli_defrag_type defrag_cmd;
- uint64_t rebalance_failures;
+ gf_defrag_status_t defrag_status;
uuid_t rebalance_id;
double rebalance_time;
uint64_t time_left;
- glusterd_op_t op;
dict_t *dict; /* Dict to store misc information
* like list of bricks being removed */
+ glusterd_op_t op;
uint32_t commit_hash;
};
@@ -382,44 +412,10 @@ typedef enum gd_quorum_status_ {
DOESNT_MEET_QUORUM, // Follows quorum and does not meet.
} gd_quorum_status_t;
-typedef struct tier_info_ {
- int cold_type;
- int cold_brick_count;
- int cold_replica_count;
- int cold_disperse_count;
- int cold_dist_leaf_count;
- int cold_redundancy_count;
- int hot_type;
- int hot_brick_count;
- int hot_replica_count;
- int promoted;
- int demoted;
- uint16_t cur_tier_hot;
-} gd_tier_info_t;
-
struct glusterd_volinfo_ {
gf_lock_t lock;
- gf_boolean_t is_snap_volume;
glusterd_snap_t *snapshot;
uuid_t restored_from_snap;
- gd_tier_info_t tier_info;
- gf_boolean_t is_tier_enabled;
- char parent_volname[GD_VOLUME_NAME_MAX];
- /* In case of a snap volume
- i.e (is_snap_volume == TRUE) this
- field will contain the name of
- the volume which is snapped. In
- case of a non-snap volume, this
- field will be initialized as N/A */
- char volname[NAME_MAX + 1];
- /* NAME_MAX + 1 will be equal to
- * GD_VOLUME_NAME_MAX + 5.(also to
- * GD_VOLUME_NAME_MAX_TIER). An extra 5
- * bytes are added to GD_VOLUME_NAME_MAX
- * because, as part of the tiering
- * volfile generation code, we are
- * temporarily appending either "-hot"
- * or "-cold" */
int type;
int brick_count;
uint64_t snap_count;
@@ -434,6 +430,7 @@ struct glusterd_volinfo_ {
/* This is a current pointer for
glusterd_volinfo_t->snap_volumes */
struct cds_list_head bricks;
+ struct cds_list_head ta_bricks;
struct cds_list_head snap_volumes;
/* TODO : Need to remove this, as this
* is already part of snapshot object.
@@ -443,6 +440,7 @@ struct glusterd_volinfo_ {
int stripe_count;
int replica_count;
int arbiter_count;
+ int thin_arbiter_count;
int disperse_count;
int redundancy_count;
int subvol_count; /* Number of subvolumes in a
@@ -463,13 +461,10 @@ struct glusterd_volinfo_ {
/* Bitrot scrub status*/
glusterd_bitrot_scrub_t bitrot_scrub;
- glusterd_rebalance_t tier;
-
int version;
uint32_t quota_conf_version;
uint32_t cksum;
uint32_t quota_conf_cksum;
- gf_transport_type transport_type;
dict_t *dict;
@@ -480,28 +475,48 @@ struct glusterd_volinfo_ {
dict_t *gsync_slaves;
dict_t *gsync_active_slaves;
- int decommission_in_progress;
xlator_t *xl;
-
- gf_boolean_t memory_accounting;
- int caps; /* Capability */
+ int decommission_in_progress;
int op_version;
int client_op_version;
+ int32_t quota_xattr_version;
pthread_mutex_t reflock;
int refcnt;
gd_quorum_status_t quorum_status;
glusterd_snapdsvc_t snapd;
- glusterd_tierdsvc_t tierd;
+ glusterd_shdsvc_t shd;
glusterd_gfproxydsvc_t gfproxyd;
- int32_t quota_xattr_version;
- gf_boolean_t stage_deleted; /* volume has passed staging
- * for delete operation
- */
pthread_mutex_t store_volinfo_lock; /* acquire lock for
* updating the volinfo
*/
+ gf_transport_type transport_type;
+ gf_boolean_t is_snap_volume;
+ gf_boolean_t memory_accounting;
+ gf_boolean_t stage_deleted; /* volume has passed staging
+ * for delete operation
+ */
+ char parent_volname[GD_VOLUME_NAME_MAX];
+ /* In case of a snap volume
+ i.e (is_snap_volume == TRUE) this
+ field will contain the name of
+ the volume which is snapped. In
+ case of a non-snap volume, this
+ field will be initialized as N/A */
+ char volname[NAME_MAX + 1];
+ /* NAME_MAX + 1 will be equal to
+ * GD_VOLUME_NAME_MAX + 5.(also to
+ * GD_VOLUME_NAME_MAX_TIER). An extra 5
+ * bytes are added to GD_VOLUME_NAME_MAX
+ * because, as part of the tiering
+ * volfile generation code, we are
+ * temporarily appending either "-hot"
+ * or "-cold" */
+ gf_atomic_t volpeerupdate;
+ /* Flag to check about volume has received updates
+ from peer
+ */
};
typedef enum gd_snap_status_ {
@@ -517,22 +532,22 @@ struct glusterd_snap_ {
gf_lock_t lock;
struct cds_list_head volumes;
struct cds_list_head snap_list;
- char snapname[GLUSTERD_MAX_SNAP_NAME];
- uuid_t snap_id;
char *description;
+ uuid_t snap_id;
time_t time_stamp;
- gf_boolean_t snap_restored;
- gd_snap_status_t snap_status;
gf_store_handle_t *shandle;
+ gd_snap_status_t snap_status;
+ gf_boolean_t snap_restored;
+ char snapname[GLUSTERD_MAX_SNAP_NAME];
};
typedef struct glusterd_snap_op_ {
char *snap_vol_id;
- int32_t brick_num;
char *brick_path;
+ struct cds_list_head snap_ops_list;
+ int32_t brick_num;
int32_t op;
int32_t status;
- struct cds_list_head snap_ops_list;
} glusterd_snap_op_t;
typedef struct glusterd_missed_snap_ {
@@ -570,9 +585,9 @@ typedef struct glusterd_pending_node_ {
struct gsync_config_opt_vals_ {
char *op_name;
+ char *values[GEO_CONF_MAX_OPT_VALS];
int no_of_pos_vals;
gf_boolean_t case_sensitive;
- char *values[GEO_CONF_MAX_OPT_VALS];
};
enum glusterd_op_ret {
@@ -599,6 +614,9 @@ typedef enum {
#define GLUSTERD_DEFAULT_PORT GF_DEFAULT_BASE_PORT
#define GLUSTERD_INFO_FILE "glusterd.info"
+#define GLUSTERD_UPGRADE_FILE \
+ "glusterd.upgrade" /* zero byte file to detect a need for regenerating \
+ volfiles in container mode */
#define GLUSTERD_VOLUME_QUOTA_CONFIG "quota.conf"
#define GLUSTERD_VOLUME_DIR_PREFIX "vols"
#define GLUSTERD_PEER_DIR_PREFIX "peers"
@@ -617,7 +635,6 @@ typedef enum {
#define GLUSTERD_DEFAULT_SNAPS_BRICK_DIR "/gluster/snaps"
#define GLUSTERD_BITD_RUN_DIR "/bitd"
#define GLUSTERD_SCRUB_RUN_DIR "/scrub"
-#define GLUSTERD_GLUSTERSHD_RUN_DIR "/glustershd"
#define GLUSTERD_NFS_RUN_DIR "/nfs"
#define GLUSTERD_QUOTAD_RUN_DIR "/quotad"
#define GLUSTER_SHARED_STORAGE_BRICK_DIR GLUSTERD_DEFAULT_WORKDIR "/ss_brick"
@@ -651,24 +668,36 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
} \
} while (0)
-#define GLUSTERD_GET_TIER_DIR(path, volinfo, priv) \
+#define GLUSTERD_GET_DEFRAG_DIR(path, volinfo, priv) \
+ do { \
+ char vol_path[PATH_MAX]; \
+ int32_t _defrag_dir_len; \
+ GLUSTERD_GET_VOLUME_DIR(vol_path, volinfo, priv); \
+ _defrag_dir_len = snprintf(path, PATH_MAX, "%s/%s", vol_path, \
+ "rebalance"); \
+ if ((_defrag_dir_len < 0) || (_defrag_dir_len >= PATH_MAX)) { \
+ path[0] = 0; \
+ } \
+ } while (0)
+
+#define GLUSTERD_GET_DEFRAG_PID_FILE(path, volinfo, priv) \
do { \
- int32_t _tier_dir_len; \
- _tier_dir_len = snprintf(path, PATH_MAX, "%s/tier/%s", priv->workdir, \
- volinfo->volname); \
- if ((_tier_dir_len < 0) || (_tier_dir_len >= PATH_MAX)) { \
+ char defrag_path[PATH_MAX]; \
+ int32_t _defrag_pidfile_len; \
+ GLUSTERD_GET_DEFRAG_DIR(defrag_path, volinfo, priv); \
+ _defrag_pidfile_len = snprintf(path, PATH_MAX, "%s/%s.pid", \
+ defrag_path, uuid_utoa(MY_UUID)); \
+ if ((_defrag_pidfile_len < 0) || (_defrag_pidfile_len >= PATH_MAX)) { \
path[0] = 0; \
} \
} while (0)
-#define GLUSTERD_GET_TIER_PID_FILE(path, volinfo, priv) \
+#define GLUSTERD_GET_SHD_RUNDIR(path, volinfo, priv) \
do { \
- char tier_path[PATH_MAX]; \
- int32_t _tier_pid_len; \
- GLUSTERD_GET_TIER_DIR(tier_path, volinfo, priv); \
- _tier_pid_len = snprintf(path, PATH_MAX, "%s/run/%s-tierd.pid", \
- tier_path, volinfo->volname); \
- if ((_tier_pid_len < 0) || (_tier_pid_len >= PATH_MAX)) { \
+ int32_t _shd_dir_len; \
+ _shd_dir_len = snprintf(path, PATH_MAX, "%s/shd/%s", priv->rundir, \
+ volinfo->volname); \
+ if ((_shd_dir_len < 0) || (_shd_dir_len >= PATH_MAX)) { \
path[0] = 0; \
} \
} while (0)
@@ -689,16 +718,6 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
} \
} while (0)
-#define GLUSTERD_GET_SNAP_DIR(path, snap, priv) \
- do { \
- int32_t _snap_dir_len; \
- _snap_dir_len = snprintf(path, PATH_MAX, "%s/snaps/%s", priv->workdir, \
- snap->snapname); \
- if ((_snap_dir_len < 0) || (_snap_dir_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
#define GLUSTERD_GET_SNAP_GEO_REP_DIR(path, snap, priv) \
do { \
int32_t _snap_geo_len; \
@@ -709,42 +728,6 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
} \
} while (0)
-#define GLUSTERD_GET_BRICK_DIR(path, volinfo, priv) \
- do { \
- int32_t _brick_len; \
- if (volinfo->is_snap_volume) { \
- _brick_len = snprintf(path, PATH_MAX, "%s/snaps/%s/%s/%s", \
- priv->workdir, volinfo->snapshot->snapname, \
- volinfo->volname, GLUSTERD_BRICK_INFO_DIR); \
- } else { \
- _brick_len = snprintf(path, PATH_MAX, "%s/%s/%s/%s", \
- priv->workdir, GLUSTERD_VOLUME_DIR_PREFIX, \
- volinfo->volname, GLUSTERD_BRICK_INFO_DIR); \
- } \
- if ((_brick_len < 0) || (_brick_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_NFS_DIR(path, priv) \
- do { \
- int32_t _nfs_dir_len; \
- _nfs_dir_len = snprintf(path, PATH_MAX, "%s/nfs", priv->workdir); \
- if ((_nfs_dir_len < 0) || (_nfs_dir_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_QUOTAD_DIR(path, priv) \
- do { \
- int32_t _quotad_dir_len; \
- _quotad_dir_len = snprintf(path, PATH_MAX, "%s/quotad", \
- priv->workdir); \
- if ((_quotad_dir_len < 0) || (_quotad_dir_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
#define GLUSTERD_GET_QUOTA_LIMIT_MOUNT_PATH(abspath, volname, path) \
do { \
snprintf(abspath, sizeof(abspath) - 1, \
@@ -752,18 +735,6 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
path); \
} while (0)
-#define GLUSTERD_GET_QUOTA_LIST_MOUNT_PATH(abspath, volname, path) \
- do { \
- snprintf(abspath, sizeof(abspath) - 1, \
- DEFAULT_VAR_RUN_DIRECTORY "/%s_quota_list%s", volname, path); \
- } while (0)
-
-#define GLUSTERD_GET_TMP_PATH(abspath, path) \
- do { \
- snprintf(abspath, sizeof(abspath) - 1, \
- DEFAULT_VAR_RUN_DIRECTORY "/tmp%s", path); \
- } while (0)
-
#define GLUSTERD_REMOVE_SLASH_FROM_PATH(path, string) \
do { \
int i = 0; \
@@ -792,138 +763,19 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
} \
} while (0)
-#define GLUSTERD_GET_NFS_PIDFILE(pidfile, nfspath, priv) \
- do { \
- int32_t _nfs_pid_len; \
- _nfs_pid_len = snprintf(pidfile, PATH_MAX, "%s/nfs/nfs.pid", \
- priv->rundir); \
- if ((_nfs_pid_len < 0) || (_nfs_pid_len >= PATH_MAX)) { \
- pidfile[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_QUOTAD_PIDFILE(pidfile, quotadpath, priv) \
- do { \
- int32_t _quotad_pid_len; \
- _quotad_pid_len = snprintf(pidfile, PATH_MAX, "%s/quotad/quotad.pid", \
- priv->rundir); \
- if ((_quotad_pid_len < 0) || (_quotad_pid_len >= PATH_MAX)) { \
- pidfile[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_QUOTA_CRAWL_PIDDIR(piddir, volinfo, type) \
- do { \
- char _volpath[PATH_MAX] = { \
- 0, \
- }; \
- int32_t _crawl_pid_len; \
- GLUSTERD_GET_VOLUME_DIR(_volpath, volinfo, priv); \
- if (type == GF_QUOTA_OPTION_TYPE_ENABLE || \
- type == GF_QUOTA_OPTION_TYPE_ENABLE_OBJECTS) \
- _crawl_pid_len = snprintf(piddir, PATH_MAX, "%s/run/quota/enable", \
- _volpath); \
- else \
- _crawl_pid_len = snprintf(piddir, PATH_MAX, \
- "%s/run/quota/disable", _volpath); \
- if ((_crawl_pid_len < 0) || (_crawl_pid_len >= PATH_MAX)) { \
- piddir[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_STACK_DESTROY(frame) \
- do { \
- frame->local = NULL; \
- STACK_DESTROY(frame->root); \
- } while (0)
-
-#define GLUSTERD_GET_DEFRAG_PROCESS(path, volinfo) \
- do { \
- if (volinfo->rebal.defrag_cmd == GF_DEFRAG_CMD_START_TIER) \
- snprintf(path, NAME_MAX, "tier"); \
- else \
- snprintf(path, NAME_MAX, "rebalance"); \
- } while (0)
-
-#define GLUSTERD_GET_DEFRAG_DIR(path, volinfo, priv) \
- do { \
- char vol_path[PATH_MAX]; \
- char operation[NAME_MAX]; \
- int32_t _defrag_dir_len; \
- GLUSTERD_GET_VOLUME_DIR(vol_path, volinfo, priv); \
- GLUSTERD_GET_DEFRAG_PROCESS(operation, volinfo); \
- _defrag_dir_len = snprintf(path, PATH_MAX, "%s/%s", vol_path, \
- operation); \
- if ((_defrag_dir_len < 0) || (_defrag_dir_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_DEFRAG_SOCK_FILE_OLD(path, volinfo, priv) \
- do { \
- char defrag_path[PATH_MAX]; \
- int32_t _sockfile_old_len; \
- GLUSTERD_GET_DEFRAG_DIR(defrag_path, volinfo, priv); \
- _sockfile_old_len = snprintf(path, PATH_MAX, "%s/%s.sock", \
- defrag_path, uuid_utoa(MY_UUID)); \
- if ((_sockfile_old_len < 0) || (_sockfile_old_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_DEFRAG_SOCK_FILE(path, volinfo) \
- do { \
- char operation[NAME_MAX]; \
- int32_t _defrag_sockfile_len; \
- GLUSTERD_GET_DEFRAG_PROCESS(operation, volinfo); \
- _defrag_sockfile_len = snprintf( \
- path, UNIX_PATH_MAX, \
- DEFAULT_VAR_RUN_DIRECTORY "/gluster-%s-%s.sock", operation, \
- uuid_utoa(volinfo->volume_id)); \
- if ((_defrag_sockfile_len < 0) || \
- (_defrag_sockfile_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERD_GET_DEFRAG_PID_FILE(path, volinfo, priv) \
- do { \
- char defrag_path[PATH_MAX]; \
- int32_t _defrag_pidfile_len; \
- GLUSTERD_GET_DEFRAG_DIR(defrag_path, volinfo, priv); \
- _defrag_pidfile_len = snprintf(path, PATH_MAX, "%s/%s.pid", \
- defrag_path, uuid_utoa(MY_UUID)); \
- if ((_defrag_pidfile_len < 0) || (_defrag_pidfile_len >= PATH_MAX)) { \
- path[0] = 0; \
- } \
- } while (0)
-
-#define GLUSTERFS_GET_QUOTA_LIMIT_MOUNT_PIDFILE(pidfile, volname) \
+#define RCU_READ_LOCK \
+ pthread_mutex_lock(&(THIS->ctx)->cleanup_lock); \
{ \
- snprintf(pidfile, PATH_MAX - 1, \
- DEFAULT_VAR_RUN_DIRECTORY "/%s_quota_limit.pid", volname); \
- }
+ rcu_read_lock(); \
+ } \
+ pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock);
-#define GLUSTERFS_GET_QUOTA_LIST_MOUNT_PIDFILE(pidfile, volname) \
+#define RCU_READ_UNLOCK \
+ pthread_mutex_lock(&(THIS->ctx)->cleanup_lock); \
{ \
- snprintf(pidfile, PATH_MAX - 1, \
- DEFAULT_VAR_RUN_DIRECTORY "/%s_quota_list.pid", volname); \
- }
-
-#define GLUSTERD_GET_UUID_NOHYPHEN(ret_string, uuid) \
- do { \
- char *snap_volname_ptr = ret_string; \
- char tmp_uuid[64]; \
- char *snap_volid_ptr = uuid_utoa_r(uuid, tmp_uuid); \
- while (*snap_volid_ptr) { \
- if (*snap_volid_ptr == '-') { \
- snap_volid_ptr++; \
- } else { \
- (*snap_volname_ptr++) = (*snap_volid_ptr++); \
- } \
- } \
- *snap_volname_ptr = '\0'; \
- } while (0)
+ rcu_read_unlock(); \
+ } \
+ pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock);
#define GLUSTERD_DUMP_PEERS(head, member, xpeers) \
do { \
@@ -933,7 +785,7 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
\
key = xpeers ? "glusterd.xaction_peer" : "glusterd.peer"; \
\
- rcu_read_lock(); \
+ RCU_READ_LOCK; \
cds_list_for_each_entry_rcu(_peerinfo, head, member) \
{ \
glusterd_dump_peer(_peerinfo, key, index, xpeers); \
@@ -941,7 +793,7 @@ typedef ssize_t (*gd_serialize_t)(struct iovec outmsg, void *args);
glusterd_dump_peer_rpcstat(_peerinfo, key, index); \
index++; \
} \
- rcu_read_unlock(); \
+ RCU_READ_UNLOCK; \
\
} while (0)
@@ -1204,15 +1056,11 @@ int
glusterd_fetchsnap_notify(xlator_t *this);
int
-glusterd_add_tier_volume_detail_to_dict(glusterd_volinfo_t *volinfo,
- dict_t *volumes, int count);
-
-int
glusterd_add_volume_detail_to_dict(glusterd_volinfo_t *volinfo, dict_t *volumes,
int count);
int
-glusterd_restart_bricks();
+glusterd_restart_bricks(void *opaque);
int32_t
glusterd_volume_txn(rpcsvc_request_t *req, char *volname, int flags,
@@ -1338,13 +1186,33 @@ glusterd_op_stop_volume(dict_t *dict);
int
glusterd_op_delete_volume(dict_t *dict);
int
+glusterd_handle_ganesha_op(dict_t *dict, char **op_errstr, char *key,
+ char *value);
+int
+glusterd_check_ganesha_cmd(char *key, char *value, char **errstr, dict_t *dict);
+int
+glusterd_op_stage_set_ganesha(dict_t *dict, char **op_errstr);
+int
+glusterd_op_set_ganesha(dict_t *dict, char **errstr);
+int
+ganesha_manage_export(dict_t *dict, char *value,
+ gf_boolean_t update_cache_invalidation, char **op_errstr);
+int
+gd_ganesha_send_dbus(char *volname, char *value);
+gf_boolean_t
+glusterd_is_ganesha_cluster();
+gf_boolean_t
+glusterd_check_ganesha_export(glusterd_volinfo_t *volinfo);
+int
+stop_ganesha(char **op_errstr);
+int
+tear_down_cluster(gf_boolean_t run_teardown);
+int
manage_export_config(char *volname, char *value, char **op_errstr);
int
glusterd_op_add_brick(dict_t *dict, char **op_errstr);
int
-glusterd_op_add_tier_brick(dict_t *dict, char **op_errstr);
-int
glusterd_op_remove_brick(dict_t *dict, char **op_errstr);
int
glusterd_op_stage_add_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
@@ -1352,6 +1220,18 @@ int
glusterd_op_stage_remove_brick(dict_t *dict, char **op_errstr);
int
+glusterd_set_rebalance_id_for_remove_brick(dict_t *req_dict, dict_t *rsp_dict);
+
+int
+glusterd_set_rebalance_id_in_rsp_dict(dict_t *req_dict, dict_t *rsp_dict);
+
+int
+glusterd_mgmt_v3_op_stage_rebalance(dict_t *dict, char **op_errstr);
+
+int
+glusterd_mgmt_v3_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
+
+int
glusterd_op_stage_rebalance(dict_t *dict, char **op_errstr);
int
glusterd_op_rebalance(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
@@ -1477,20 +1357,19 @@ glusterd_should_i_stop_bitd();
int
glusterd_remove_brick_migrate_cbk(glusterd_volinfo_t *volinfo,
gf_defrag_status_t status);
-/* tier */
-
int
__glusterd_handle_reset_brick(rpcsvc_request_t *req);
-int
-glusterd_op_stage_tier(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
-int
-glusterd_op_tier_start_stop(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
-int
-glusterd_op_remove_tier_brick(dict_t *dict, char **op_errstr, dict_t *rsp_dict);
-int
-glusterd_tier_prevalidate(dict_t *dict, char **op_errstr, dict_t *rsp_dict,
- uint32_t *op_errno);
int
glusterd_options_init(xlator_t *this);
+
+int32_t
+glusterd_recreate_volfiles(glusterd_conf_t *conf);
+
+void
+glusterd_add_peers_to_auth_list(char *volname);
+
+int
+glusterd_replace_old_auth_allow_list(char *volname);
+
#endif