author     M. Mohan Kumar <mohan@in.ibm.com>    2012-11-29 21:46:07 +0530
committer  Vijay Bellur <vbellur@redhat.com>    2012-11-29 09:39:25 -0800
commit     ca796eba11a3f965bfbaa9bbffb5ef00c9bbb7ad (patch)
tree       26dfc9d8d2c5f0aa2cd2cfe5d590f4930bd4a7ba /xlators/mgmt
parent     b7840704c2095ad64f56da8d37fbae26db3a81ac (diff)
BD Backend: Volume creation support
A new parameter type is added to the volume create command. To use the BD
xlator, one has to specify the following argument in addition to the normal
volume create arguments:
device vg brick:<VG-NAME>
For example:
$ gluster volume create lv_volume device vg host:/vg1
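As an illustrative end-to-end sketch (assuming the volume group does not yet
exist on the brick host; the VG name and device path here are only examples),
the VG is created first with the standard LVM tools and then exported as the
brick:
$ vgcreate vg1 /dev/sdb1
$ gluster volume create lv_volume device vg host:/vg1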
Changes from the previous version:
* New type 'backend' added to the volinfo structure to differentiate between
  the posix and bd xlators
* Most of the volume-related commands are updated to handle the BD xlator;
  commands like add-brick, heal-brick, etc. refuse to work when the volume is
  of BD xlator type
* Only one VG (i.e. one brick) can be specified for the BD xlator during
  volume creation
* volume info shows the VG info if the volume is of BD xlator type
BUG: 805138
Change-Id: I0ff90aca04840c71f364fabb0ab43ce33f9278ce
Signed-off-by: M. Mohan Kumar <mohan@in.ibm.com>
Reviewed-on: http://review.gluster.org/3717
Reviewed-by: Vijay Bellur <vbellur@redhat.com>
Tested-by: Vijay Bellur <vbellur@redhat.com>
Diffstat (limited to 'xlators/mgmt')
-rw-r--r--  xlators/mgmt/glusterd/src/Makefile.am               |  3
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-brick-ops.c      |  9
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-handler.c        |  7
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-replace-brick.c  |  7
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.c          | 12
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-store.h          |  1
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volgen.c         | 52
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c     | 65
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd.h                |  7
9 files changed, 149 insertions, 14 deletions
diff --git a/xlators/mgmt/glusterd/src/Makefile.am b/xlators/mgmt/glusterd/src/Makefile.am
index 95d2aba9dcb..5ec9d2bef15 100644
--- a/xlators/mgmt/glusterd/src/Makefile.am
+++ b/xlators/mgmt/glusterd/src/Makefile.am
@@ -2,6 +2,9 @@ xlator_LTLIBRARIES = glusterd.la
 xlatordir = $(libdir)/glusterfs/$(PACKAGE_VERSION)/xlator/mgmt
 glusterd_la_CPPFLAGS = $(AM_CPPFLAGS) "-DFILTERDIR=\"$(libdir)/glusterfs/$(PACKAGE_VERSION)/filter\""
 glusterd_la_LDFLAGS = -module -avoidversion
+if ENABLE_BD_XLATOR
+glusterd_la_LDFLAGS += -llvm2app
+endif
 glusterd_la_SOURCES = glusterd.c glusterd-handler.c glusterd-sm.c \
 	glusterd-op-sm.c glusterd-utils.c glusterd-rpc-ops.c \
 	glusterd-store.c glusterd-handshake.c glusterd-pmap.c \
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index ade30fe951a..a14828e980b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -1035,6 +1035,15 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
+        if (volinfo->backend == GD_VOL_BK_BD) {
+                snprintf (msg, sizeof (msg), "Add brick is not supported for "
+                          "Block backend volume %s.", volname);
+                gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
+                *op_errstr = gf_strdup (msg);
+                ret = -1;
+                goto out;
+        }
+
         ret = glusterd_validate_volume_id (dict, volinfo);
         if (ret)
                 goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index cec94f89f6c..9433436d0ae 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -370,6 +370,13 @@ glusterd_add_volume_detail_to_dict (glusterd_volinfo_t *volinfo,
         if (ret)
                 goto out;
 
+#ifdef HAVE_BD_XLATOR
+        snprintf (key, 256, "volume%d.backend", count);
+        ret = dict_set_int32 (volumes, key, volinfo->backend);
+        if (ret)
+                goto out;
+#endif
+
         list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
                 char brick[1024] = {0,};
                 snprintf (key, 256, "volume%d.brick%d", count, i);
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index edc2627c1b3..bca306bd897 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -243,6 +243,13 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
                 goto out;
         }
 
+        if (volinfo->backend == GD_VOL_BK_BD) {
+                snprintf (msg, sizeof (msg), "replace brick not supported "
+                          "for Block backend volume");
+                *op_errstr = gf_strdup (msg);
+                goto out;
+        }
+
         if (GLUSTERD_STATUS_STARTED != volinfo->status) {
                 ret = -1;
                 snprintf (msg, sizeof (msg), "volume: %s is not started",
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 471a24e6622..413c8a39abb 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -703,6 +703,15 @@ glusterd_volume_exclude_options_write (int fd, glusterd_volinfo_t *volinfo)
                 if (ret)
                         goto out;
         }
+
+        if (volinfo->backend == GD_VOL_BK_BD) {
+                snprintf (buf, sizeof (buf), "%d", volinfo->backend);
+                ret = glusterd_store_save_value (fd,
+                                GLUSTERD_STORE_KEY_VOL_BACKEND, buf);
+                if (ret)
+                        goto out;
+        }
+
 out:
         if (ret)
                 gf_log ("", GF_LOG_ERROR, "Unable to write volume values"
@@ -2341,6 +2350,9 @@ glusterd_store_retrieve_volume (char *volname)
                                 gf_log ("", GF_LOG_DEBUG, "Parsed as "GEOREP" "
                                         " slave:key=%s,value:%s", key, value);
 
+                        } else if (!strncmp (key, GLUSTERD_STORE_KEY_VOL_BACKEND,
+                                             strlen (GLUSTERD_STORE_KEY_VOL_BACKEND))) {
+                                volinfo->backend = atoi (value);
                         } else {
                                 if (is_key_glusterd_hooks_friendly (key)) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.h b/xlators/mgmt/glusterd/src/glusterd-store.h
index d8c1567a661..1ab398c0b77 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.h
+++ b/xlators/mgmt/glusterd/src/glusterd-store.h
@@ -64,6 +64,7 @@ typedef enum glusterd_store_ver_ac_{
 #define GLUSTERD_STORE_KEY_PEER_UUID "uuid"
 #define GLUSTERD_STORE_KEY_PEER_HOSTNAME "hostname"
 #define GLUSTERD_STORE_KEY_PEER_STATE "state"
+#define GLUSTERD_STORE_KEY_VOL_BACKEND "backend"
 
 #define glusterd_for_each_entry(entry, dir) \
         do {\
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 76172dd9bce..f3de2feed52 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -1574,6 +1574,8 @@ server_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
         char     *password = NULL;
         char     index_basepath[PATH_MAX] = {0};
         char     key[1024] = {0};
+        char     *vgname = NULL;
+        char     *vg = NULL;
 
         path = param;
         volname = volinfo->volname;
@@ -1591,23 +1593,47 @@ server_graph_builder (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
                 }
         }
 
-        xl = volgen_graph_add (graph, "storage/posix", volname);
-        if (!xl)
-                return -1;
+        if (volinfo->backend == GD_VOL_BK_BD) {
+                xl = volgen_graph_add (graph, "storage/bd_map", volname);
+                if (!xl)
+                        return -1;
 
-        ret = xlator_set_option (xl, "directory", path);
-        if (ret)
-                return -1;
+                ret = xlator_set_option (xl, "device", "vg");
+                if (ret)
+                        return -1;
 
-        ret = xlator_set_option (xl, "volume-id",
-                                 uuid_utoa (volinfo->volume_id));
-        if (ret)
-                return -1;
+                vg = gf_strdup (path);
+                vgname = strrchr (vg, '/');
+                if (strchr(vg, '/') != vgname) {
+                        gf_log ("glusterd", GF_LOG_ERROR,
+                                "invalid vg specified %s", path);
+                        GF_FREE (vg);
+                        goto out;
+                }
+                vgname++;
+                ret = xlator_set_option (xl, "export", vgname);
+                GF_FREE (vg);
+                if (ret)
+                        return -1;
+        } else {
+                xl = volgen_graph_add (graph, "storage/posix", volname);
+                if (!xl)
+                        return -1;
 
-        ret = check_and_add_debug_xl (graph, set_dict, volname, "posix");
-        if (ret)
-                return -1;
+                ret = xlator_set_option (xl, "directory", path);
+                if (ret)
+                        return -1;
 
+                ret = xlator_set_option (xl, "volume-id",
+                                         uuid_utoa (volinfo->volume_id));
+                if (ret)
+                        return -1;
+
+                ret = check_and_add_debug_xl (graph, set_dict, volname,
+                                              "posix");
+                if (ret)
+                        return -1;
+        }
         xl = volgen_graph_add (graph, "features/access-control", volname);
         if (!xl)
                 return -1;
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index d42694353af..8c76c8f09e1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -12,6 +12,10 @@
 #include "config.h"
 #endif
 
+#ifdef HAVE_BD_XLATOR
+#include <lvm2app.h>
+#endif
+
 #include "common-utils.h"
 #include "syscall.h"
 #include "cli1-xdr.h"
@@ -554,6 +558,36 @@ out:
         return ret;
 }
 
+#ifdef HAVE_BD_XLATOR
+int
+glusterd_is_valid_vg (const char *name)
+{
+        lvm_t    handle = NULL;
+        vg_t     vg = NULL;
+        char    *vg_name = NULL;
+        int      retval = -1;
+
+        handle = lvm_init (NULL);
+        if (!handle) {
+                gf_log ("", GF_LOG_ERROR, "lvm_init failed");
+                return -1;
+        }
+        vg_name = gf_strdup (name);
+        vg = lvm_vg_open (handle, basename (vg_name), "r", 0);
+        if (!vg) {
+                gf_log ("", GF_LOG_ERROR, "no such vg: %s", vg_name);
+                goto out;
+        }
+        retval = 0;
+out:
+        if (vg)
+                lvm_vg_close (vg);
+        lvm_quit (handle);
+        GF_FREE (vg_name);
+        return retval;
+}
+#endif
+
 /* op-sm */
 int
 glusterd_op_stage_create_volume (dict_t *dict, char **op_errstr)
@@ -575,7 +609,9 @@ glusterd_op_stage_create_volume (dict_t *dict, char **op_errstr)
         char                                    msg[2048] = {0};
         uuid_t                                  volume_uuid;
         char                                    *volume_uuid_str;
-
+#ifdef HAVE_BD_XLATOR
+        char                                    *dev_type = NULL;
+#endif
         this = THIS;
         if (!this) {
                 gf_log ("glusterd", GF_LOG_ERROR,
@@ -625,6 +661,11 @@ glusterd_op_stage_create_volume (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
+
+#ifdef HAVE_BD_XLATOR
+        ret = dict_get_str (dict, "device", &dev_type);
+#endif
+
         ret = dict_get_str (dict, "bricks", &bricks);
         if (ret) {
                 gf_log ("", GF_LOG_ERROR, "Unable to get bricks");
@@ -670,6 +711,19 @@ glusterd_op_stage_create_volume (dict_t *dict, char **op_errstr)
                         goto out;
                 }
 
+#ifdef HAVE_BD_XLATOR
+                if (dev_type) {
+                        ret = glusterd_is_valid_vg (brick_info->path);
+                        if (ret) {
+                                snprintf (msg, sizeof(msg), "invalid vg %s",
+                                          brick_info->path);
+                                *op_errstr = gf_strdup (msg);
+                                goto out;
+                        }
+
+                        break;
+                } else
+#endif
                 if (!uuid_compare (brick_info->uuid, MY_UUID)) {
                         ret = glusterd_brick_create_path (brick_info->hostname,
                                                           brick_info->path,
@@ -1209,6 +1263,9 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
         char                 *str = NULL;
         char                 *username = NULL;
         char                 *password = NULL;
+#ifdef HAVE_BD_XLATOR
+        char                 *device = NULL;
+#endif
 
         this = THIS;
         GF_ASSERT (this);
@@ -1261,6 +1318,12 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
                 goto out;
         }
 
+#ifdef HAVE_BD_XLATOR
+        ret = dict_get_str (dict, "device", &device);
+        if (!ret)
+                volinfo->backend = GD_VOL_BK_BD;
+#endif
+
         /* replica-count 1 means, no replication, file is in one brick only */
         volinfo->replica_count = 1;
         /* stripe-count 1 means, no striping, file is present as a whole */
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index 95353f06514..ea0ea6061d3 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -229,6 +229,12 @@ struct _auth {
 
 typedef struct _auth auth_t;
 
+typedef enum glusterd_vol_backend_ {
+        GD_VOL_BK_DEFAULT = 0, /* POSIX */
+        GD_VOL_BK_BD = 1,
+} glusterd_vol_backend_t;
+
+
 struct glusterd_volinfo_ {
         char                    volname[GLUSTERD_MAX_VOLUME_NAME];
         int                     type;
@@ -278,6 +284,7 @@ struct glusterd_volinfo_ {
         xlator_t                *xl;
 
         gf_boolean_t             memory_accounting;
+        glusterd_vol_backend_t   backend;
 };
 
 typedef enum gd_node_type_ {