Diffstat (limited to 'xlators/mgmt/glusterd/src/glusterd-volume-ops.c')
-rw-r--r--  xlators/mgmt/glusterd/src/glusterd-volume-ops.c  447
1 file changed, 274 insertions(+), 173 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
index fd4e0268a..0d322b9ad 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volume-ops.c
@@ -30,8 +30,9 @@
#define glusterd_op_start_volume_args_get(dict, volname, flags) \
glusterd_op_stop_volume_args_get (dict, volname, flags)
+
int
-glusterd_handle_create_volume (rpcsvc_request_t *req)
+__glusterd_handle_create_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
gf_cli_req cli_req = {{0,}};
@@ -192,7 +193,14 @@ out:
}
int
-glusterd_handle_cli_start_volume (rpcsvc_request_t *req)
+glusterd_handle_create_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_create_volume);
+}
+
+int
+__glusterd_handle_cli_start_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
gf_cli_req cli_req = {{0,}};
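Every handler in this patch follows the same mechanical pattern: the body moves to a double-underscore name, and a thin wrapper with the original name dispatches through glusterd_big_locked_handler, so each handler body now runs under glusterd's big lock. A minimal sketch of what such a dispatcher looks like (a sketch only, assuming glusterfs's syncop.h; the file-scope synclock_t stands in for the lock in glusterd's private context):

static synclock_t big_lock;     /* stands in for priv->big_lock */

int
glusterd_big_locked_handler (rpcsvc_request_t *req,
                             int (*actor_fn) (rpcsvc_request_t *))
{
        int ret = -1;

        synclock_lock (&big_lock);   /* serialize glusterd operations */
        ret = actor_fn (req);        /* the real __handler body runs here */
        synclock_unlock (&big_lock);

        return ret;
}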
@@ -257,9 +265,15 @@ out:
return ret;
}
+int
+glusterd_handle_cli_start_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_start_volume);
+}
int
-glusterd_handle_cli_stop_volume (rpcsvc_request_t *req)
+__glusterd_handle_cli_stop_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
gf_cli_req cli_req = {{0,}};
@@ -327,7 +341,14 @@ out:
}
int
-glusterd_handle_cli_delete_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_stop_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_stop_volume);
+}
+
+int
+__glusterd_handle_cli_delete_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
gf_cli_req cli_req = {{0,},};
@@ -397,7 +418,14 @@ out:
}
int
-glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_delete_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_delete_volume);
+}
+
+int
+__glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
gf_cli_req cli_req = {{0,}};
@@ -481,7 +509,14 @@ out:
}
int
-glusterd_handle_cli_statedump_volume (rpcsvc_request_t *req)
+glusterd_handle_cli_heal_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_heal_volume);
+}
+
+int
+__glusterd_handle_cli_statedump_volume (rpcsvc_request_t *req)
{
int32_t ret = -1;
gf_cli_req cli_req = {{0,}};
@@ -562,32 +597,104 @@ out:
return ret;
}
+int
+glusterd_handle_cli_statedump_volume (rpcsvc_request_t *req)
+{
+ return glusterd_big_locked_handler (req,
+ __glusterd_handle_cli_statedump_volume);
+}
+
#ifdef HAVE_BD_XLATOR
+/*
+ * Validates that the given VG in the brick exists. Also checks whether the VG
+ * has the GF_XATTR_VOL_ID_KEY tag set, to avoid using the same VG for
+ * multiple bricks. The tag is checked only during
+ * glusterd_op_stage_create_volume and is set during
+ * glusterd_validate_and_create_brickpath().
+ * @brick - brick info, @check_tag - whether to check for the VG tag
+ * @msg - error message returned to the caller
+ */
int
-glusterd_is_valid_vg (const char *name)
+glusterd_is_valid_vg (glusterd_brickinfo_t *brick, int check_tag, char *msg)
{
- lvm_t handle = NULL;
- vg_t vg = NULL;
- char *vg_name = NULL;
- int retval = -1;
+ lvm_t handle = NULL;
+ vg_t vg = NULL;
+ char *vg_name = NULL;
+ int retval = 0;
+ char *p = NULL;
+ char *ptr = NULL;
+ struct dm_list *dm_lvlist = NULL;
+ struct dm_list *dm_seglist = NULL;
+ struct lvm_lv_list *lv_list = NULL;
+ struct lvm_property_value prop = {0, };
+ struct lvm_lvseg_list *seglist = NULL;
+ struct dm_list *taglist = NULL;
+ struct lvm_str_list *strl = NULL;
handle = lvm_init (NULL);
if (!handle) {
- gf_log ("", GF_LOG_ERROR, "lvm_init failed");
+ sprintf (msg, "lvm_init failed, could not validate vg");
return -1;
}
- vg_name = gf_strdup (name);
- vg = lvm_vg_open (handle, basename (vg_name), "r", 0);
+ if (*brick->vg == '\0') { /* BD xlator has vg in brick->path */
+ p = gf_strdup (brick->path);
+ vg_name = strtok_r (p, "/", &ptr);
+ } else
+ vg_name = brick->vg;
+
+ vg = lvm_vg_open (handle, vg_name, "r", 0);
if (!vg) {
- gf_log ("", GF_LOG_ERROR, "no such vg: %s", vg_name);
- goto out;
+ sprintf (msg, "no such vg: %s", vg_name);
+ retval = -1;
+ goto out;
+ }
+ if (!check_tag)
+ goto next;
+
+ taglist = lvm_vg_get_tags (vg);
+ if (!taglist)
+ goto next;
+
+ dm_list_iterate_items (strl, taglist) {
+ if (!strncmp(strl->str, GF_XATTR_VOL_ID_KEY,
+ strlen (GF_XATTR_VOL_ID_KEY))) {
+ sprintf (msg, "VG %s is already part of"
+ " a brick", vg_name);
+ retval = -1;
+ goto out;
+ }
+ }
+next:
+
+ brick->caps = CAPS_BD | CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT;
+
+ dm_lvlist = lvm_vg_list_lvs (vg);
+ if (!dm_lvlist)
+ goto out;
+
+ dm_list_iterate_items (lv_list, dm_lvlist) {
+ dm_seglist = lvm_lv_list_lvsegs (lv_list->lv);
+ dm_list_iterate_items (seglist, dm_seglist) {
+ prop = lvm_lvseg_get_property (seglist->lvseg,
+ "segtype");
+ if (!prop.is_valid || !prop.value.string)
+ continue;
+ if (!strcmp (prop.value.string, "thin-pool")) {
+ brick->caps |= CAPS_THIN;
+ gf_log (THIS->name, GF_LOG_INFO, "Thin Pool "
+ "\"%s\" will be used for thin LVs",
+ lvm_lv_get_name (lv_list->lv));
+ break;
+ }
+ }
}
+
retval = 0;
out:
if (vg)
lvm_vg_close (vg);
lvm_quit (handle);
- GF_FREE (vg_name);
+ if (p)
+ GF_FREE (p);
return retval;
}
#endif
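The reworked glusterd_is_valid_vg drives lvm2app directly: open the VG read-only, walk its tags to reject a VG already claimed by another brick, then walk LV segments for a "thin-pool" segtype to decide the CAPS_THIN bit. The tag walk, reduced to a self-contained sketch that uses only the lvm2app calls seen above (error reporting trimmed; vg_has_tag is a hypothetical helper name):

#include <lvm2app.h>
#include <string.h>

/* Returns 1 if the VG carries a tag with the given prefix, 0 if not,
 * -1 on error. */
int
vg_has_tag (const char *vgname, const char *tag)
{
        lvm_t handle = NULL;
        vg_t vg = NULL;
        struct dm_list *taglist = NULL;
        struct lvm_str_list *strl = NULL;
        int found = 0;

        handle = lvm_init (NULL);
        if (!handle)
                return -1;

        vg = lvm_vg_open (handle, vgname, "r", 0);
        if (!vg) {
                lvm_quit (handle);
                return -1;
        }

        taglist = lvm_vg_get_tags (vg);
        if (taglist) {
                dm_list_iterate_items (strl, taglist) {
                        if (!strncmp (strl->str, tag, strlen (tag)))
                                found = 1;
                }
        }

        lvm_vg_close (vg);
        lvm_quit (handle);
        return found;
}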
@@ -612,9 +719,6 @@ glusterd_op_stage_create_volume (dict_t *dict, char **op_errstr)
char msg[2048] = {0};
uuid_t volume_uuid;
char *volume_uuid_str;
-#ifdef HAVE_BD_XLATOR
- char *dev_type = NULL;
-#endif
gf_boolean_t is_force = _gf_false;
this = THIS;
@@ -659,10 +763,6 @@ glusterd_op_stage_create_volume (dict_t *dict, char **op_errstr)
goto out;
}
-#ifdef HAVE_BD_XLATOR
- ret = dict_get_str (dict, "device", &dev_type);
-#endif
-
ret = dict_get_str (dict, "bricks", &bricks);
if (ret) {
gf_log (this->name, GF_LOG_ERROR, "Unable to get bricks for "
@@ -711,19 +811,15 @@ glusterd_op_stage_create_volume (dict_t *dict, char **op_errstr)
goto out;
}
+ if (!uuid_compare (brick_info->uuid, MY_UUID)) {
+
#ifdef HAVE_BD_XLATOR
- if (dev_type) {
- ret = glusterd_is_valid_vg (brick_info->path);
- if (ret) {
- snprintf (msg, sizeof(msg), "invalid vg %s",
- brick_info->path);
- goto out;
+ if (brick_info->vg[0]) {
+ ret = glusterd_is_valid_vg (brick_info, 1, msg);
+ if (ret)
+ goto out;
}
-
- break;
- } else
#endif
- if (!uuid_compare (brick_info->uuid, MY_UUID)) {
ret = glusterd_validate_and_create_brickpath (brick_info,
volume_uuid, op_errstr,
is_force);
@@ -821,6 +917,7 @@ glusterd_op_stage_start_volume (dict_t *dict, char **op_errstr)
uuid_t volume_id = {0,};
char volid[50] = {0,};
char xattr_volid[50] = {0,};
+ int caps = 0;
this = THIS;
GF_ASSERT (this);
@@ -870,11 +967,10 @@ glusterd_op_stage_start_volume (dict_t *dict, char **op_errstr)
if (uuid_compare (brickinfo->uuid, MY_UUID))
continue;
- if (volinfo->backend == GD_VOL_BK_BD)
- continue;
-
ret = gf_lstat_dir (brickinfo->path, NULL);
- if (ret) {
+ if (ret && (flags & GF_CLI_FLAG_OP_FORCE)) {
+ continue;
+ } else if (ret) {
snprintf (msg, sizeof (msg), "Failed to find "
"brick directory %s for volume %s. "
"Reason : %s", brickinfo->path,
@@ -883,13 +979,27 @@ glusterd_op_stage_start_volume (dict_t *dict, char **op_errstr)
}
ret = sys_lgetxattr (brickinfo->path, GF_XATTR_VOL_ID_KEY,
volume_id, 16);
- if (ret < 0) {
+ if (ret < 0 && (!(flags & GF_CLI_FLAG_OP_FORCE))) {
snprintf (msg, sizeof (msg), "Failed to get "
"extended attribute %s for brick dir %s. "
"Reason : %s", GF_XATTR_VOL_ID_KEY,
brickinfo->path, strerror (errno));
ret = -1;
goto out;
+ } else if (ret < 0) {
+ ret = sys_lsetxattr (brickinfo->path,
+ GF_XATTR_VOL_ID_KEY,
+ volinfo->volume_id, 16,
+ XATTR_CREATE);
+ if (ret) {
+ snprintf (msg, sizeof (msg), "Failed to set "
+ "extended attribute %s on %s. Reason: "
+ "%s", GF_XATTR_VOL_ID_KEY,
+ brickinfo->path, strerror (errno));
+ goto out;
+ } else {
+ continue;
+ }
}
if (uuid_compare (volinfo->volume_id, volume_id)) {
snprintf (msg, sizeof (msg), "Volume id mismatch for "
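Under GF_CLI_FLAG_OP_FORCE, a brick missing the volume-id xattr is now stamped with the expected id (using XATTR_CREATE, so a concurrent setter loses the race) instead of failing the start. The get-or-stamp shape, sketched over the raw lgetxattr(2)/lsetxattr(2) calls that sys_lgetxattr/sys_lsetxattr wrap (check_or_stamp_volume_id is a hypothetical helper name):

#include <sys/xattr.h>
#include <string.h>

static int
check_or_stamp_volume_id (const char *brickpath, const char *key,
                          const unsigned char expected[16], int force)
{
        unsigned char on_disk[16] = {0};

        if (lgetxattr (brickpath, key, on_disk, sizeof (on_disk)) < 0) {
                if (!force)
                        return -1;   /* brick was never stamped: reject */
                /* force: stamp the expected id; XATTR_CREATE fails if
                 * the attribute appeared in the meantime */
                return lsetxattr (brickpath, key, expected,
                                  sizeof (on_disk), XATTR_CREATE);
        }

        return memcmp (expected, on_disk, sizeof (on_disk)) ? -1 : 0;
}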
@@ -901,8 +1011,24 @@ glusterd_op_stage_start_volume (dict_t *dict, char **op_errstr)
ret = -1;
goto out;
}
+#ifdef HAVE_BD_XLATOR
+ if (brickinfo->vg[0])
+ caps = CAPS_BD | CAPS_THIN |
+ CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT;
+ /* Check for VG/thin pool if it's a BD volume */
+ if (brickinfo->vg[0]) {
+ ret = glusterd_is_valid_vg (brickinfo, 0, msg);
+ if (ret)
+ goto out;
+ /* if any one of the bricks does not have thin support,
+ disable it for the entire volume */
+ caps &= brickinfo->caps;
+ } else
+ caps = 0;
+#endif
}
+ volinfo->caps = caps;
ret = 0;
out:
if (ret && (msg[0] != '\0')) {
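caps begins as the full BD mask and is intersected with each brick's mask, so the volume advertises only what every brick supports; a single non-BD brick zeroes it for the whole volume. The accumulation in miniature (the CAPS_* bit values below are illustrative, not the real BD xlator constants):

/* Illustrative bit values; the real ones come from the BD xlator. */
#define CAPS_BD                0x1
#define CAPS_THIN              0x2
#define CAPS_OFFLOAD_COPY      0x4
#define CAPS_OFFLOAD_SNAPSHOT  0x8

int
volume_caps (const int *brick_caps, int nbricks)
{
        int caps = CAPS_BD | CAPS_THIN | CAPS_OFFLOAD_COPY |
                   CAPS_OFFLOAD_SNAPSHOT;
        int i = 0;

        for (i = 0; i < nbricks; i++)
                caps &= brick_caps[i];   /* capability intersection */

        return caps;
}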
@@ -1057,6 +1183,16 @@ glusterd_op_stage_delete_volume (dict_t *dict, char **op_errstr)
goto out;
}
+ if (volinfo->snap_count > 0 || !list_empty(&volinfo->snap_volumes)) {
+ snprintf (msg, sizeof (msg), "Cannot delete Volume %s, "
+ "as it has %ld snapshots. "
+ "To delete the volume, "
+ "first delete all the snapshots under it.",
+ volname, volinfo->snap_count);
+ ret = -1;
+ goto out;
+ }
+
ret = 0;
out:
@@ -1154,14 +1290,22 @@ glusterd_op_stage_heal_volume (dict_t *dict, char **op_errstr)
goto out;
}
- if ((heal_op != GF_AFR_OP_INDEX_SUMMARY) &&
- !glusterd_is_nodesvc_online ("glustershd")) {
- ret = -1;
- *op_errstr = gf_strdup ("Self-heal daemon is not running."
- " Check self-heal daemon log file.");
- gf_log (this->name, GF_LOG_WARNING, "%s", "Self-heal daemon is "
- "not running. Check self-heal daemon log file.");
- goto out;
+ switch (heal_op) {
+ case GF_AFR_OP_INDEX_SUMMARY:
+ case GF_AFR_OP_STATISTICS_HEAL_COUNT:
+ case GF_AFR_OP_STATISTICS_HEAL_COUNT_PER_REPLICA:
+ break;
+ default:
+ if (!glusterd_is_nodesvc_online ("glustershd")) {
+ ret = -1;
+ *op_errstr = gf_strdup ("Self-heal daemon is "
+ "not running. Check self-heal "
+ "daemon log file.");
+ gf_log (this->name, GF_LOG_WARNING, "%s",
+ "Self-heal daemon is not running."
+ "Check self-heal daemon log file.");
+ goto out;
+ }
}
ret = 0;
@@ -1285,109 +1429,6 @@ out:
return ret;
}
-#ifdef HAVE_BD_XLATOR
-int
-glusterd_op_stage_bd (dict_t *dict, char **op_errstr)
-{
- int ret = -1;
- char *volname = NULL;
- char *path = NULL;
- char *size = NULL;
- glusterd_volinfo_t *volinfo = NULL;
- char msg[2048] = {0,};
- gf_xl_bd_op_t bd_op = GF_BD_OP_INVALID;
- uint64_t bytes = 0;
-
- ret = dict_get_str (dict, "volname", &volname);
- if (ret) {
- snprintf (msg, sizeof(msg), "Failed to get volume name");
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
-
- ret = dict_get_int32 (dict, "bd-op", (int32_t *)&bd_op);
- if (ret) {
- snprintf (msg, sizeof(msg), "Failed to get bd-op");
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
-
- ret = dict_get_str (dict, "path", &path);
- if (ret) {
- snprintf (msg, sizeof(msg), "Failed to get path");
- gf_log (THIS->name, GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
-
- if (bd_op == GF_BD_OP_NEW_BD) {
- ret = dict_get_str (dict, "size", &size);
- if (ret) {
- snprintf (msg, sizeof(msg), "Failed to get size");
- gf_log ("", GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
- if (gf_string2bytesize (size, &bytes) < 0) {
- snprintf (msg, sizeof(msg),
- "Invalid size %s, suffix with KB, MB etc",
- size);
- gf_log ("", GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- }
- } else if (bd_op == GF_BD_OP_SNAPSHOT_BD) {
- ret = dict_get_str (dict, "size", &size);
- if (ret) {
- snprintf (msg, sizeof(msg), "Failed to get size");
- gf_log ("", GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
-
- if (gf_string2bytesize (size, &bytes) < 0) {
- ret = -1;
- snprintf (msg, sizeof(msg),
- "Invalid size %s, suffix with KB, MB etc",
- size);
- gf_log ("", GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
- }
-
- ret = glusterd_volinfo_find (volname, &volinfo);
- if (ret) {
- snprintf (msg, sizeof(msg), "Volume %s does not exist",
- volname);
- gf_log ("", GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup (msg);
- goto out;
- }
-
- ret = glusterd_validate_volume_id (dict, volinfo);
- if (ret)
- goto out;
-
- if (!glusterd_is_volume_started (volinfo)) {
- snprintf (msg, sizeof(msg), "Volume %s is not started",
- volname);
- gf_log ("", GF_LOG_ERROR, "%s", msg);
- *op_errstr = gf_strdup (msg);
- ret = -1;
- goto out;
- }
-
- ret = 0;
-out:
- gf_log ("", GF_LOG_DEBUG, "Returning %d", ret);
- return ret;
-}
-#endif
-
int
glusterd_op_create_volume (dict_t *dict, char **op_errstr)
{
@@ -1409,9 +1450,8 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
char *str = NULL;
char *username = NULL;
char *password = NULL;
-#ifdef HAVE_BD_XLATOR
- char *device = NULL;
-#endif
+ int caps = 0;
+ char msg[1024] __attribute__((unused)) = {0, };
this = THIS;
GF_ASSERT (this);
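msg is marked __attribute__((unused)) because its only reader, the HAVE_BD_XLATOR block further down, may be compiled out; without the attribute, -Wall -Werror builds lacking BD support would reject the variable. A minimal illustration of the idiom:

#include <stdio.h>

int
main (void)
{
        char msg[1024] __attribute__((unused)) = {0, };
#ifdef HAVE_BD_XLATOR
        snprintf (msg, sizeof (msg), "only read in BD builds");
        printf ("%s\n", msg);
#endif
        return 0;
}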
@@ -1466,12 +1506,6 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
goto out;
}
-#ifdef HAVE_BD_XLATOR
- ret = dict_get_str (dict, "device", &device);
- if (!ret)
- volinfo->backend = GD_VOL_BK_BD;
-#endif
-
/* replica-count 1 means, no replication, file is in one brick only */
volinfo->replica_count = 1;
/* stripe-count 1 means, no striping, file is present as a whole */
@@ -1580,6 +1614,7 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
if (count)
brick = strtok_r (brick_list+1, " \n", &saveptr);
+ caps = CAPS_BD | CAPS_THIN | CAPS_OFFLOAD_COPY | CAPS_OFFLOAD_SNAPSHOT;
while ( i <= count) {
ret = glusterd_brickinfo_new_from_brick (brick, &brickinfo);
@@ -1592,11 +1627,36 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
brickinfo->hostname, brickinfo->path);
goto out;
}
+
+#ifdef HAVE_BD_XLATOR
+ if (!uuid_compare (brickinfo->uuid, MY_UUID)) {
+ if (brickinfo->vg[0]) {
+ ret = glusterd_is_valid_vg (brickinfo, 0, msg);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "%s",
+ msg);
+ goto out;
+ }
+
+ /* if any one of the bricks does not have thin
+ support, disable it for the entire volume */
+ caps &= brickinfo->caps;
+
+
+ } else
+ caps = 0;
+ }
+#endif
+
list_add_tail (&brickinfo->brick_list, &volinfo->bricks);
brick = strtok_r (NULL, " \n", &saveptr);
i++;
}
+ gd_update_volume_op_versions (volinfo);
+
+ volinfo->caps = caps;
+
ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
if (ret) {
glusterd_store_delete_volume (volinfo);
@@ -1613,6 +1673,7 @@ glusterd_op_create_volume (dict_t *dict, char **op_errstr)
volinfo->rebal.defrag_status = 0;
list_add_tail (&volinfo->vol_list, &priv->volumes);
vol_added = _gf_true;
+
out:
GF_FREE(free_ptr);
if (!vol_added && volinfo)
@@ -1646,7 +1707,10 @@ glusterd_op_start_volume (dict_t *dict, char **op_errstr)
list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
ret = glusterd_brick_start (volinfo, brickinfo, _gf_true);
- if (ret)
+ /* If 'force', try to start all bricks regardless of success or
+ * failure
+ */
+ if (!(flags & GF_CLI_FLAG_OP_FORCE) && ret)
goto out;
}
@@ -1663,6 +1727,47 @@ out:
return ret;
}
+int
+glusterd_stop_volume (glusterd_volinfo_t *volinfo)
+{
+ int ret = -1;
+ glusterd_brickinfo_t *brickinfo = NULL;
+ xlator_t *this = NULL;
+
+ this = THIS;
+ GF_ASSERT (this);
+
+ GF_VALIDATE_OR_GOTO (this->name, volinfo, out);
+
+ list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+ ret = glusterd_brick_stop (volinfo, brickinfo, _gf_false);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to stop "
+ "brick (%s)", brickinfo->path);
+ goto out;
+ }
+ }
+
+ glusterd_set_volume_status (volinfo, GLUSTERD_STATUS_STOPPED);
+
+ ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to store volinfo of "
+ "%s volume", volinfo->volname);
+ goto out;
+ }
+
+ ret = glusterd_nodesvcs_handle_graph_change (volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to notify graph "
+ "change for %s volume", volinfo->volname);
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
int
glusterd_op_stop_volume (dict_t *dict)
@@ -1671,7 +1776,6 @@ glusterd_op_stop_volume (dict_t *dict)
int flags = 0;
char *volname = NULL;
glusterd_volinfo_t *volinfo = NULL;
- glusterd_brickinfo_t *brickinfo = NULL;
xlator_t *this = NULL;
this = THIS;
@@ -1688,19 +1792,12 @@ glusterd_op_stop_volume (dict_t *dict)
goto out;
}
- list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
- ret = glusterd_brick_stop (volinfo, brickinfo, _gf_false);
- if (ret)
- goto out;
- }
-
- glusterd_set_volume_status (volinfo, GLUSTERD_STATUS_STOPPED);
-
- ret = glusterd_store_volinfo (volinfo, GLUSTERD_VOLINFO_VER_AC_INCREMENT);
- if (ret)
+ ret = glusterd_stop_volume (volinfo);
+ if (ret) {
+ gf_log (this->name, GF_LOG_ERROR, "Failed to stop %s volume",
+ volname);
goto out;
-
- ret = glusterd_nodesvcs_handle_graph_change (volinfo);
+ }
out:
return ret;
}
@@ -1852,7 +1949,9 @@ glusterd_clearlocks_unmount (glusterd_volinfo_t *volinfo, char *mntpt)
runner_add_args (&runner, "/bin/umount", "-f", NULL);
runner_argprintf (&runner, "%s", mntpt);
+ synclock_unlock (&priv->big_lock);
ret = runner_run (&runner);
+ synclock_lock (&priv->big_lock);
if (ret) {
ret = 0;
gf_log ("", GF_LOG_DEBUG,
@@ -1924,7 +2023,9 @@ glusterd_clearlocks_mount (glusterd_volinfo_t *volinfo, char **xl_opts,
}
runner_argprintf (&runner, "%s", mntpt);
+ synclock_unlock (&priv->big_lock);
ret = runner_run (&runner);
+ synclock_lock (&priv->big_lock);
if (ret) {
gf_log (THIS->name, GF_LOG_DEBUG,
"Could not start glusterfs");