| | | |
|---|---|---|
| author | Ravishankar N <ravishankar@redhat.com> | 2016-04-29 17:41:18 +0530 |
| committer | Atin Mukherjee <amukherj@redhat.com> | 2016-05-19 09:40:04 -0700 |
| commit | 61c1b2cee973b11897a37d508910012e616033bc | |
| tree | fbaf9adc16738884c9c073677ac01704f8f6f560 | |
| parent | 6f1a71210ee0a0f3741b5ece3b5240c1e4b5fa6d | |
cli/glusterd: add/remove brick fixes for arbiter volumes
1. Provide a command to convert replica 2 volumes to arbiter volumes.
Existing self-heal logic will automatically heal the file hierarchy into
the arbiter brick, the progress of which can be monitored using the
heal info command.
Syntax: gluster volume add-brick <VOLNAME> replica 3 arbiter 1 <HOST:arbiter-brick-path>
(see the usage sketch after this list)
2. Add checks when removing bricks from arbiter volumes:
- When converting an arbiter volume to a replica 2 volume, allow only the
  arbiter brick to be removed.
- When converting an arbiter volume to a plain distribute volume, allow the
  removal only if the arbiter brick of every replica set is among the bricks
  being removed.
3. Some clean-up:
- Use GD_MSG_DICT_GET_SUCCESS instead of GD_MSG_DICT_GET_FAILED to
log messages that are not failures.
- Remove unused variable `brick_list`
- Move 'brickinfo->group' related functions to glusterd-utils.
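
For illustration, a minimal usage sketch of the conversions described above;
the volume name (testvol) and host/brick path (server1:/bricks/...) are
placeholders, not taken from this patch:

```sh
# Convert a replica 2 volume to an arbiter volume by adding one arbiter brick
# (the 3rd brick of each replica set becomes the arbiter).
gluster volume add-brick testvol replica 3 arbiter 1 server1:/bricks/testvol/arbiter0

# Self-heal populates the file hierarchy on the new arbiter brick;
# its progress can be monitored with:
gluster volume heal testvol info

# Convert back to a plain replica 2 volume; only the arbiter brick may be removed.
gluster volume remove-brick testvol replica 2 server1:/bricks/testvol/arbiter0 force
```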
Change-Id: Ic87b8c7e4d7d3ab03f93e7b9f372b314d80947ce
BUG: 1318289
Signed-off-by: Ravishankar N <ravishankar@redhat.com>
Reviewed-on: http://review.gluster.org/14126
Smoke: Gluster Build System <jenkins@build.gluster.com>
CentOS-regression: Gluster Build System <jenkins@build.gluster.com>
NetBSD-regression: NetBSD Build System <jenkins@build.gluster.org>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | cli/src/cli-cmd-parser.c | 17 |
| -rw-r--r-- | cli/src/cli-cmd-volume.c | 3 |
| -rw-r--r-- | tests/basic/afr/arbiter-add-brick.t | 60 |
| -rw-r--r-- | tests/basic/afr/arbiter-remove-brick.t | 36 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-brick-ops.c | 171 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-messages.h | 11 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-store.c | 1 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.c | 37 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-utils.h | 6 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-volgen.c | 41 |
| -rw-r--r-- | xlators/mgmt/glusterd/src/glusterd-volgen.h | 3 |
11 files changed, 319 insertions, 67 deletions
```diff
diff --git a/cli/src/cli-cmd-parser.c b/cli/src/cli-cmd-parser.c
index 6b2d6dfe7da..b062adfab0f 100644
--- a/cli/src/cli-cmd-parser.c
+++ b/cli/src/cli-cmd-parser.c
@@ -1583,6 +1583,7 @@ cli_cmd_volume_add_brick_parse (const char **words, int wordcount,
         char             *opwords_cl[] = { "replica", "stripe", NULL };
         gf1_cluster_type  type = GF_CLUSTER_TYPE_NONE;
         int               count = 1;
+        int               arbiter_count = 0;
         char             *w = NULL;
         int               index;
         gf_boolean_t      is_force = _gf_false;
@@ -1635,6 +1636,22 @@ cli_cmd_volume_add_brick_parse (const char **words, int wordcount,
                         if (ret)
                                 goto out;
                         index = 5;
+                        if (words[index] && !strcmp (words[index], "arbiter")) {
+                                arbiter_count = strtol (words[6], NULL, 0);
+                                if (arbiter_count != 1 || count != 3) {
+                                        cli_err ("For arbiter configuration, replica "
+                                                 "count must be 3 and arbiter count "
+                                                 "must be 1. The 3rd brick of the "
+                                                 "replica will be the arbiter");
+                                        ret = -1;
+                                        goto out;
+                                }
+                                ret = dict_set_int32 (dict, "arbiter-count",
+                                                      arbiter_count);
+                                if (ret)
+                                        goto out;
+                                index = 7;
+                        }
                 } else if ((strcmp (w, "stripe")) == 0) {
                         type = GF_CLUSTER_TYPE_STRIPE;
                         count = strtol (words[4], NULL, 0);
diff --git a/cli/src/cli-cmd-volume.c b/cli/src/cli-cmd-volume.c
index bc4f42c5967..689eba6d281 100644
--- a/cli/src/cli-cmd-volume.c
+++ b/cli/src/cli-cmd-volume.c
@@ -2586,7 +2586,8 @@ struct cli_cmd volume_cmds[] = {
           "{start|stop|commit} [force]"},
 #endif
 
-        { "volume add-brick <VOLNAME> [<stripe|replica> <COUNT>] <NEW-BRICK> ... [force]",
+        { "volume add-brick <VOLNAME> [<stripe|replica> <COUNT> "
+          "[arbiter <COUNT>]] <NEW-BRICK> ... [force]",
           cli_cmd_volume_add_brick_cbk,
           "add brick to volume <VOLNAME>"},
 
diff --git a/tests/basic/afr/arbiter-add-brick.t b/tests/basic/afr/arbiter-add-brick.t
new file mode 100644
index 00000000000..357f59b0852
--- /dev/null
+++ b/tests/basic/afr/arbiter-add-brick.t
@@ -0,0 +1,60 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+#Create replica 2 volume and create file/dir.
+TEST $CLI volume create $V0 replica 2 $H0:$B0/${V0}{0,1}
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+TEST mkdir $M0/dir1
+TEST dd if=/dev/urandom of=$M0/file1 bs=1024 count=1
+
+#Kill second brick and perform I/O to have pending heals.
+TEST kill_brick $V0 $H0 $B0/${V0}1
+TEST mkdir $M0/dir2
+TEST dd if=/dev/urandom of=$M0/file1 bs=1024 count=1024
+
+
+#syntax check for add-brick.
+TEST ! $CLI volume add-brick $V0 replica 2 arbiter 1 $H0:$B0/${V0}2
+TEST ! $CLI volume add-brick $V0 replica 3 arbiter 2 $H0:$B0/${V0}2
+
+#convert replica 2 to arbiter volume
+TEST $CLI volume add-brick $V0 replica 3 arbiter 1 $H0:$B0/${V0}2
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}2
+
+#Heal files
+TEST $CLI volume start $V0 force
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" brick_up_status $V0 $H0 $B0/${V0}1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "Y" glustershd_up_status
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 0
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 1
+EXPECT_WITHIN $CHILD_UP_TIMEOUT "1" afr_child_up_status_in_shd $V0 2
+EXPECT_WITHIN $HEAL_TIMEOUT "0" get_pending_heal_count $V0
+
+#Perform I/O after add-brick
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 0
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 1
+EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" afr_child_up_status $V0 2
+TEST mkdir $M0/dir3
+TEST dd if=/dev/urandom of=$M0/file2 bs=1024 count=1024
+
+# File hierarchy must be same in all 3 bricks.
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}2 | sort)
+TEST diff <(ls $B0/${V0}1 | sort) <(ls $B0/${V0}2 | sort)
+
+#Mount serves the correct file size
+EXPECT "1048576" stat -c %s $M0/file1
+EXPECT "1048576" stat -c %s $M0/file2
+
+#Check file size in arbiter brick
+EXPECT "0" stat -c %s $B0/${V0}2/file1
+EXPECT "0" stat -c %s $B0/${V0}2/file2
+
+TEST force_umount $M0
+cleanup;
diff --git a/tests/basic/afr/arbiter-remove-brick.t b/tests/basic/afr/arbiter-remove-brick.t
new file mode 100644
index 00000000000..5a6daa95cfd
--- /dev/null
+++ b/tests/basic/afr/arbiter-remove-brick.t
@@ -0,0 +1,36 @@
+#!/bin/bash
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+cleanup;
+
+TEST glusterd
+TEST pidof glusterd
+
+#Create arbiter volume.
+TEST $CLI volume create $V0 replica 3 arbiter 1 $H0:$B0/${V0}{0,1,2}
+EXPECT "1 x \(2 \+ 1\) = 3" volinfo_field $V0 "Number of Bricks"
+TEST $CLI volume set $V0 performance.stat-prefetch off
+TEST $CLI volume start $V0
+TEST glusterfs --volfile-id=$V0 --volfile-server=$H0 $M0;
+
+#syntax check for remove-brick.
+TEST ! $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}0 force
+TEST ! $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}1 force
+
+#convert to replica 2 volume
+TEST $CLI volume remove-brick $V0 replica 2 $H0:$B0/${V0}2 force
+EXPECT "1 x 2 = 2" volinfo_field $V0 "Number of Bricks"
+
+TEST mkdir $M0/dir
+TEST dd if=/dev/urandom of=$M0/file bs=1024 count=1024
+TEST diff <(ls $B0/${V0}0 | sort) <(ls $B0/${V0}1 | sort)
+
+#Mount serves the correct file size
+EXPECT "1048576" stat -c %s $M0/file
+
+#Check file size in bricks
+EXPECT "1048576" stat -c %s $B0/${V0}0/file
+EXPECT "1048576" stat -c %s $B0/${V0}1/file
+
+TEST force_umount $M0
+cleanup;
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index 5b2f559b546..a90114ab2b3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -219,8 +219,8 @@ out:
 static int
 gd_addbr_validate_replica_count (glusterd_volinfo_t *volinfo, int replica_count,
-                                 int total_bricks, int *type, char *err_str,
-                                 int err_len)
+                                 int arbiter_count, int total_bricks, int *type,
+                                 char *err_str, int err_len)
 {
         int ret = -1;
@@ -283,6 +283,14 @@ gd_addbr_validate_replica_count (glusterd_volinfo_t *volinfo, int replica_count,
                         goto out;
                 }
                 if (replica_count == volinfo->replica_count) {
+                        if (arbiter_count && !volinfo->arbiter_count) {
+                                snprintf (err_str, err_len,
+                                          "Cannot convert replica 3 volume "
+                                          "to arbiter volume.");
+                                gf_msg (THIS->name, GF_LOG_ERROR, EINVAL,
+                                        GD_MSG_INVALID_ENTRY, "%s", err_str);
+                                goto out;
+                        }
                         if (!(total_bricks % volinfo->dist_leaf_count)) {
                                 ret = 1;
                                 goto out;
                         }
@@ -413,6 +421,7 @@ __glusterd_handle_add_brick (rpcsvc_request_t *req)
         xlator_t        *this = NULL;
         int              total_bricks = 0;
         int32_t          replica_count = 0;
+        int32_t          arbiter_count = 0;
         int32_t          stripe_count = 0;
         int              type = 0;
         glusterd_conf_t *conf = NULL;
@@ -486,14 +495,21 @@ __glusterd_handle_add_brick (rpcsvc_request_t *req)
         ret = dict_get_int32 (dict, "replica-count", &replica_count);
         if (!ret) {
                 gf_msg (this->name, GF_LOG_INFO, errno,
-                        GD_MSG_DICT_GET_FAILED, "replica-count is %d",
+                        GD_MSG_DICT_GET_SUCCESS, "replica-count is %d",
                         replica_count);
         }
 
+        ret = dict_get_int32 (dict, "arbiter-count", &arbiter_count);
+        if (!ret) {
+                gf_msg (this->name, GF_LOG_INFO, errno,
+                        GD_MSG_DICT_GET_SUCCESS, "arbiter-count is %d",
+                        arbiter_count);
+        }
+
         ret = dict_get_int32 (dict, "stripe-count", &stripe_count);
         if (!ret) {
                 gf_msg (this->name, GF_LOG_INFO, errno,
-                        GD_MSG_DICT_GET_FAILED, "stripe-count is %d",
+                        GD_MSG_DICT_GET_SUCCESS, "stripe-count is %d",
                         stripe_count);
         }
@@ -602,7 +618,7 @@ __glusterd_handle_add_brick (rpcsvc_request_t *req)
         }
 
         ret = gd_addbr_validate_replica_count (volinfo, replica_count,
-                                               total_bricks,
+                                               arbiter_count, total_bricks,
                                                &type, err_str,
                                                sizeof (err_str));
         if (ret == -1) {
@@ -791,6 +807,71 @@ glusterd_set_detach_bricks(dict_t *dict, glusterd_volinfo_t *volinfo)
         return hot_brick_num;
 }
 
+static int
+glusterd_remove_brick_validate_arbiters (glusterd_volinfo_t *volinfo,
+                                         int32_t count, int32_t replica_count,
+                                         glusterd_brickinfo_t **brickinfo_list,
+                                         char *err_str, size_t err_len)
+{
+        int                    i             = 0;
+        int                    ret           = 0;
+        glusterd_brickinfo_t  *brickinfo     = NULL;
+        glusterd_brickinfo_t  *last          = NULL;
+        char                  *arbiter_array = NULL;
+
+        if ((volinfo->type != GF_CLUSTER_TYPE_REPLICATE) &&
+            (volinfo->type != GF_CLUSTER_TYPE_STRIPE_REPLICATE))
+                goto out;
+
+        if (!replica_count || !volinfo->arbiter_count)
+                goto out;
+
+        if (replica_count == 2) {
+                /* If it is an arbiter to replica 2 conversion, only permit
+                 * removal of the arbiter brick.*/
+                for (i = 0; i < count; i++) {
+                        brickinfo = brickinfo_list[i];
+                        last = get_last_brick_of_brick_group (volinfo,
+                                                              brickinfo);
+                        if (last != brickinfo) {
+                                snprintf (err_str, err_len, "Remove arbiter "
+                                          "brick(s) only when converting from "
+                                          "arbiter to replica 2 subvolume.");
+                                ret = -1;
+                                goto out;
+                        }
+                }
+        } else if (replica_count == 1) {
+                /* If it is an arbiter to plain distribute conversion, in every
+                 * replica subvol, the arbiter has to be one of the bricks that
+                 * are removed. */
+                arbiter_array = GF_CALLOC (volinfo->subvol_count,
+                                           sizeof (*arbiter_array),
+                                           gf_common_mt_char);
+                if (!arbiter_array)
+                        return -1;
+                for (i = 0; i < count; i++) {
+                        brickinfo = brickinfo_list[i];
+                        last = get_last_brick_of_brick_group (volinfo,
+                                                              brickinfo);
+                        if (last == brickinfo)
+                                arbiter_array[brickinfo->group] = 1;
+                }
+                for (i = 0; i < volinfo->subvol_count; i++)
+                        if (!arbiter_array[i]) {
+                                snprintf (err_str, err_len, "Removed bricks "
+                                          "must contain arbiter when converting"
+                                          " to plain distrubute.");
+                                ret = -1;
+                                break;
+                        }
+                GF_FREE (arbiter_array);
+        }
+
+out:
+        return ret;
+}
+
 int
 __glusterd_handle_remove_brick (rpcsvc_request_t *req)
 {
@@ -800,10 +881,10 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
         int32_t                    count = 0;
         char                      *brick = NULL;
         char                       key[256] = {0,};
-        char                      *brick_list = NULL;
         int                        i = 1;
         glusterd_volinfo_t        *volinfo = NULL;
         glusterd_brickinfo_t      *brickinfo = NULL;
+        glusterd_brickinfo_t     **brickinfo_list = NULL;
         int                       *subvols = NULL;
         char                       err_str[2048] = {0};
         gf_cli_rsp                 rsp = {0,};
@@ -998,16 +1079,6 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
                 }
         }
 
-        brick_list = GF_MALLOC (120000 * sizeof(*brick_list),gf_common_mt_char);
-
-        if (!brick_list) {
-                ret = -1;
-                goto out;
-        }
-
-
-        strcpy (brick_list, " ");
-
         /* subvol match is not required for tiered volume*/
         if ((volinfo->type != GF_CLUSTER_TYPE_NONE) &&
             (volinfo->type != GF_CLUSTER_TYPE_TIER) &&
@@ -1020,6 +1091,13 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
         if (volinfo->type == GF_CLUSTER_TYPE_TIER)
                 count = glusterd_set_detach_bricks(dict, volinfo);
 
+        brickinfo_list = GF_CALLOC (count, sizeof (*brickinfo_list),
+                                    gf_common_mt_pointer);
+        if (!brickinfo_list) {
+                ret = -1;
+                goto out;
+        }
+
         while ( i <= count) {
                 snprintf (key, sizeof (key), "brick%d", i);
                 ret = dict_get_str (dict, key, &brick);
@@ -1044,8 +1122,7 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
                                 GD_MSG_BRICK_NOT_FOUND, "%s", err_str);
                         goto out;
                 }
-                strcat(brick_list, brick);
-                strcat(brick_list, " ");
+                brickinfo_list[i-1] = brickinfo;
                 i++;
 
                 if ((volinfo->type == GF_CLUSTER_TYPE_NONE) ||
@@ -1072,6 +1149,14 @@ __glusterd_handle_remove_brick (rpcsvc_request_t *req)
                 goto out;
         }
 
+        ret = glusterd_remove_brick_validate_arbiters (volinfo, count,
+                                                       replica_count,
+                                                       brickinfo_list,
+                                                       err_str,
+                                                       sizeof (err_str));
+        if (ret)
+                goto out;
+
         ret = glusterd_op_begin_synctask (req, GD_OP_REMOVE_BRICK, dict);
 
 out:
@@ -1092,8 +1177,8 @@ out:
         }
 
-        if (brick_list)
-                GF_FREE (brick_list);
+        if (brickinfo_list)
+                GF_FREE (brickinfo_list);
         subvol_matcher_destroy (subvols);
         free (cli_req.dict.dict_val); //its malloced by xdr
 
@@ -1224,6 +1309,7 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
         int32_t                       ret = -1;
         int32_t                       stripe_count = 0;
         int32_t                       replica_count = 0;
+        int32_t                       arbiter_count = 0;
         int32_t                       type = 0;
         glusterd_brickinfo_t         *brickinfo = NULL;
         glusterd_gsync_status_temp_t  param = {0, };
@@ -1256,18 +1342,23 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
                 ret = dict_get_int32 (dict, "stripe-count", &stripe_count);
                 if (!ret)
                         gf_msg (THIS->name, GF_LOG_INFO, errno,
-                                GD_MSG_DICT_GET_FAILED,
+                                GD_MSG_DICT_GET_SUCCESS,
                                 "stripe-count is set %d", stripe_count);
 
                 ret = dict_get_int32 (dict, "replica-count", &replica_count);
                 if (!ret)
                         gf_msg (THIS->name, GF_LOG_INFO, errno,
-                                GD_MSG_DICT_GET_FAILED,
+                                GD_MSG_DICT_GET_SUCCESS,
                                 "replica-count is set %d", replica_count);
+                ret = dict_get_int32 (dict, "arbiter-count", &arbiter_count);
+                if (!ret)
+                        gf_msg (THIS->name, GF_LOG_INFO, errno,
+                                GD_MSG_DICT_GET_SUCCESS,
+                                "arbiter-count is set %d", arbiter_count);
                 ret = dict_get_int32 (dict, "type", &type);
                 if (!ret)
                         gf_msg (THIS->name, GF_LOG_INFO, errno,
-                                GD_MSG_DICT_GET_FAILED,
+                                GD_MSG_DICT_GET_SUCCESS,
                                 "type is set %d, need to change it", type);
         }
@@ -1328,6 +1419,9 @@ glusterd_op_perform_add_bricks (glusterd_volinfo_t *volinfo, int32_t count,
         if (replica_count) {
                 volinfo->replica_count = replica_count;
         }
+        if (arbiter_count) {
+                volinfo->arbiter_count = arbiter_count;
+        }
         if (stripe_count) {
                 volinfo->stripe_count = stripe_count;
         }
@@ -1529,6 +1623,7 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
         char        *volname = NULL;
         int          count = 0;
         int          replica_count = 0;
+        int          arbiter_count = 0;
         int          i = 0;
         int32_t      local_brick_count = 0;
         char        *bricks = NULL;
@@ -1578,6 +1673,12 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
                               "Unable to get replica count");
         }
 
+        ret = dict_get_int32 (dict, "arbiter-count", &arbiter_count);
+        if (ret) {
+                gf_msg_debug (THIS->name, 0,
+                              "No arbiter count present in the dict");
+        }
+
         if (replica_count > 0) {
                 ret = op_version_check (this, GD_OP_VER_PERSISTENT_AFR_XATTRS,
                                         msg, sizeof(msg));
@@ -1589,10 +1690,10 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
                 }
         }
 
-        /* Do not allow add-brick for stopped volumes when replica-count
-         * is being increased.
-         */
         if (glusterd_is_volume_replicate (volinfo)) {
+                /* Do not allow add-brick for stopped volumes when replica-count
+                 * is being increased.
+                 */
                 if (conf->op_version >= GD_OP_VERSION_3_7_10 &&
                     !dict_get (dict, "attach-tier") &&
                     replica_count &&
@@ -1606,6 +1707,20 @@ glusterd_op_stage_add_brick (dict_t *dict, char **op_errstr, dict_t *rsp_dict)
                         *op_errstr = gf_strdup (msg);
                         goto out;
                 }
+                /* op-version check for replica 2 to arbiter conversion. If we
+                 * dont have this check, an older peer added as arbiter brick
+                 * will not have the arbiter xlator in its volfile. */
+                if ((conf->op_version < GD_OP_VERSION_3_8_0) &&
+                    (arbiter_count == 1) && (replica_count == 3)) {
+                        ret = -1;
+                        snprintf (msg, sizeof (msg), "Cluster op-version must "
+                                  "be >= 30800 to add arbiter brick to a "
+                                  "replica 2 volume.");
+                        gf_msg (THIS->name, GF_LOG_ERROR, 0,
+                                GD_MSG_BRICK_ADD_FAIL, "%s", msg);
+                        *op_errstr = gf_strdup (msg);
+                        goto out;
+                }
         }
 
         if (conf->op_version > GD_OP_VERSION_3_7_5 &&
@@ -2689,6 +2804,10 @@ glusterd_op_remove_brick (dict_t *dict, char **op_errstr)
                                 volinfo->replica_count, replica_count,
                                 volinfo->volname);
                 volinfo->replica_count = replica_count;
+                /* A reduction in replica count implies an arbiter volume
+                 * earlier is now no longer one. */
+                if (volinfo->arbiter_count)
+                        volinfo->arbiter_count = 0;
                 volinfo->sub_count = replica_count;
                 volinfo->dist_leaf_count = glusterd_get_dist_leaf_count (volinfo);
 
diff --git a/xlators/mgmt/glusterd/src/glusterd-messages.h b/xlators/mgmt/glusterd/src/glusterd-messages.h
index 9fa28611f19..ba40b8f7628 100644
--- a/xlators/mgmt/glusterd/src/glusterd-messages.h
+++ b/xlators/mgmt/glusterd/src/glusterd-messages.h
@@ -41,7 +41,7 @@
 
 #define GLUSTERD_COMP_BASE      GLFS_MSGID_GLUSTERD
 
-#define GLFS_NUM_MESSAGES       577
+#define GLFS_NUM_MESSAGES       578
 
 #define GLFS_MSGID_END          (GLUSTERD_COMP_BASE + GLFS_NUM_MESSAGES + 1)
 /* Messaged with message IDs */
@@ -4647,7 +4647,6 @@
  * on its own and try to restart the brick with a new port
  * @recommendedaction Ensure the new port is not blocked by firewall
  */
-
 #define GD_MSG_RETRY_WITH_NEW_PORT      (GLUSTERD_COMP_BASE + 575)
 
 /*!
@@ -4666,6 +4665,14 @@
  */
 #define GD_MSG_SLAVE_VOL_PARSE_FAIL     (GLUSTERD_COMP_BASE + 577)
 
+/*!
+ * @messageid
+ * @diagnosis
+ * @recommendedaction
+ *
+ */
+#define GD_MSG_DICT_GET_SUCCESS         (GLUSTERD_COMP_BASE + 578)
+
 /*------------*/
 #define glfs_msg_end_x GLFS_MSGID_END, "Invalid: End of messages"
 #endif /* !_GLUSTERD_MESSAGES_H_ */
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index 113902d2e6a..aa2be17bd9a 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -2444,6 +2444,7 @@ glusterd_store_retrieve_bricks (glusterd_volinfo_t *volinfo)
                 brick_count++;
         }
 
+        assign_brick_groups (volinfo);
         ret = gf_store_iter_destroy (tmpiter);
         if (ret)
                 goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index 89c6d2dfa18..19173a4d015 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -11418,3 +11418,40 @@ out:
         gf_msg_debug ("glusterd", 0, "Returning with ret");
         return ret;
 }
+
+void
+assign_brick_groups (glusterd_volinfo_t *volinfo)
+{
+        glusterd_brickinfo_t *brickinfo = NULL;
+        uint16_t              group_num = 0;
+        int                   in_group  = 0;
+
+        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
+                brickinfo->group = group_num;
+                if (++in_group >= volinfo->replica_count) {
+                        in_group = 0;
+                        ++group_num;
+                }
+        }
+}
+
+glusterd_brickinfo_t*
+get_last_brick_of_brick_group (glusterd_volinfo_t *volinfo,
+                               glusterd_brickinfo_t *brickinfo)
+{
+        glusterd_brickinfo_t *next = NULL;
+        glusterd_brickinfo_t *last = NULL;
+        int                   ret  = -1;
+
+        last = brickinfo;
+        for (;;) {
+                next = list_next (last, &volinfo->bricks,
+                                  glusterd_brickinfo_t, brick_list);
+                if (!next || (next->group != brickinfo->group)) {
+                        break;
+                }
+                last = next;
+        }
+
+        return last;
+}
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.h b/xlators/mgmt/glusterd/src/glusterd-utils.h
index e6436eff419..5f4bb6d251b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.h
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.h
@@ -708,4 +708,10 @@ int
 glusterd_handle_replicate_brick_ops (glusterd_volinfo_t *volinfo,
                                      glusterd_brickinfo_t *brickinfo,
                                      glusterd_op_t op);
+void
+assign_brick_groups (glusterd_volinfo_t *volinfo);
+
+glusterd_brickinfo_t*
+get_last_brick_of_brick_group (glusterd_volinfo_t *volinfo,
+                               glusterd_brickinfo_t *brickinfo);
 #endif
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.c b/xlators/mgmt/glusterd/src/glusterd-volgen.c
index 4321ddb7ddb..231117750f6 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.c
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.c
@@ -1547,16 +1547,8 @@ brick_graph_add_arbiter (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
         if (volinfo->arbiter_count != 1)
                 return 0;
 
-        /* Find the last brick in the same group. */
-        last = brickinfo;
-        for (;;) {
-                next = list_next (last, &volinfo->bricks,
-                                  glusterd_brickinfo_t, brick_list);
-                if (!next || (next->group != brickinfo->group)) {
-                        break;
-                }
-                last = next;
-        }
+        /* Add arbiter only if it is the last (i.e. 3rd) brick. */
+        last = get_last_brick_of_brick_group (volinfo, brickinfo);
         if (last != brickinfo)
                 return 0;
 
@@ -1623,22 +1615,6 @@ out:
         return ret;
 }
 
-void
-assign_brick_groups (glusterd_volinfo_t *volinfo)
-{
-        glusterd_brickinfo_t *brickinfo = NULL;
-        uint16_t group_num = 0;
-        int in_group = 0;
-
-        list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
-                brickinfo->group = group_num;
-                if (++in_group >= volinfo->replica_count) {
-                        in_group = 0;
-                        ++group_num;
-                }
-        }
-}
-
 static int
 brick_graph_add_changelog (volgen_graph_t *graph, glusterd_volinfo_t *volinfo,
                            dict_t *set_dict, glusterd_brickinfo_t *brickinfo)
@@ -5304,23 +5280,18 @@ get_parent_vol_tstamp_file (char *filename, glusterd_volinfo_t *volinfo)
 }
 
 void
-assign_groups (glusterd_volinfo_t *volinfo)
+assign_jbr_uuids (glusterd_volinfo_t *volinfo)
 {
         glusterd_brickinfo_t *brickinfo = NULL;
-        uint16_t group_num = 0;
         int in_group = 0;
         uuid_t tmp_uuid;
 
         list_for_each_entry (brickinfo, &volinfo->bricks, brick_list) {
-                if (in_group == 0) {
+                if (in_group == 0)
                         gf_uuid_generate(tmp_uuid);
-                }
-                brickinfo->group = group_num;
                 gf_uuid_copy(brickinfo->jbr_uuid, tmp_uuid);
-                if (++in_group >= volinfo->replica_count) {
+                if (++in_group >= volinfo->replica_count)
                         in_group = 0;
-                        ++group_num;
-                }
         }
 }
 
@@ -5393,7 +5364,7 @@ generate_brick_volfiles (glusterd_volinfo_t *volinfo)
         }
 
         if (glusterd_volinfo_get_boolean(volinfo, "cluster.jbr") > 0) {
-                assign_groups(volinfo);
+                assign_jbr_uuids(volinfo);
         }
 
         ret = glusterd_volume_brick_for_each (volinfo, NULL,
diff --git a/xlators/mgmt/glusterd/src/glusterd-volgen.h b/xlators/mgmt/glusterd/src/glusterd-volgen.h
index df6d9e5da7e..d8dd70bc33b 100644
--- a/xlators/mgmt/glusterd/src/glusterd-volgen.h
+++ b/xlators/mgmt/glusterd/src/glusterd-volgen.h
@@ -230,9 +230,6 @@ glusterd_check_voloption_flags (char *key, int32_t flags);
 gf_boolean_t
 glusterd_is_valid_volfpath (char *volname, char *brick);
 
-void
-assign_brick_groups (glusterd_volinfo_t *volinfo);
-
 int
 generate_brick_volfiles (glusterd_volinfo_t *volinfo);
```