-rw-r--r--  glusterfsd/src/glusterfsd-mgmt.c             | 73
-rw-r--r--  tests/basic/glusterd/volfile_server_switch.t | 48
2 files changed, 83 insertions(+), 38 deletions(-)
diff --git a/glusterfsd/src/glusterfsd-mgmt.c b/glusterfsd/src/glusterfsd-mgmt.c
index 7af15eba92e..61309f9fc08 100644
--- a/glusterfsd/src/glusterfsd-mgmt.c
+++ b/glusterfsd/src/glusterfsd-mgmt.c
@@ -1897,49 +1897,46 @@ mgmt_rpc_notify (struct rpc_clnt *rpc, void *mydata, rpc_clnt_event_t event,
 
         switch (event) {
         case RPC_CLNT_DISCONNECT:
-                if (!ctx->active) {
-                        gf_log ("glusterfsd-mgmt", GF_LOG_ERROR,
-                                "failed to connect with remote-host: %s (%s)",
-                                ctx->cmd_args.volfile_server,
-                                strerror (errno));
-                        if (!rpc->disabled) {
-                                /*
-                                 * Check if dnscache is exhausted for current server
-                                 * and continue until cache is exhausted
-                                 */
-                                dnscache = rpc_trans->dnscache;
-                                if (dnscache && dnscache->next) {
-                                        break;
-                                }
-                        }
-                        server = ctx->cmd_args.curr_server;
-                        if (server->list.next == &ctx->cmd_args.volfile_servers) {
-                                need_term = 1;
-                                emval = ENOTCONN;
-                                gf_log("glusterfsd-mgmt", GF_LOG_INFO,
-                                       "Exhausted all volfile servers");
+                gf_log ("glusterfsd-mgmt", GF_LOG_ERROR,
+                        "failed to connect with remote-host: %s (%s)",
+                        ctx->cmd_args.volfile_server, strerror (errno));
+                if (!rpc->disabled) {
+                        /*
+                         * Check if dnscache is exhausted for current server
+                         * and continue until cache is exhausted
+                         */
+                        dnscache = rpc_trans->dnscache;
+                        if (dnscache && dnscache->next) {
                                 break;
                         }
-                        server = list_entry (server->list.next, typeof(*server),
-                                             list);
-                        ctx->cmd_args.curr_server = server;
-                        ctx->cmd_args.volfile_server = server->volfile_server;
-
-                        ret = dict_set_str (rpc_trans->options,
-                                            "remote-host",
-                                            server->volfile_server);
-                        if (ret != 0) {
-                                gf_log ("glusterfsd-mgmt", GF_LOG_ERROR,
-                                        "failed to set remote-host: %s",
-                                        server->volfile_server);
+                }
+                server = ctx->cmd_args.curr_server;
+                if (server->list.next == &ctx->cmd_args.volfile_servers) {
+                        if (!ctx->active)
                                 need_term = 1;
-                                emval = ENOTCONN;
-                                break;
-                        }
-                        gf_log ("glusterfsd-mgmt", GF_LOG_INFO,
-                                "connecting to next volfile server %s",
+                        emval = ENOTCONN;
+                        gf_log("glusterfsd-mgmt", GF_LOG_INFO,
+                               "Exhausted all volfile servers");
+                        break;
+                }
+                server = list_entry (server->list.next, typeof(*server), list);
+                ctx->cmd_args.curr_server = server;
+                ctx->cmd_args.volfile_server = server->volfile_server;
+
+                ret = dict_set_str (rpc_trans->options, "remote-host",
+                                    server->volfile_server);
+                if (ret != 0) {
+                        gf_log ("glusterfsd-mgmt", GF_LOG_ERROR,
+                                "failed to set remote-host: %s",
                                 server->volfile_server);
+                        if (!ctx->active)
+                                need_term = 1;
+                        emval = ENOTCONN;
+                        break;
                 }
+                gf_log ("glusterfsd-mgmt", GF_LOG_INFO,
+                        "connecting to next volfile server %s",
+                        server->volfile_server);
                 break;
         case RPC_CLNT_CONNECT:
                 rpc_clnt_set_connected (&((struct rpc_clnt*)ctx->mgmt)->conn);
diff --git a/tests/basic/glusterd/volfile_server_switch.t b/tests/basic/glusterd/volfile_server_switch.t
new file mode 100644
index 00000000000..0b0e6470244
--- /dev/null
+++ b/tests/basic/glusterd/volfile_server_switch.t
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../cluster.rc
+
+
+cleanup;
+
+# * How this test works ?
+# 1. create a 3 node cluster
+# 2. add them to trusted pool
+# 3. create a volume and start
+# 4. mount the volume with all 3 backup-volfile servers
+# 5. kill glusterd in node 1
+# 6. make changes to volume using node 2, using 'volume set' here
+# 7. check whether those notifications are received by client
+
+TEST launch_cluster 3;
+
+TEST $CLI_1 peer probe $H1;
+
+TEST $CLI_1 peer probe $H2;
+
+TEST $CLI_1 peer probe $H3;
+
+EXPECT_WITHIN $PROBE_TIMEOUT 2 peer_count
+
+TEST $CLI_1 volume create $V0 $H1:$B1/$V0 $H2:$B2/$V0 $H3:$B3/$V0
+
+TEST $CLI_1 volume start $V0
+
+TEST $CLI_1 volume status $V0;
+
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H1 --volfile-server=$H2 --volfile-server=$H3  $M0
+
+TEST kill_glusterd 1
+
+TEST $CLI_2 volume set $V0 performance.io-cache off
+
+# make sure by this time directory will be created
+# TODO: suggest ideal time to wait
+sleep 5
+
+count=$(find $M0/.meta/graphs/* -maxdepth 0 -type d -iname "*" | wc -l)
+TEST [ "$count" -gt "1" ]
+
+cleanup;
