#!/bin/bash

. $(dirname $0)/../../include.rc
. $(dirname $0)/../../volume.rc
. $(dirname $0)/../../tier.rc
. $(dirname $0)/../../cluster.rc

# Creates a tiered volume with pure distribute hot and cold tiers
# Both hot and cold tiers will have an equal number of bricks.
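
# Count the number of peers that node 1 sees as "Peer in Cluster (Connected)".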
function check_peers {
        $CLI_1 peer status | grep 'Peer in Cluster (Connected)' | wc -l
}
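
# Create and start a 3-brick distribute volume (the cold tier), then attach a
# 3-brick distribute hot tier.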
function create_dist_tier_vol () {
        TEST $CLI_1 volume create $V0 $H1:$B1/${V0} $H2:$B2/${V0} $H3:$B3/${V0}
        TEST $CLI_1 volume start $V0
        TEST $CLI_1 volume tier $V0 attach $H1:$B1/${V0}_h1 $H2:$B2/${V0}_h2 $H3:$B3/${V0}_h3
}
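
# Extract the tier daemon's numeric status from the XML output of
# 'volume status' on the node given by $1.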
function tier_daemon_status {
        local _VAR=CLI_$1
        local xpath_sel='//node[hostname="Tier Daemon"][path="localhost"]/status'
        ${!_VAR} --xml volume status $V0 \
                | xmllint --xpath "$xpath_sel" - \
                | sed -n '/.*<status>\([0-9]*\).*/s//\1/p'
}
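
# Count the "Detach tier status successful" message in the XML detach status
# output: 1 if the status command succeeded, 0 otherwise.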
function detach_xml_status {
        $CLI_1 volume tier $V0 detach status --xml | sed -n \
                '/.*<opErrstr>Detach tier status successful/p' | wc -l
}

cleanup;

# Set up the cluster and the test volume
TEST launch_cluster 3; # start 3-node virtual cluster
TEST $CLI_1 peer probe $H2; # peer probe server 2 from server 1 cli
TEST $CLI_1 peer probe $H3; # peer probe server 3 from server 1 cli
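# Both probed peers should show up as connected.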
EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;

#Create and start a tiered volume
create_dist_tier_vol

########### check failure for older commands #############
TEST ! $CLI_1 volume rebalance $V0 tier status
# this check for the older command's failure can be removed in 3.11
##########################################################

# Issue detach tier status on the tiered volume.
# It should fail with an error because detach tier has not been started yet.
EXPECT "Tier command failed" $CLI_1 volume tier $V0 detach status
EXPECT "0" detach_xml_status

# After detach tier has been started, detach tier status should report the status.
TEST $CLI_1 volume tier $V0 detach start
EXPECT "1" detach_xml_status
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status

# Kill a node
TEST kill_node 2

# Check that the remaining nodes are still listed in the detach status output
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status_node_down
# Check that the remaining nodes are still listed in the tier status output
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_status_node_down
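
# Bring the killed node back: $glusterd_2 (set by launch_cluster) restarts
# glusterd on node 2.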
TEST $glusterd_2;
EXPECT_WITHIN $PROBE_TIMEOUT 2 check_peers;
# Make sure we check that the *bricks* are up and not just the node. >:-(
EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 brick_up_status_1 $V0 $H2 $B2/${V0}
EXPECT_WITHIN $CHILD_UP_TIMEOUT 1 brick_up_status_1 $V0 $H2 $B2/${V0}_h2
# Parsing normal output doesn't work because of line-wrap issues on our
# regression machines, and the version of xmllint there doesn't support --xpath
# so we can't do it that way either. In short, there's no way for us to detect
# when we can stop waiting, so we just have to wait the maximum time every time
# and hope any failures will show up later in the script.
sleep $PROCESS_UP_TIMEOUT
#XPECT_WITHIN $PROCESS_UP_TIMEOUT 1 tier_daemon_status 2
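# Once the restarted node's daemons are back up, detach status should be
# reported again.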
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_status

TEST $CLI_1 volume tier $V0 detach stop
# Once detach tier has been stopped, the detach tier status command will fail
EXPECT "Tier command failed" $CLI_1 volume tier $V0 detach status

TEST $CLI_1 volume tier $V0 detach start

# Wait for the detach to complete
EXPECT_WITHIN $PROCESS_UP_TIMEOUT "1" tier_detach_commit

# Once detach tier has been committed, the detach status command should fail
# with an error saying it is not a tiered volume
EXPECT "Tier command failed" $CLI_1 volume tier $V0 detach status

########### check failure for older commands #############
TEST ! $CLI_1 volume rebalance $V0 tier start
# this check for the older command's failure can be removed in 3.11
##########################################################

cleanup;
#G_TESTDEF_TEST_STATUS_NETBSD7=KNOWN_ISSUE,BUG=000000