1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
|
from openshiftstoragelibs.baseclass import GlusterBlockBaseClass
from openshiftstoragelibs.command import cmd_run
from openshiftstoragelibs.openshift_ops import (
get_pod_name_from_dc,
oc_adm_manage_node,
oc_delete,
oc_get_schedulable_nodes,
wait_for_pod_be_ready,
wait_for_resource_absence,
)
from openshiftstoragelibs.openshift_storage_libs import (
get_iscsi_session,
)
class TestGlusterBlockStability(GlusterBlockBaseClass):
    '''Class that contain gluster-block stability TC'''

    def setUp(self):
        """Cache the first OCP master node; all oc commands run from it."""
        super(TestGlusterBlockStability, self).setUp()
        self.node = self.ocp_master_node[0]

    def _get_gluster_pod_nodes(self):
        """Return list of node names hosting containerized Gluster pods.

        Returns:
            list: node names from pods labeled ``glusterfs-node=pod``;
            an empty list on a CRS (non-containerized) setup.
        """
        cmd = ("oc get pods --no-headers -l glusterfs-node=pod "
               "-o=custom-columns=:.spec.nodeName")
        out = cmd_run(cmd, self.node)
        # Normalize to a list even when no gluster pods exist, so callers
        # can rely on list semantics (truthiness, set(), len()).
        return out.split('\n') if out else []

    def initiator_side_failures(self):
        """Verify iSCSI logout/login when an app pod is respun on a new node.

        Creates a PVC-backed app pod, cordons the node it runs on, deletes
        the pod so it lands elsewhere, then checks the old node's iSCSI
        session is gone and the new node's sessions/multipath are healthy.
        """
        self.create_storage_class()
        self.create_and_wait_for_pvc()

        # Create app pod
        dc_name, pod_name = self.create_dc_with_pvc(self.pvc_name)

        iqn, _, node = self.verify_iscsi_sessions_and_multipath(
            self.pvc_name, dc_name)

        # Make node unschedulable where pod is running
        oc_adm_manage_node(
            self.node, '--schedulable=false', nodes=[node])

        # Make node schedulable where pod is running
        self.addCleanup(
            oc_adm_manage_node, self.node, '--schedulable=true',
            nodes=[node])

        # Delete pod so it get respun on any other node
        oc_delete(self.node, 'pod', pod_name)
        wait_for_resource_absence(self.node, 'pod', pod_name)

        # Wait for pod to come up
        pod_name = get_pod_name_from_dc(self.node, dc_name)
        wait_for_pod_be_ready(self.node, pod_name)

        # Get the iscsi session from the previous node to verify logout
        iscsi = get_iscsi_session(node, iqn, raise_on_error=False)
        self.assertFalse(iscsi)

        self.verify_iscsi_sessions_and_multipath(self.pvc_name, dc_name)

    def test_initiator_side_failures_initiator_and_target_on_different_node(
            self):
        """Run initiator failure checks with initiator off the Gluster nodes.

        Cordons all containerized Gluster nodes so the app pod (initiator)
        is forced onto a non-Gluster (target-separate) node.
        """
        nodes = oc_get_schedulable_nodes(self.node)

        # Get list of all gluster nodes
        g_nodes = self._get_gluster_pod_nodes()

        # Skip test case if required schedulable node count not met
        if len(set(nodes) - set(g_nodes)) < 2:
            self.skipTest("skipping test case because it needs at least two"
                          " nodes schedulable")

        # Make containerized Gluster nodes unschedulable
        if g_nodes:
            # Make gluster nodes unschedulable
            oc_adm_manage_node(
                self.node, '--schedulable=false',
                nodes=g_nodes)

            # Make gluster nodes schedulable
            self.addCleanup(
                oc_adm_manage_node, self.node, '--schedulable=true',
                nodes=g_nodes)

        self.initiator_side_failures()

    def test_initiator_side_failures_initiator_and_target_on_same_node(self):
        """Run initiator failure checks with initiator on a Gluster node.

        Cordons every non-Gluster node so the app pod (initiator) must be
        scheduled on a node that also hosts a Gluster pod (the target).
        """
        # Note: This test case is supported for containerized gluster only.

        nodes = oc_get_schedulable_nodes(self.node)

        # Get list of all gluster nodes
        g_nodes = self._get_gluster_pod_nodes()

        # Skip the test case if it is crs setup
        if not g_nodes:
            self.skipTest("skipping test case because it is not a "
                          "containerized gluster setup. "
                          "This test case is for containerized gluster only.")

        # Get the list of nodes other than gluster
        o_nodes = list((set(nodes) - set(g_nodes)))

        # Make other nodes unschedulable
        oc_adm_manage_node(
            self.node, '--schedulable=false', nodes=o_nodes)

        # Make other nodes schedulable
        self.addCleanup(
            oc_adm_manage_node, self.node, '--schedulable=true', nodes=o_nodes)

        self.initiator_side_failures()
|