path: root/tests/functional/heketi/test_volume_creation.py
from glusto.core import Glusto as g
from glustolibs.gluster import volume_ops

from openshiftstoragelibs.baseclass import BaseClass
from openshiftstoragelibs import heketi_ops
from openshiftstoragelibs import podcmd


class TestVolumeCreationTestCases(BaseClass):
    """
    Class for volume creation related test cases
    """

    @podcmd.GlustoPod()
    def test_create_heketi_volume(self):
        """Test heketi volume creation and background gluster validation"""

        hosts = []
        gluster_servers = []
        brick_info = []

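        # Create a 10 GB volume via heketi and parse its JSON output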
        output_dict = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 10, json=True)

        self.assertNotEqual(output_dict, False,
                            "Volume could not be created")

        volume_name = output_dict["name"]
        volume_id = output_dict["id"]

        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, volume_id)

        self.assertEqual(output_dict["durability"]
                         ["replicate"]["replica"], 3,
                         "Volume %s is not replica 3" % volume_id)

        self.assertEqual(output_dict["size"], 10,
                         "Volume %s is not of intended size"
                         % volume_id)

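        # Collect mount host and backup volfile servers reported by heketi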
        mount_node = (
            output_dict["mount"]["glusterfs"]["device"].strip().split(":")[0])
        hosts.append(mount_node)

        backup_volfile_servers = (
            output_dict["mount"]["glusterfs"]["options"]
            ["backup-volfile-servers"].strip().split(","))
        hosts.extend(backup_volfile_servers)

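        # Gather the storage IPs of the configured gluster servers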
        for gluster_server in self.gluster_servers:
            gluster_servers.append(
                g.config["gluster_servers"][gluster_server]["storage"])

        self.assertEqual(set(hosts), set(gluster_servers),
                         "Hosts and gluster servers not matching for %s"
                         % volume_id)

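        # Cross-check the volume info and status from the gluster side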
        volume_info = volume_ops.get_volume_info(
            'auto_get_gluster_endpoint', volume_name)
        self.assertIsNotNone(volume_info, "get_volume_info returned None")

        volume_status = volume_ops.get_volume_status(
            'auto_get_gluster_endpoint', volume_name)
        self.assertIsNotNone(
            volume_status, "get_volume_status returned None")

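        # Ensure the volume is started and collect its brick names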
        self.assertEqual(int(volume_info[volume_name]["status"]), 1,
                         "Volume %s status down" % volume_id)
        for brick_details in volume_info[volume_name]["bricks"]["brick"]:
            brick_info.append(brick_details["name"])

        self.assertNotEqual(
            brick_info, [], "Brick details are empty for %s" % volume_name)

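        # Verify that every brick of the volume is online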
        for brick in brick_info:
            brick_data = brick.strip().split(":")
            brick_ip = brick_data[0]
            brick_name = brick_data[1]
            brick_status = volume_status[volume_name][brick_ip][brick_name]
            self.assertEqual(
                int(brick_status["status"]), 1,
                "Brick %s is not up" % brick_name)

    def test_volume_creation_no_free_devices(self):
        """Validate heketi error is returned when no free devices available"""
        node, server_url = self.heketi_client_node, self.heketi_server_url

        # Get nodes info
        node_id_list = heketi_ops.heketi_node_list(node, server_url)
        node_info_list = []
        for node_id in node_id_list[0:3]:
            node_info = heketi_ops.heketi_node_info(
                node, server_url, node_id, json=True)
            node_info_list.append(node_info)

        # Disable the 4th and all remaining nodes
        for node_id in node_id_list[3:]:
            heketi_ops.heketi_node_disable(node, server_url, node_id)
            self.addCleanup(
                heketi_ops.heketi_node_enable, node, server_url, node_id)

        # Disable all but the first device on each of the first 3 nodes
        for node_info in node_info_list[0:3]:
            devices = node_info["devices"]
            self.assertTrue(
                devices, "Node '%s' does not have devices." % node_info["id"])
            if devices[0]["state"].strip().lower() != "online":
                self.skipTest("Test expects first device to be enabled.")
            if len(devices) < 2:
                continue
            for device in devices[1:]:
                out = heketi_ops.heketi_device_disable(
                    node, server_url, device["id"])
                self.assertTrue(
                    out, "Failed to disable the device %s" % device["id"])
                self.addCleanup(
                    heketi_ops.heketi_device_enable,
                    node, server_url, device["id"])

        # Calculate the minimum free space available on the first devices
        available_spaces = [
            int(node_info["devices"][0]["storage"]["free"])
            for node_info in node_info_list[0:3]]
        min_space_gb = int(min(available_spaces) / 1024**2)
        self.assertGreater(min_space_gb, 3, "Not enough available free space.")

        # Create first small volume
        vol = heketi_ops.heketi_volume_create(node, server_url, 1, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol["id"])

        # Try to create a second volume and expect a "no free space" error
        try:
            vol_fail = heketi_ops.heketi_volume_create(
                node, server_url, min_space_gb, json=True)
        except AssertionError:
            g.log.info("Volume was not created as expected.")
        else:
            self.addCleanup(
                heketi_ops.heketi_volume_delete, self.heketi_client_node,
                self.heketi_server_url, vol_fail["bricks"][0]["volume"])
            self.assertFalse(
                vol_fail,
                "Volume should have not been created. Out: %s" % vol_fail)

    @podcmd.GlustoPod()
    def test_volume_create_replica_2(self):
        """Validate creation of a replica 2 volume"""
        vol_create_info = heketi_ops.heketi_volume_create(
            self.heketi_client_node, self.heketi_server_url, 1,
            replica=2, json=True)
        self.addCleanup(
            heketi_ops.heketi_volume_delete, self.heketi_client_node,
            self.heketi_server_url, vol_create_info["id"],
            raise_on_error=True)
        actual_replica = int(
            vol_create_info["durability"]["replicate"]["replica"])
        self.assertEqual(
            actual_replica, 2,
            "Volume '%s' has '%s' as value for replica,"
            " expected 2." % (vol_create_info["id"], actual_replica))
        vol_name = vol_create_info['name']

        # Get gluster volume info
        gluster_vol = volume_ops.get_volume_info(
            'auto_get_gluster_endpoint', volname=vol_name)
        self.assertTrue(
            gluster_vol, "Failed to get volume '%s' info" % vol_name)

        # Check amount of bricks
        brick_amount = len(gluster_vol[vol_name]['bricks']['brick'])
        self.assertEqual(brick_amount, 2,
                         "Brick amount is expected to be 2. "
                         "Actual amount is '%s'" % brick_amount)