summaryrefslogtreecommitdiffstats
path: root/tests/functional/glusterd/test_volume_reset.py
blob: 2bb8c4c24a3253b40e07f493bb5d80c2a993f808 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
#  Copyright (C) 2017-2018  Red Hat, Inc. <http://www.redhat.com>
#
#  This program is free software; you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation; either version 2 of the License, or
#  any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License along
#  with this program; if not, write to the Free Software Foundation, Inc.,
#  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

""" Description:
        Test Cases in this module related to Glusterd volume reset validation
        with bitd, scrub and snapd daemons running or not
"""
from glusto.core import Glusto as g
from glustolibs.gluster.exceptions import ExecutionError
from glustolibs.gluster.gluster_base_class import GlusterBaseClass, runs_on
from glustolibs.gluster.peer_ops import peer_probe_servers
from glustolibs.gluster.volume_libs import cleanup_volume
from glustolibs.gluster.bitrot_ops import (enable_bitrot, is_bitd_running,
                                           is_scrub_process_running)
from glustolibs.gluster.uss_ops import enable_uss, is_snapd_running


@runs_on([['distributed', 'replicated', 'distributed-replicated',
           'dispersed', 'distributed-dispersed'], ['glusterfs']])
class GlusterdVolumeReset(GlusterBaseClass):
    """Validate daemon state across 'gluster volume reset'.

    Checks that a plain ``volume reset`` stops snapd but leaves the bitd
    and scrub daemons running, while ``volume reset ... force`` stops all
    three daemons (bitd, scrub and snapd).
    """

    @classmethod
    def setUpClass(cls):
        """Form the cluster (peer probe if needed) and create the volume.

        Raises:
            ExecutionError: if peer probe or volume creation fails.
        """
        GlusterBaseClass.setUpClass.im_func(cls)
        g.log.info("Starting %s ", cls.__name__)

        # If any peer is not in connected state, probe all servers from
        # mnode so the cluster is fully formed before creating the volume.
        ret = cls.validate_peers_are_connected()
        if not ret:
            ret = peer_probe_servers(cls.mnode, cls.servers)
            if ret:
                g.log.info("Peers connected successfully from %s to other "
                           "servers in serverlist %s", cls.mnode, cls.servers)
            else:
                g.log.error("Peer probe failed from %s to other "
                            "servers in serverlist %s", cls.mnode, cls.servers)
                raise ExecutionError("Peer probe failed")
        else:
            g.log.info("All server peers are already in connected state %s",
                       cls.servers)

        # Creating Volume
        g.log.info("Started creating volume")
        ret = cls.setup_volume()
        if ret:
            g.log.info("Volume created successfully: %s", cls.volname)
        else:
            raise ExecutionError("Volume creation failed: %s" % cls.volname)

    def setUp(self):
        """Reset the volume to a clean option state before every test.

        Raises:
            ExecutionError: if 'gluster volume reset' fails.
        """
        # calling GlusterBaseClass setUp
        GlusterBaseClass.setUp.im_func(self)

        # Reset the volume so every test starts from default options
        g.log.info("started resetting volume")
        cmd = "gluster volume reset " + self.volname
        ret, _, _ = g.run(self.mnode, cmd)
        if ret == 0:
            g.log.info("volume reset successfully: %s", self.volname)
        else:
            raise ExecutionError("Volume reset Failed :%s" % self.volname)

    def tearDown(self):
        """tearDown for every test."""
        # Calling GlusterBaseClass tearDown
        GlusterBaseClass.tearDown.im_func(self)

    @classmethod
    def tearDownClass(cls):
        """Stop and clean up the volume, then run base-class cleanup.

        Raises:
            ExecutionError: if the volume cleanup fails.
        """
        # stopping the volume and Cleaning up the volume
        ret = cleanup_volume(cls.mnode, cls.volname)
        if ret:
            g.log.info("Volume deleted successfully: %s", cls.volname)
        else:
            raise ExecutionError("Failed Cleanup the Volume %s" % cls.volname)

        # Calling GlusterBaseClass tearDownClass (was missing: keeps the
        # base class's own class-level cleanup consistent with setUpClass)
        GlusterBaseClass.tearDownClass.im_func(cls)

    def test_bitd_scrubd_snapd_after_volume_reset(self):
        '''
        -> Create volume
        -> Enable BitD, Scrub and Uss on volume
        -> Verify the BitD, Scrub and Uss daemons are running on every node
        -> Reset the volume
        -> Verify the daemons (BitD, Scrub & Uss) are running or not
        -> Enable Uss on same volume
        -> Reset the volume with force
        -> Verify all the daemons (BitD, Scrub & Uss) are running or not
        :return:
        '''

        # enable bitrot and scrub on volume
        g.log.info("Enabling bitrot")
        ret, _, _ = enable_bitrot(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable bitrot on "
                                 "volume: %s" % self.volname)
        g.log.info("Bitd and scrub daemons enabled successfully on "
                   "volume: %s", self.volname)

        # enable uss on volume
        g.log.info("Enabling snapshot(uss)")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable uss on "
                                 "volume: %s" % self.volname)
        g.log.info("uss enabled successfully on volume: %s", self.volname)

        # Checks bitd, snapd, scrub daemons running or not
        g.log.info("checking snapshot, scrub and bitrot daemons "
                   "running or not")
        for server in self.servers:
            ret = is_bitd_running(server, self.volname)
            self.assertTrue(ret, "Bitrot Daemon "
                                 "not running on %s server:" % server)
            ret = is_scrub_process_running(server, self.volname)
            self.assertTrue(ret, "Scrub Daemon "
                                 "not running on %s server:" % server)
            ret = is_snapd_running(server, self.volname)
            self.assertTrue(ret, "Snap Daemon "
                                 "not running %s server:" % server)
        g.log.info("bitd, scrub and snapd running successfully on "
                   "volume: %s", self.volname)

        # command for volume reset
        g.log.info("started resetting volume")
        cmd = "gluster volume reset " + self.volname
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, "volume reset failed "
                                 "for: %s" % self.volname)
        g.log.info("volume reset successfully: %s", self.volname)

        # After a plain volume reset, snapd must stop while bitd and
        # scrub daemons keep running.
        g.log.info("checking snapshot, scrub and bitrot daemons "
                   "running or not after volume reset")
        for server in self.servers:
            ret = is_bitd_running(server, self.volname)
            self.assertTrue(ret, "Bitrot Daemon "
                                 "not running on %s server:" % server)
            ret = is_scrub_process_running(server, self.volname)
            self.assertTrue(ret, "Scrub Daemon "
                                 "not running on %s server:" % server)
            ret = is_snapd_running(server, self.volname)
            self.assertFalse(ret, "Snap Daemon should not be running on %s "
                                  "server after volume reset:" % server)
        g.log.info("bitd and scrub daemons are running after volume reset, "
                   "snapd is not running as expected on volume: %s",
                   self.volname)

        # enable uss on volume
        g.log.info("Enabling snapshot(uss)")
        ret, _, _ = enable_uss(self.mnode, self.volname)
        self.assertEqual(ret, 0, "Failed to enable uss on "
                                 "volume: %s" % self.volname)
        g.log.info("uss enabled successfully on volume: %s", self.volname)

        # command for volume reset with force
        g.log.info("started resetting volume with force option")
        cmd = "gluster volume reset " + self.volname + " force"
        ret, _, _ = g.run(self.mnode, cmd)
        self.assertEqual(ret, 0, "volume reset fail "
                                 "for: %s" % self.volname)
        g.log.info("Volume reset successfully with force option: %s",
                   self.volname)

        # After volume reset with force, bitd, scrub and snapd must all
        # be stopped.
        g.log.info("checking snapshot, scrub and bitrot daemons "
                   "running or not after volume reset with force")
        for server in self.servers:
            ret = is_bitd_running(server, self.volname)
            self.assertFalse(ret, "Bitrot Daemon should not be running on %s "
                                  "server after volume reset with force:"
                                  % server)
            ret = is_scrub_process_running(server, self.volname)
            self.assertFalse(ret, "Scrub Daemon should not be running on %s "
                                  "server after volume reset with force:"
                                  % server)
            ret = is_snapd_running(server, self.volname)
            self.assertFalse(ret, "Snap Daemon should not be running on %s "
                                  "server after volume reset force:" % server)
        g.log.info("After volume reset with force bitd, scrub and snapd are "
                   "not running on volume: %s", self.volname)