author    Apeksha D Khakharia <akhakhar@redhat.com>  2018-07-27 21:05:39 +0530
committer Apeksha D Khakharia <akhakhar@redhat.com>  2018-08-06 18:55:30 +0530
commit    3f2f40ec0e4c2d2ff548fcc3b2db5dc9b700007d (patch)
tree      937bb3be98aaa355ce3f2879379f2d1492fc7de5
parent    ce5bb6aaab0051d32cbc2b697a288824d0ebde52 (diff)
CNS: new library - heketi-scale-pod, pod-state-ready
Change-Id: I97eb3ae1d0af48d405a0187fe585e2732658f809
Signed-off-by: Apeksha D Khakharia <akhakhar@redhat.com>
-rw-r--r--  cns-libs/cnslibs/common/dynamic_provisioning.py                                    115
-rw-r--r--  cns-libs/cnslibs/common/openshift_ops.py                                            41
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py    76
-rw-r--r--  tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py          69
4 files changed, 148 insertions, 153 deletions
diff --git a/cns-libs/cnslibs/common/dynamic_provisioning.py b/cns-libs/cnslibs/common/dynamic_provisioning.py
index 5743bf6c..288d6245 100644
--- a/cns-libs/cnslibs/common/dynamic_provisioning.py
+++ b/cns-libs/cnslibs/common/dynamic_provisioning.py
@@ -200,65 +200,96 @@ def create_storage_class_file(hostname, sc_name, resturl,
return True
-def verify_pod_status_running(hostname, pod_name,
- timeout=1200, wait_step=60):
+def wait_for_pod_be_ready(hostname, pod_name,
+ timeout=1200, wait_step=60):
'''
- MAkes sure pod is running
+    This function waits for the pod to be in ready state
Args:
hostname (str): hostname on which we want to check the pod status
pod_name (str): pod_name for which we need the status
- timeout (int): timeout value, if pod status is ContainerCreating,
- checks the status after wait_step value till timeout
+             timeout (int): timeout value,
default value is 1200 sec
wait_step( int): wait step,
default value is 60 sec
Returns:
- bool: True if pod status is Running,
- otherwise False
+        bool: True if pod status is Running and in ready state,
+            otherwise raises exceptions.ExecutionError
+ '''
+ for w in Waiter(timeout, wait_step):
+ # command to find pod status and its phase
+ cmd = ("oc get pods %s -o=custom-columns="
+ ":.status.containerStatuses[0].ready,"
+ ":.status.phase") % pod_name
+ ret, out, err = g.run(hostname, cmd, "root")
+ if ret != 0:
+ msg = ("failed to execute cmd %s" % cmd)
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ output = out.strip().split()
+        # check whether the pod is ready and Running
+ if output[0] == "true" and output[1] == "Running":
+ g.log.info("pod %s is in ready state and is "
+ "Running" % pod_name)
+ return True
+ elif output[1] == "Error":
+ msg = ("pod %s status error" % pod_name)
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ else:
+ g.log.info("pod %s ready state is %s,"
+ " phase is %s,"
+ " sleeping for %s sec" % (
+ pod_name, output[0],
+ output[1], wait_step))
+ continue
+ if w.expired:
+ err_msg = ("exceeded timeout %s for waiting for pod %s "
+ "to be in ready state" % (timeout, pod_name))
+ g.log.error(err_msg)
+ raise exceptions.ExecutionError(err_msg)
+
+
+def get_pod_name_from_dc(hostname, dc_name,
+ timeout=1200, wait_step=60):
'''
- status_flag = False
+    This function returns the pod_name for the given dc_name
+ Args:
+ hostname (str): hostname on which we can execute oc
+ commands
+        dc_name (str): deployment_config name
+ timeout (int): timeout value
+ default value is 1200 sec
+        wait_step (int): wait step,
+ default value is 60 sec
+ Returns:
+        str: pod_name if successful,
+            otherwise raises exceptions.ExecutionError
+ '''
+ cmd = ("oc get pods --all-namespaces -o=custom-columns="
+ ":.metadata.name "
+ "--no-headers=true "
+ "--selector deploymentconfig=%s" % dc_name)
for w in Waiter(timeout, wait_step):
- cmd = ("oc get pods | grep '%s'| grep -v deploy | "
- "awk '{print $3}'") % pod_name
ret, out, err = g.run(hostname, cmd, "root")
if ret != 0:
- g.log.error("failed to execute cmd %s" % cmd)
- break
- output = out.strip().split("\n")[0].strip()
+ msg = ("failed to execute cmd %s" % cmd)
+ g.log.error(msg)
+ raise exceptions.ExecutionError(msg)
+ output = out.strip()
if output == "":
- g.log.info("pod %s not found sleeping for %s "
- "sec" % (pod_name, wait_step))
- continue
- elif output == "ContainerCreating":
- g.log.info("pod %s creating sleeping for %s "
- "sec" % (pod_name, wait_step))
- continue
- elif output == "Running":
- status_flag = True
- g.log.info("pod %s is up and running" % pod_name)
- break
- elif output == "Error":
- g.log.error("pod %s status error" % pod_name)
- break
- elif output == "Terminating":
- g.log.info("pod %s is terminating state sleeping "
- "for %s sec" % (pod_name, wait_step))
- continue
- elif output == "Pending":
- g.log.info("pod %s is pending state sleeping "
- "for %s sec" % (pod_name, wait_step))
+ g.log.info("podname for dc %s not found sleeping for "
+ "%s sec" % (dc_name, wait_step))
continue
else:
- g.log.error("pod %s has different status - %s "
- "sleeping for %s sec" % (
- pod_name, output, wait_step))
- continue
+ g.log.info("podname is %s for dc %s" % (
+ output, dc_name))
+ return output
if w.expired:
- g.log.error("exceeded timeout %s for verifying running "
- "status of pod %s" % (timeout, pod_name))
- return False
- return status_flag
+ err_msg = ("exceeded timeout %s for waiting for pod_name"
+ "for dc %s " % (timeout, dc_name))
+ g.log.error(err_msg)
+ raise exceptions.ExecutionError(err_msg)
def create_mongodb_pod(hostname, pvc_name, pvc_size, sc_name):
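
The two new helpers above are meant to be used as a pair: first resolve the pod name from its deployment config via the label selector, then poll until that pod reports both ready and Running. A minimal usage sketch follows; the master hostname and dc name are hypothetical, for illustration only:

    from cnslibs.common.dynamic_provisioning import (
        get_pod_name_from_dc, wait_for_pod_be_ready)

    master = "ocp-master.example.com"   # hypothetical master node
    # resolve the pod backing the "mongodb" dc (hypothetical dc name)
    pod_name = get_pod_name_from_dc(master, "mongodb",
                                    timeout=300, wait_step=5)
    # returns True once the pod is ready and Running; raises
    # exceptions.ExecutionError on pod error or timeout
    wait_for_pod_be_ready(master, pod_name, timeout=300, wait_step=5)

Because both helpers raise ExecutionError instead of returning False, callers no longer need to scrape "oc get pods" output or re-check return values.
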
diff --git a/cns-libs/cnslibs/common/openshift_ops.py b/cns-libs/cnslibs/common/openshift_ops.py
index 2a61f6ac..7f2d2de0 100644
--- a/cns-libs/cnslibs/common/openshift_ops.py
+++ b/cns-libs/cnslibs/common/openshift_ops.py
@@ -14,6 +14,8 @@ import yaml
from cnslibs.common import exceptions
from cnslibs.common import utils
from cnslibs.common import waiter
+from cnslibs.common.dynamic_provisioning import (
+ wait_for_pod_be_ready)
PODS_WIDE_RE = re.compile(
@@ -414,3 +416,42 @@ def wait_for_resource_absence(ocp_node, rtype, name,
rtype, name, timeout)
g.log.error(error_msg)
raise exceptions.ExecutionError(error_msg)
+
+
+def scale_heketi_pod_amount_and_wait(hostname, dc_name,
+ namespace, pod_amount=1):
+ '''
+    This function scales the heketi pod and waits:
+    if pod_amount is 0, it waits for the pod's absence;
+    if pod_amount >= 1, it waits for all pods to be ready.
+ Args:
+ hostname (str): Node on which the ocp command will run
+ dc_name (str): Name of heketi dc
+ namespace (str): Namespace
+ pod_amount (int): Number of heketi pods to scale
+ ex: 0, 1 or 2
+ '''
+ heketi_scale_cmd = "oc scale --replicas=%d dc/%s --namespace %s" % (
+ dc_name, pod_amount, namespace)
+ ret, out, err = g.run(hostname, heketi_scale_cmd, "root")
+ if ret != 0:
+ error_msg = ("failed to execute cmd %s "
+ "out- %s err %s" % (heketi_scale_cmd, out, err))
+ g.log.error(error_msg)
+ raise exceptions.ExecutionError(error_msg)
+ get_heketi_podname_cmd = (
+ "oc get pods --all-namespaces -o=custom-columns=:.metadata.name "
+ "--no-headers=true "
+ "--selector deploymentconfig=%s" % dc_name)
+ ret, out, err = g.run(hostname, get_heketi_podname_cmd)
+ if ret != 0:
+ error_msg = ("failed to execute cmd %s "
+ "out- %s err %s" % (get_heketi_podname_cmd, out, err))
+ g.log.error(error_msg)
+ raise exceptions.ExecutionError(error_msg)
+ pod_list = out.strip().split("\n")
+ for pod in pod_list:
+ if pod_amount == 0:
+ wait_for_resource_absence(hostname, 'pod', pod)
+ else:
+ wait_for_pod_be_ready(hostname, pod)
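
A usage sketch for the new scale helper, assuming a heketi dc named "heketi" in a "storage-project" namespace (both names hypothetical):

    from cnslibs.common.openshift_ops import scale_heketi_pod_amount_and_wait

    master = "ocp-master.example.com"   # hypothetical master node
    # scale down and wait until the heketi pod is gone
    scale_heketi_pod_amount_and_wait(master, "heketi",
                                     "storage-project", pod_amount=0)
    # scale back up and wait until the pod is ready again
    scale_heketi_pod_amount_and_wait(master, "heketi",
                                     "storage-project", pod_amount=1)

Note that the helper lists pods right after issuing "oc scale", so the pod_amount=0 branch assumes the terminating pod is still listed at that moment; if the listing comes back empty, the absence wait receives an empty pod name.
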
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
index 70039ba1..d3e42c5c 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_block_p0_cases.py
@@ -5,7 +5,8 @@ from cnslibs.common.dynamic_provisioning import (
create_secret_file,
create_storage_class_file,
get_pvc_status,
- verify_pod_status_running,
+ get_pod_name_from_dc,
+ wait_for_pod_be_ready,
verify_pvc_status_is_bound)
from cnslibs.cns.cns_baseclass import CnsGlusterBlockBaseClass
from cnslibs.common.exceptions import ExecutionError
@@ -68,43 +69,15 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
oc_delete, self.ocp_master_node[0], 'service', pvc_name)
self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', pvc_name)
self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', pvc_name)
- ret = verify_pod_status_running(self.ocp_master_node[0], pvc_name)
+ pod_name = get_pod_name_from_dc(self.ocp_master_node[0], pvc_name)
+ ret = wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)
self.assertTrue(ret, "verify mongodb pod status as running failed")
- cmd = ("oc get pods | grep %s | grep -v deploy "
- "| awk {'print $1'}") % pvc_name
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- pod_name = out.strip().split("\n")[0]
-
cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % mongodb_filepath
ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
cmd, self.ocp_master_node[0]))
- oc_delete(self.ocp_master_node[0], 'pod', pod_name)
- ret = verify_pod_status_running(self.ocp_master_node[0], pvc_name)
- self.assertTrue(ret, "verify mongodb pod status as running failed")
- cmd = ("oc get pods | grep %s | grep -v deploy "
- "| awk {'print $1'}") % pvc_name
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- pod_name = out.strip().split("\n")[0]
- cmd = "dd if=/dev/urandom of=%s bs=1K count=100" % mongodb_filepath
- ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- oc_delete(self.ocp_master_node[0], 'pod', pod_name)
- ret = verify_pod_status_running(self.ocp_master_node[0], pvc_name)
- self.assertTrue(ret, "verify mongodb pod status as running failed")
- cmd = ("oc get pods | grep %s | grep -v deploy "
- "| awk {'print $1'}") % pvc_name
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- pod_name = out.strip().split("\n")[0]
cmd = "ls -lrt %s" % mongodb_filepath
ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
@@ -129,15 +102,10 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
oc_delete, self.ocp_master_node[0], 'service', pvc_name)
self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', pvc_name)
self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', pvc_name)
- ret = verify_pod_status_running(
- self.ocp_master_node[0], pvc_name, wait_step=5, timeout=300)
+ pod_name = get_pod_name_from_dc(self.ocp_master_node[0], pvc_name)
+ ret = wait_for_pod_be_ready(self.ocp_master_node[0], pod_name,
+ wait_step=5, timeout=300)
self.assertTrue(ret, "verify mongodb pod status as running failed")
- cmd = ("oc get pods | grep %s | grep -v deploy "
- "| awk {'print $1'}") % pvc_name
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- pod_name = out.strip().split("\n")[0]
cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
"bs=1K count=100")
ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
@@ -186,8 +154,8 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
# Wait for Heketi POD be up and running
ret, out, err = g.run(self.ocp_master_node[0], get_heketi_podname_cmd)
- ret = verify_pod_status_running(
- self.ocp_master_node[0], out.strip(), wait_step=5, timeout=120)
+ ret = wait_for_pod_be_ready(self.ocp_master_node[0], out.strip(),
+ wait_step=5, timeout=120)
self.assertTrue(ret, "verify heketi pod status as running failed")
# Verify App pod #2
@@ -209,16 +177,11 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
self.assertEqual(status, "Bound", "pvc status of %s "
"is not in Bound state, its state is %s" % (
pvc_name3, status))
- ret = verify_pod_status_running(
- self.ocp_master_node[0], pvc_name3, wait_step=5, timeout=300)
+ pod_name = get_pod_name_from_dc(self.ocp_master_node[0], pvc_name3)
+ ret = wait_for_pod_be_ready(self.ocp_master_node[0], pod_name,
+ wait_step=5, timeout=300)
self.assertTrue(ret, "verify %s pod status as "
"running failed" % pvc_name3)
- cmd = ("oc get pods | grep %s | grep -v deploy "
- "|awk {'print $1'}") % pvc_name3
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- pod_name = out.strip().split("\n")[0]
cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
"bs=1K count=100")
ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
@@ -285,19 +248,12 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
pvc_name4)
self.addCleanup(oc_delete, self.ocp_master_node[0], 'pvc', pvc_name4)
self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc', pvc_name4)
- ret = verify_pod_status_running(self.ocp_master_node[0], pvc_name4)
+ pod_name = get_pod_name_from_dc(self.ocp_master_node[0], pvc_name4)
+ ret = wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)
self.assertTrue(ret, "verify mongodb pod status as running failed")
- cmd = ("oc get pods | grep %s | grep -v deploy "
- "| awk {'print $1'}") % pvc_name4
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- pod_name = out.strip().split("\n")[0]
io_cmd = ("oc rsh %s dd if=/dev/urandom of=/var/lib/mongodb/data/file "
"bs=1000K count=1000") % pod_name
proc = g.run_async(self.ocp_master_node[0], io_cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
gluster_pod_list = get_ocp_gluster_pod_names(self.ocp_master_node[0])
g.log.info("gluster_pod_list - %s" % gluster_pod_list)
gluster_pod_name = gluster_pod_list[0]
@@ -326,8 +282,8 @@ class TestDynamicProvisioningBlockP0(CnsGlusterBlockBaseClass):
cmd, self.ocp_master_node[0]))
new_gluster_pod_name = out.strip().split("\n")[0].strip()
g.log.info("new gluster pod name is %s" % new_gluster_pod_name)
- ret = verify_pod_status_running(self.ocp_master_node[0],
- new_gluster_pod_name)
+ ret = wait_for_pod_be_ready(self.ocp_master_node[0],
+ new_gluster_pod_name)
self.assertTrue(ret, "verify %s pod status as running "
"failed" % new_gluster_pod_name)
ret, out, err = proc.async_communicate()
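
Every test hunk above applies the same substitution: the fragile "oc get pods | grep ... | awk" pipeline, which had to filter deployer pods by hand and was repeated after each pod restart, gives way to one selector-based lookup plus a readiness wait. Condensed, the pattern in these tests looks like this (pvc_name doubles as the dc name the test created):

    # old: shell out and scrape the pod table
    # cmd = ("oc get pods | grep %s | grep -v deploy "
    #        "| awk {'print $1'}") % pvc_name
    # new: ask the API server for the pod behind the dc, then wait on it
    pod_name = get_pod_name_from_dc(self.ocp_master_node[0], pvc_name)
    wait_for_pod_be_ready(self.ocp_master_node[0], pod_name,
                          wait_step=5, timeout=300)
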
diff --git a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
index 9bd85db8..0038c18c 100644
--- a/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
+++ b/tests/functional/common/provisioning/test_dynamic_provisioning_p0_cases.py
@@ -5,7 +5,8 @@ from cnslibs.common.dynamic_provisioning import (
create_secret_file,
create_storage_class_file,
get_pvc_status,
- verify_pod_status_running)
+ get_pod_name_from_dc,
+ wait_for_pod_be_ready)
from cnslibs.cns.cns_baseclass import CnsBaseClass
from cnslibs.common.exceptions import ExecutionError
from cnslibs.common.heketi_ops import (
@@ -72,8 +73,8 @@ class TestDynamicProvisioningP0(CnsBaseClass):
pvc_name)
self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
pvc_name)
- ret = verify_pod_status_running(self.ocp_master_node[0],
- pvc_name)
+ pod_name = get_pod_name_from_dc(self.ocp_master_node[0], pvc_name)
+ ret = wait_for_pod_be_ready(self.ocp_master_node[0], pod_name)
self.assertTrue(ret, "verify mongodb pod status as running failed")
if volname_prefix:
ret = verify_volume_name_prefix(self.ocp_master_node[0],
@@ -81,27 +82,11 @@ class TestDynamicProvisioningP0(CnsBaseClass):
storage_class['secretnamespace'],
pvc_name, resturl)
self.assertTrue(ret, "verify volnameprefix failed")
- cmd = ("oc get pods | grep %s | grep -v deploy "
- "| awk {'print $1'}") % pvc_name
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- pod_name = out.strip().split("\n")[0]
cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
"bs=1K count=100")
ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
cmd, self.ocp_master_node[0]))
- oc_delete(self.ocp_master_node[0], 'pod', pod_name)
- ret = verify_pod_status_running(self.ocp_master_node[0],
- pvc_name)
- self.assertTrue(ret, "verify mongodb pod status as running failed")
- cmd = ("oc get pods | grep %s | grep -v deploy "
- "| awk {'print $1'}") % pvc_name
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- pod_name = out.strip().split("\n")[0]
cmd = "ls -lrt /var/lib/mongodb/data/file"
ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
@@ -170,15 +155,10 @@ class TestDynamicProvisioningP0(CnsBaseClass):
pvc_name2)
self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
pvc_name2)
- ret = verify_pod_status_running(
- self.ocp_master_node[0], pvc_name2, wait_step=5, timeout=300)
+ pod_name = get_pod_name_from_dc(self.ocp_master_node[0], pvc_name2)
+ ret = wait_for_pod_be_ready(self.ocp_master_node[0], pod_name,
+ wait_step=5, timeout=300)
self.assertTrue(ret, "verify mongodb pod status as running failed")
- cmd = ("oc get pods | grep %s | grep -v deploy "
- "|awk {'print $1'}") % pvc_name2
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- pod_name = out.strip().split("\n")[0]
cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
"bs=1K count=100")
ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
@@ -227,8 +207,8 @@ class TestDynamicProvisioningP0(CnsBaseClass):
# Wait for Heketi POD be up and running
ret, out, err = g.run(self.ocp_master_node[0], get_heketi_podname_cmd)
- ret = verify_pod_status_running(
- self.ocp_master_node[0], out.strip(), wait_step=5, timeout=120)
+ ret = wait_for_pod_be_ready(self.ocp_master_node[0], out.strip(),
+ wait_step=5, timeout=120)
self.assertTrue(ret, "verify heketi pod status as running failed")
# Verify App pod #2
@@ -256,7 +236,7 @@ class TestDynamicProvisioningP0(CnsBaseClass):
oc_create(self.ocp_master_node[0], file_path)
for w in Waiter(600, 30):
ret, status = get_pvc_status(self.ocp_master_node[0],
- pvc_name3)
+ pvc_name3)
self.assertTrue(ret, "failed to get pvc status of %s" % (
pvc_name3))
if status != "Bound":
@@ -273,17 +253,11 @@ class TestDynamicProvisioningP0(CnsBaseClass):
self.assertEqual(status, "Bound", "pvc status of %s "
"is not in Bound state, its state is %s" % (
pvc_name3, status))
- ret = verify_pod_status_running(
- self.ocp_master_node[0], pvc_name3, wait_step=5, timeout=300)
+ pod_name = get_pod_name_from_dc(self.ocp_master_node[0], pvc_name3)
+ ret = wait_for_pod_be_ready(self.ocp_master_node[0], pod_name,
+ wait_step=5, timeout=300)
self.assertTrue(ret, "verify %s pod status "
"as running failed" % pvc_name3)
-
- cmd = ("oc get pods | grep %s | grep -v deploy "
- "|awk {'print $1'}") % pvc_name3
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- pod_name = out.strip().split("\n")[0]
cmd = ("dd if=/dev/urandom of=/var/lib/mongodb/data/file "
"bs=1K count=100")
ret, out, err = oc_rsh(self.ocp_master_node[0], pod_name, cmd)
@@ -337,20 +311,13 @@ class TestDynamicProvisioningP0(CnsBaseClass):
pvc_name4)
self.addCleanup(oc_delete, self.ocp_master_node[0], 'dc',
pvc_name4)
- ret = verify_pod_status_running(self.ocp_master_node[0],
- pvc_name4)
+ pod_name = get_pod_name_from_dc(self.ocp_master_node[0], pvc_name4)
+ ret = wait_for_pod_be_ready(self.ocp_master_node[0], pod_name,
+ wait_step=5, timeout=300)
self.assertTrue(ret, "verify mongodb pod status as running failed")
- cmd = ("oc get pods | grep %s | grep -v deploy "
- "|awk {'print $1'}") % pvc_name4
- ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
- pod_name = out.strip().split("\n")[0]
io_cmd = ("oc rsh %s dd if=/dev/urandom of=/var/lib/mongodb/data/file "
"bs=1000K count=1000") % pod_name
proc = g.run_async(self.ocp_master_node[0], io_cmd, "root")
- self.assertEqual(ret, 0, "failed to execute command %s on %s" % (
- cmd, self.ocp_master_node[0]))
gluster_pod_list = get_ocp_gluster_pod_names(self.ocp_master_node[0])
g.log.info("gluster_pod_list - %s" % gluster_pod_list)
gluster_pod_name = gluster_pod_list[0]
@@ -379,8 +346,8 @@ class TestDynamicProvisioningP0(CnsBaseClass):
cmd, self.ocp_master_node[0]))
new_gluster_pod_name = out.strip().split("\n")[0].strip()
g.log.info("new gluster pod name is %s" % new_gluster_pod_name)
- ret = verify_pod_status_running(self.ocp_master_node[0],
- new_gluster_pod_name)
+ ret = wait_for_pod_be_ready(self.ocp_master_node[0],
+ new_gluster_pod_name)
self.assertTrue(ret, "verify %s pod status as running "
"failed" % new_gluster_pod_name)
ret, out, err = proc.async_communicate()