summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKrishnaram Karthick Ramdoss <kramdoss@redhat.com>2018-01-31 05:49:49 +0000
committerGerrit Code Review <gerrit2@gerrit.host.prod.eng.bos.redhat.com>2018-01-31 05:49:49 +0000
commitc75ef4632f8323fafeb7f0c5657f0f51695c5db7 (patch)
tree77f9d868a2b5771e8b06bbf148e11a70570b0221
parent9d82673846f107b79c5576cdc0d2320fc79de55c (diff)
parent287b2c341e9dd035cefcb0ee04bdb638e36ab99f (diff)
Merge "CNS: adding libraries for dynamic provisioning"
-rw-r--r--cns-libs/cnslibs/common/dynamic_provisioning.py318
-rw-r--r--cns-libs/cnslibs/common/mongodb-template.json255
-rw-r--r--cns-libs/cnslibs/common/sample-glusterfs-pvc-claim.json20
-rw-r--r--cns-libs/cnslibs/common/sample-glusterfs-secret.yaml10
-rw-r--r--cns-libs/cnslibs/common/sample-glusterfs-storageclass.yaml7
-rw-r--r--cns-libs/cnslibs/common/sample-nginx-pod.yaml18
-rw-r--r--tests/__init__.py0
-rw-r--r--tests/functional/__init__.py0
-rw-r--r--tests/functional/common/test_dynamic_provisioning.py86
9 files changed, 714 insertions, 0 deletions
diff --git a/cns-libs/cnslibs/common/dynamic_provisioning.py b/cns-libs/cnslibs/common/dynamic_provisioning.py
new file mode 100644
index 00000000..9d6a062f
--- /dev/null
+++ b/cns-libs/cnslibs/common/dynamic_provisioning.py
@@ -0,0 +1,318 @@
+from collections import OrderedDict
+from cnslibs.common.waiter import Waiter
+from glusto.core import Glusto as g
+from glustolibs.misc.misc_libs import upload_scripts
+import json
+import rtyaml
+import time
+
+
+def create_pvc_file(hostname, claim_name, storage_class, size):
+    '''
+    Create a PersistentVolumeClaim (PVC) definition file on a remote host.
+
+    Loads the local sample claim template, fills in the claim name,
+    storage-class annotation and requested size, then writes the result
+    to /<claim_name>.json on ``hostname`` over an rpyc connection.
+    Args:
+        hostname (str): hostname on which we need to
+                        create the pvc file
+        claim_name (str): name of the claim
+                          ex: storage-claim1
+        storage_class (str): name of the storage class
+        size (int): size of the claim in GB
+                    ex: 10 (for 10GB claim)
+    Returns:
+        bool: True if successful,
+              otherwise False
+    '''
+    # Customize the sample template; OrderedDict preserves the template's
+    # key order so the dumped json keeps its original layout.
+    with open("cnslibs/common/sample-glusterfs-pvc-claim.json") as data_file:
+        data = json.load(data_file, object_pairs_hook=OrderedDict)
+    data['metadata']['annotations'][
+        'volume.beta.kubernetes.io/storage-class'] = storage_class
+    data['metadata']['name'] = claim_name
+    data['spec']['resources']['requests']['storage'] = "%dGi" % size
+    try:
+        conn = g.rpyc_get_connection(hostname, user="root")
+        if conn is None:
+            g.log.error("Failed to get rpyc connection of node %s"
+                        % hostname)
+            return False
+
+        # Write the customized claim to /<claim_name>.json on the remote node.
+        with conn.builtin.open('/%s.json' % claim_name, 'w') as data_file:
+            json.dump(data, data_file, sort_keys=False,
+                      indent=4, ensure_ascii=False)
+    except Exception as err:
+        g.log.error("failed to create pvc file %s" % err)
+        return False
+    finally:
+        # Release the rpyc connection on every exit path.
+        g.rpyc_close_connection(hostname, user="root")
+    g.log.info("creation of pvc file %s successful" % claim_name)
+    return True
+
+
+def create_app_pod_file(hostname, claim_name, app_name, sample_app_name):
+    '''
+    Create an application pod definition file on a remote host.
+
+    Loads the local sample-<sample_app_name>-pod.yaml template, points its
+    persistentVolumeClaim at ``claim_name`` and renames pod/container to
+    ``app_name``, then writes /<app_name>.yaml on ``hostname`` over rpyc.
+    Args:
+        hostname (str): hostname on which we need to
+                        create the app pod file
+        claim_name (str): name of the claim
+                          ex: storage-claim1
+        app_name (str): name of the app-pod to create
+                        ex: nginx1
+        sample_app_name (str): sample-app-pod-name
+                               ex: nginx
+    Returns:
+        bool: True if successful,
+              otherwise False
+    '''
+    data = rtyaml.load(open("cnslibs/common/sample-%s-pod."
+                            "yaml" % sample_app_name))
+    # Rewire the template's first volume claim and rename pod + container.
+    data['spec']['volumes'][0]['persistentVolumeClaim'][
+        'claimName'] = claim_name
+    data['metadata']['name'] = app_name
+    data['spec']['containers'][0]['name'] = app_name
+    try:
+        conn = g.rpyc_get_connection(hostname, user="root")
+        if conn is None:
+            g.log.error("Failed to get rpyc connection of node %s"
+                        % hostname)
+            return False
+        rtyaml.dump(data, conn.builtin.open('/%s.yaml' % app_name, "w"))
+    except Exception as err:
+        g.log.error("failed to create app file %s" % err)
+        return False
+    finally:
+        # Release the rpyc connection on every exit path.
+        g.rpyc_close_connection(hostname, user="root")
+    g.log.info("creation of %s app file successful" % app_name)
+    return True
+
+
+def create_secret_file(hostname, secret_name, namespace,
+                       data_key, secret_type):
+    '''
+    Create a Kubernetes Secret yaml file on a remote host.
+
+    Loads the local sample secret template, fills in name, namespace,
+    data key and type, then writes /<secret_name>.yaml on ``hostname``
+    over an rpyc connection.
+    Args:
+        hostname (str): hostname on which we need to create
+                        the secret yaml file
+        secret_name (str): secret name ex: heketi-secret
+        namespace (str): namespace ex: storage-project
+        data_key (str): base64-encoded data-key ex: cGFzc3dvcmQ=
+        secret_type (str): type ex: kubernetes.io/glusterfs
+                           or gluster.org/glusterblock
+    Returns:
+        bool: True if successful,
+              otherwise False
+    '''
+    data = rtyaml.load(open("cnslibs/common/sample-glusterfs-secret.yaml"))
+
+    data['metadata']['name'] = secret_name
+    data['data']['key'] = data_key
+    data['metadata']['namespace'] = namespace
+    data['type'] = secret_type
+    try:
+        conn = g.rpyc_get_connection(hostname, user="root")
+        if conn is None:
+            g.log.error("Failed to get rpyc connection of node %s"
+                        % hostname)
+            return False
+        rtyaml.dump(data, conn.builtin.open('/%s.yaml' % secret_name, "w"))
+    except Exception as err:
+        g.log.error("failed to create %s.yaml file %s" % (secret_name, err))
+        return False
+    finally:
+        # Release the rpyc connection on every exit path.
+        g.rpyc_close_connection(hostname, user="root")
+    g.log.info("creation of %s.yaml file successful" % secret_name)
+    return True
+
+
+def create_storage_class_file(hostname, sc_name, resturl,
+                              provisioner, **kwargs):
+    '''
+    Create a StorageClass yaml file on a remote host.
+
+    Loads the local sample storageclass template, fills in name, resturl
+    and provisioner plus any recognized optional parameters, then writes
+    /<sc_name>-<provisioner suffix>-storage-class.yaml on ``hostname``.
+    Args:
+        hostname (str): hostname on which we need to create
+                        the storageclass yaml file
+        sc_name (str): storageclass name ex: fast
+        resturl (str): resturl
+            ex: http://heketi-storage-project.cloudapps.mystorage.com
+        provisioner (str): provisioner
+                           ex: kubernetes.io/glusterfs
+                           or gluster.org/glusterblock
+    Kwargs:
+        **kwargs
+            Recognized keys (any other key is silently ignored):
+            restuser:str ex: username: test-admin
+            hacount:int ex: hacount:3
+            clusterids:str
+                ex: clusterids: "630372ccdc720a92c681fb928f27b53f"
+            chapauthenabled:bool ex: chapauthenabled:True/False
+            restauthenabled:bool ex: restauthenabled:True/False
+            secretnamespace:str ex: secretnamespace:"storage-project"
+            secretname:str ex: secretname:"heketi-secret"
+            restsecretnamespace:str
+                ex: restsecretnamespace:"storage-project"
+            restsecretname:str ex: restsecretname:"heketi-secret"
+    Returns:
+        bool: True if successful,
+              otherwise False
+    '''
+    data = rtyaml.load(open("cnslibs/common/sample-glusterfs"
+                            "-storageclass.yaml"))
+
+    data['metadata']['name'] = sc_name
+    data['parameters']['resturl'] = resturl
+    data['provisioner'] = provisioner
+
+    # Copy only the whitelisted optional parameters; NOTE(review): a falsy
+    # value (False, 0, "") is skipped by this truthiness test, not written.
+    for key in ('secretnamespace', 'restuser', 'secretname',
+                'restauthenabled', 'restsecretnamespace',
+                'restsecretname', 'hacount', 'clusterids',
+                'chapauthenabled'):
+        if kwargs.get(key):
+            data['parameters'][key] = kwargs.get(key)
+
+    try:
+        conn = g.rpyc_get_connection(hostname, user="root")
+        if conn is None:
+            g.log.error("Failed to get rpyc connection of node %s"
+                        % hostname)
+            return False
+        # File name embeds the part after "/" in the provisioner,
+        # e.g. kubernetes.io/glusterfs -> <sc_name>-glusterfs-....
+        provisioner_name = provisioner.split("/")
+        file_path = ("/%s-%s-storage-class"
+                     ".yaml" % (
+                         sc_name, provisioner_name[1]))
+        rtyaml.dump(data, conn.builtin.open(file_path, "w"))
+    except Exception as err:
+        g.log.error("failed to create storage-class file %s" % err)
+        return False
+    finally:
+        # Release the rpyc connection on every exit path.
+        g.rpyc_close_connection(hostname, user="root")
+    g.log.info("creation of %s-storage-class file successful" % sc_name)
+    return True
+
+
+def verify_pod_status_running(hostname, pod_name,
+                              timeout=1200, wait_step=60):
+    '''
+    Wait until the named pod reports status "Running".
+
+    Polls ``oc get pods`` on ``hostname`` every ``wait_step`` seconds
+    (deploy pods are filtered out) until the pod is Running, hits a
+    terminal/unknown state, or ``timeout`` expires.
+    Args:
+        hostname (str): hostname on which we want to check the pod status
+        pod_name (str): pod_name for which we need the status
+        timeout (int): timeout value; while the pod is still coming up
+                       the status is re-checked every wait_step seconds
+                       until timeout, default value is 1200 sec
+        wait_step (int): wait step,
+                         default value is 60 sec
+    Returns:
+        bool: True if pod status is Running,
+              otherwise False
+
+    '''
+    status_flag = False
+    for w in Waiter(timeout, wait_step):
+        # Third column of `oc get pods` is the STATUS field.
+        cmd = ("oc get pods | grep '%s'| grep -v deploy | "
+               "awk '{print $3}'") % pod_name
+        ret, out, err = g.run(hostname, cmd, "root")
+        if ret != 0:
+            g.log.error("failed to execute cmd %s" % cmd)
+            break
+        # Only the first matching pod's status is considered.
+        output = out.strip().split("\n")[0].strip()
+        if output == "":
+            g.log.info("pod not found sleeping for %s "
+                       "sec" % wait_step)
+            continue
+        elif output == "ContainerCreating":
+            g.log.info("pod creating sleeping for %s "
+                       "sec" % wait_step)
+            continue
+        elif output == "Running":
+            status_flag = True
+            g.log.info("pod %s is up and running" % pod_name)
+            break
+        elif output == "Error":
+            g.log.error("pod %s status error" % pod_name)
+            break
+        elif output == "Terminating":
+            g.log.info("pod is terminating state sleeping "
+                       "for %s sec" % wait_step)
+            continue
+        else:
+            # Any unrecognized status is treated as a failure.
+            g.log.error("pod %s has different status - "
+                        "%s" % (pod_name, output))
+            break
+    # `w` is the last Waiter value from the loop; expired means the
+    # timeout elapsed without reaching the Running state.
+    if w.expired:
+        g.log.error("exceeded timeout %s for verifying running "
+                    "status of pod %s" % (timeout, pod_name))
+        return False
+    return status_flag
+
+
+def create_mongodb_pod(hostname, pvc_name, pvc_size, sc_name):
+    '''
+    Create a mongodb pod from the mongodb template via ``oc new-app``.
+
+    Uploads the local mongodb template to the host, patches its PVC
+    storage-class annotation to ``sc_name``, writes the patched template
+    to /<pvc_name>.json and instantiates it with ``oc new-app``.
+    Args:
+        hostname (str): hostname on which we want to create
+                        the mongodb pod
+        pvc_name (str): name of the pvc
+                        ex: pvc-claim1
+        pvc_size (int): volume capacity in GB passed to the template
+        sc_name (str): name of the storage class
+                       ex: fast
+    Returns: True if successful,
+             False otherwise
+    '''
+    ret = upload_scripts(hostname,
+                         "cnslibs/common/mongodb-template.json",
+                         "/tmp/app-templates", "root")
+    if not ret:
+        # NOTE(review): "mongodp" typo in this runtime log message is
+        # preserved as-is (runtime string).
+        g.log.error("Failed to upload mongodp template to %s" % hostname)
+        return False
+    try:
+        conn = g.rpyc_get_connection(hostname, user="root")
+        if conn is None:
+            g.log.error("Failed to get rpyc connection of node %s"
+                        % hostname)
+            return False
+        with conn.builtin.open(
+                '/tmp/app-templates/mongodb-template.json', 'r') as data_file:
+            data = json.load(data_file, object_pairs_hook=OrderedDict)
+        # objects[1] is the PersistentVolumeClaim entry in the template;
+        # point its storage-class annotation at the requested class.
+        data['objects'][1]['metadata']['annotations'][
+            'volume.beta.kubernetes.io/storage-class'] = sc_name
+        with conn.builtin.open('/%s.json' % pvc_name, 'w') as data_file:
+            json.dump(data, data_file, sort_keys=False,
+                      indent=4, ensure_ascii=False)
+        # pvc_name doubles as the DATABASE_SERVICE_NAME template parameter.
+        cmd = ("oc new-app /%s.json --param=DATABASE_SERVICE_NAME=%s "
+               "--param=VOLUME_CAPACITY=%sGi") % (
+                   pvc_name, pvc_name, pvc_size)
+        ret, out, err = g.run(hostname, cmd, "root")
+        if ret != 0:
+            g.log.error("failed to execute cmd %s on %s" % (
+                cmd, hostname))
+            return False
+
+    except Exception as err:
+        g.log.error("failed to create mongodb pod %s" % err)
+        return False
+    finally:
+        # Release the rpyc connection on every exit path.
+        g.rpyc_close_connection(hostname, user="root")
+    g.log.info("creation of mongodb pod successfull")
+    return True
+
+
+def get_pvc_status(hostname, pvc_name):
+    '''
+    Fetch the status column of a PersistentVolumeClaim via ``oc get pvc``.
+    Args:
+        hostname (str): hostname on which we want
+                        to check the pvc status
+        pvc_name (str): name of the pvc for which we
+                        need the status
+    Returns:
+        bool, status (str): True, status of pvc (e.g. "Bound")
+                            otherwise False, error message.
+    '''
+    # Second column of `oc get pvc` is the STATUS field.
+    cmd = "oc get pvc | grep %s | awk '{print $2}'" % pvc_name
+    ret, out, err = g.run(hostname, cmd, "root")
+    if ret != 0:
+        g.log.error("failed to execute cmd %s" % cmd)
+        return False, err
+    # Only the first matching pvc's status is returned.
+    output = out.strip().split("\n")[0].strip()
+    return True, output
diff --git a/cns-libs/cnslibs/common/mongodb-template.json b/cns-libs/cnslibs/common/mongodb-template.json
new file mode 100644
index 00000000..60938bb8
--- /dev/null
+++ b/cns-libs/cnslibs/common/mongodb-template.json
@@ -0,0 +1,255 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "MongoDB (Persistent)",
+ "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-mongodb",
+ "tags": "database,mongodb"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
+ "labels": {
+ "template": "mongodb-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongo",
+ "protocol": "TCP",
+ "port": 27017,
+ "targetPort": 27017,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "volume.beta.kubernetes.io/storage-class": "gluster-block"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mongodb:${MONGODB_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 3,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 27017
+ }
+ },
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${MONGODB_USER}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${MONGODB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${MONGODB_ADMIN_PASSWORD}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mongodb/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false,
+ "runAsUser": 0,
+ "supplementalGroups": 0
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "64Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mongodb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_USER",
+ "displayName": "MongoDB Connection Username",
+ "description": "Username for MongoDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "displayName": "MongoDB Connection Password",
+ "description": "Password for the MongoDB connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "displayName": "MongoDB Database Name",
+ "description": "Name of the MongoDB database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "displayName": "MongoDB Admin Password",
+ "description": "Password for the database admin user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "4Gi",
+ "required": true
+ },
+ {
+ "name": "MONGODB_VERSION",
+ "displayName": "Version of MongoDB Image",
+ "description": "Version of MongoDB image to be used (2.4, 2.6, 3.2 or latest).",
+ "value": "3.2",
+ "required": true
+ }
+ ]
+}
diff --git a/cns-libs/cnslibs/common/sample-glusterfs-pvc-claim.json b/cns-libs/cnslibs/common/sample-glusterfs-pvc-claim.json
new file mode 100644
index 00000000..3bc22506
--- /dev/null
+++ b/cns-libs/cnslibs/common/sample-glusterfs-pvc-claim.json
@@ -0,0 +1,20 @@
+{
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "claim1",
+ "annotations": {
+ "volume.beta.kubernetes.io/storage-class": "gold"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "100Gi"
+ }
+ }
+ }
+}
diff --git a/cns-libs/cnslibs/common/sample-glusterfs-secret.yaml b/cns-libs/cnslibs/common/sample-glusterfs-secret.yaml
new file mode 100644
index 00000000..c9001764
--- /dev/null
+++ b/cns-libs/cnslibs/common/sample-glusterfs-secret.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: heketi-secret
+ namespace: default
+data:
+ #base64 encoded password. E.g.: echo -n "mypassword" | base64
+ key: cGFzc3dvcmQ=
+type: kubernetes.io/glusterfs
+
diff --git a/cns-libs/cnslibs/common/sample-glusterfs-storageclass.yaml b/cns-libs/cnslibs/common/sample-glusterfs-storageclass.yaml
new file mode 100644
index 00000000..a1515fe8
--- /dev/null
+++ b/cns-libs/cnslibs/common/sample-glusterfs-storageclass.yaml
@@ -0,0 +1,7 @@
+apiVersion: storage.k8s.io/v1beta1
+kind: StorageClass
+metadata:
+ name: slow
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://heketi-storage-project.cloudapps.mystorage.com"
diff --git a/cns-libs/cnslibs/common/sample-nginx-pod.yaml b/cns-libs/cnslibs/common/sample-nginx-pod.yaml
new file mode 100644
index 00000000..b820a42a
--- /dev/null
+++ b/cns-libs/cnslibs/common/sample-nginx-pod.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+id: gluster-nginx-pvc
+kind: Pod
+metadata:
+ name: gluster-nginx-pod
+spec:
+ containers:
+ - name: gluster-nginx-pod
+ image: fedora/nginx
+ volumeMounts:
+ - mountPath: /var/www/html
+ name: gluster-volume-claim
+ securityContext:
+ privileged: true
+ volumes:
+ - name: gluster-volume-claim
+ persistentVolumeClaim:
+ claimName: claim
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/tests/functional/__init__.py
diff --git a/tests/functional/common/test_dynamic_provisioning.py b/tests/functional/common/test_dynamic_provisioning.py
new file mode 100644
index 00000000..8428f2e6
--- /dev/null
+++ b/tests/functional/common/test_dynamic_provisioning.py
@@ -0,0 +1,86 @@
+from cnslibs.cns.cns_baseclass import CnsSetupBaseClass
+from cnslibs.common.dynamic_provisioning import (
+ create_secret_file,
+ create_storage_class_file,
+ create_pvc_file,
+ create_app_pod_file)
+from cnslibs.common.openshift_ops import oc_create
+from glusto.core import Glusto as g
+
+
+class TestDynamicProvisioning(CnsSetupBaseClass):
+    '''
+    Basic dynamic provisioning test: create a storage class and secret,
+    provision a batch of PVCs against them, then start app pods bound to
+    those claims.
+    '''
+    @classmethod
+    def setUpClass(cls):
+        # Deploy CNS once for the whole class before any test runs.
+        super(TestDynamicProvisioning, cls).setUpClass()
+        super(TestDynamicProvisioning, cls).cns_deploy()
+
+    def test_dynamic_provisioning(self):
+        g.log.info("testcase to test basic dynamic provisioning")
+        # --- storage class: build the yaml file and create the resource ---
+        storage_class = self.cns_storage_class['storage_class1']
+        sc_name = storage_class['name']
+        ret = create_storage_class_file(
+            self.ocp_master_node[0],
+            sc_name,
+            storage_class['resturl'],
+            storage_class['provisioner'],
+            restuser=storage_class['restuser'],
+            secretnamespace=storage_class['secretnamespace'],
+            secretname=storage_class['secretname'])
+        self.assertTrue(ret, "creation of storage-class file failed")
+        # Path must match the naming scheme used by
+        # create_storage_class_file: /<sc>-<provisioner suffix>-storage-class.yaml
+        provisioner_name = storage_class['provisioner'].split("/")
+        file_path = ("/%s-%s-storage-class.yaml" % (
+            sc_name, provisioner_name[1]))
+        oc_create(self.ocp_master_node[0], file_path)
+        # --- secret: build the yaml file and create the resource ---
+        secret = self.cns_secret['secret1']
+        ret = create_secret_file(self.ocp_master_node[0],
+                                 secret['secret_name'],
+                                 secret['namespace'],
+                                 secret['data_key'],
+                                 secret['type'])
+        self.assertTrue(ret, "creation of heketi-secret file failed")
+        oc_create(self.ocp_master_node[0],
+                  "/%s.yaml" % secret['secret_name'])
+        # --- pvcs: create the configured number of claims per size ---
+        count = self.start_count_for_pvc
+        for size, pvc in self.cns_pvc_size_number_dict.items():
+            for i in range(1, pvc + 1):
+                pvc_name = "pvc-claim%d" % count
+                g.log.info("starting creation of claim file "
+                           "for %s", pvc_name)
+                ret = create_pvc_file(self.ocp_master_node[0],
+                                      pvc_name, sc_name, size)
+                self.assertTrue(ret, "create pvc file - %s failed" % pvc_name)
+                file_path = "/pvc-claim%d.json" % count
+                g.log.info("starting to create claim %s", pvc_name)
+                oc_create(self.ocp_master_node[0], file_path)
+                count = count + 1
+        # List all pvc-claim names on the cluster and subtract the ones
+        # that existed before this run to get the claims created above.
+        cmd = 'oc get pvc | grep pvc-claim | awk \'{print $1}\''
+        ret, out, err = g.run(self.ocp_master_node[0], cmd, "root")
+        self.assertEqual(ret, 0, "failed to execute cmd %s on %s err %s" % (
+            cmd, self.ocp_master_node[0], out))
+        complete_pvc_list = out.strip().split("\n")
+        complete_pvc_list = map(str.strip, complete_pvc_list)
+        count = self.start_count_for_pvc
+        exisisting_pvc_list = []
+        for i in range(1, count):
+            exisisting_pvc_list.append("pvc-claim%d" % i)
+        pvc_list = list(set(complete_pvc_list) - set(exisisting_pvc_list))
+        # --- app pods: bind each new claim to an app pod and create it ---
+        index = 0
+        for key, value in self.app_pvc_count_dict.items():
+            for i in range(1, value + 1):
+                claim_name = pvc_list[index]
+                app_name = key + str(count)
+                sample_app_name = key
+                g.log.info("starting to create app_pod_file for %s", app_name)
+                ret = create_app_pod_file(
+                    self.ocp_master_node[0], claim_name,
+                    app_name, sample_app_name)
+                self.assertTrue(
+                    ret, "creating app-pod file - %s failed" % app_name)
+                file_path = "/%s.yaml" % app_name
+                g.log.info("starting to create app_pod_%s", app_name)
+                oc_create(self.ocp_master_node[0], file_path)
+                index = index + 1
+                count = count + 1