Diffstat (limited to 'deployment/playbooks')
-rw-r--r--  deployment/playbooks/add-node-prerequisite.yaml  16
-rw-r--r--  deployment/playbooks/add-node.yaml  142
-rw-r--r--  deployment/playbooks/clean.yaml  66
-rw-r--r--  deployment/playbooks/cleanup-cns.yaml  38
-rw-r--r--  deployment/playbooks/cleanup-crs.yaml  38
-rw-r--r--  deployment/playbooks/cns-node-setup.yaml  131
-rw-r--r--  deployment/playbooks/cns-setup.yaml  164
-rw-r--r--  deployment/playbooks/cns-storage.yaml  15
-rw-r--r--  deployment/playbooks/crs-node-setup.yaml  123
-rw-r--r--  deployment/playbooks/crs-setup.yaml  209
-rw-r--r--  deployment/playbooks/crs-storage.yaml  12
-rw-r--r--  deployment/playbooks/gather_logs.yaml  883
-rw-r--r--  deployment/playbooks/generate-tests-config.yaml  140
-rw-r--r--  deployment/playbooks/get_ocp_info.yaml  233
l---------  deployment/playbooks/library/rpm_q.py  1
-rw-r--r--  deployment/playbooks/library/vmware_folder.py  268
-rw-r--r--  deployment/playbooks/library/vmware_resource_pool.py  361
-rw-r--r--  deployment/playbooks/node-setup.yaml  92
-rw-r--r--  deployment/playbooks/noop.yaml  7
-rw-r--r--  deployment/playbooks/ocp-configure.yaml  16
-rw-r--r--  deployment/playbooks/ocp-end-to-end.yaml  15
-rw-r--r--  deployment/playbooks/ocp-install.yaml  365
-rw-r--r--  deployment/playbooks/prerequisite.yaml  26
-rw-r--r--  deployment/playbooks/prod-ose-cns.yaml  11
-rw-r--r--  deployment/playbooks/prod-ose-crs.yaml  11
-rw-r--r--  deployment/playbooks/prod.yaml  19
-rw-r--r--  deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml  13
-rw-r--r--  deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2  11
-rw-r--r--  deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml  3
-rw-r--r--  deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml  8
-rw-r--r--  deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml  142
-rw-r--r--  deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml  143
-rw-r--r--  deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml  157
-rw-r--r--  deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml  66
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml  7
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml  39
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2  4
-rw-r--r--  deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2  7
-rw-r--r--  deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml  15
-rw-r--r--  deployment/playbooks/roles/etcd-storage/tasks/main.yaml  24
-rw-r--r--  deployment/playbooks/roles/gluster-ports/defaults/main.yaml  3
-rw-r--r--  deployment/playbooks/roles/gluster-ports/tasks/main.yaml  34
-rw-r--r--  deployment/playbooks/roles/instance-groups/tasks/main.yaml  152
-rw-r--r--  deployment/playbooks/roles/master-prerequisites/tasks/main.yaml  6
-rw-r--r--  deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml  5
-rw-r--r--  deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml  27
-rw-r--r--  deployment/playbooks/roles/package-repos/tasks/main.yaml  23
-rw-r--r--  deployment/playbooks/roles/prerequisites/defaults/main.yaml  6
l---------  deployment/playbooks/roles/prerequisites/library/openshift_facts.py  1
-rw-r--r--  deployment/playbooks/roles/prerequisites/library/rpm_q.py  73
-rw-r--r--  deployment/playbooks/roles/prerequisites/tasks/main.yaml  84
-rw-r--r--  deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml  14
-rw-r--r--  deployment/playbooks/roles/rhsm/defaults/main.yaml  5
-rw-r--r--  deployment/playbooks/roles/rhsm/tasks/main.yaml  49
-rw-r--r--  deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml  83
-rw-r--r--  deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml  29
-rw-r--r--  deployment/playbooks/roles/storage-class-configure/tasks/main.yaml  22
-rw-r--r--  deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2  8
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml  6
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml  89
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2  19
-rw-r--r--  deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml  3
-rw-r--r--  deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml  48
-rw-r--r--  deployment/playbooks/scaleup.yaml  35
-rw-r--r--  deployment/playbooks/setup.yaml  27
-rw-r--r--  deployment/playbooks/vars/main.yaml  76
66 files changed, 0 insertions, 4968 deletions
diff --git a/deployment/playbooks/add-node-prerequisite.yaml b/deployment/playbooks/add-node-prerequisite.yaml
deleted file mode 100644
index f43b3545..00000000
--- a/deployment/playbooks/add-node-prerequisite.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: new_nodes
- gather_facts: yes
- become: yes
- vars_files:
- - vars/main.yaml
- roles:
- - package-repos
-
-- hosts: new_nodes
- gather_facts: no
- become: yes
- vars_files:
- - vars/main.yaml
- roles:
- - prerequisites
diff --git a/deployment/playbooks/add-node.yaml b/deployment/playbooks/add-node.yaml
deleted file mode 100644
index 51971644..00000000
--- a/deployment/playbooks/add-node.yaml
+++ /dev/null
@@ -1,142 +0,0 @@
----
-- hosts: localhost
- connection: local
- gather_facts: no
- become: no
- vars_files:
- - vars/main.yaml
- roles:
- - create-vm-add-prod-ose
- - setup-custom-domain-names-for-ansible-runner
-
-- hosts: new_nodes
- gather_facts: yes
- become: no
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
- - instance-groups
- - package-repos
- - vmware-guest-setup
- - cloud-provider-setup
- - docker-storage-setup
- - openshift-volume-quota
-
-# The 'openshift_node_groups' var became required starting with OCP 3.10
-- hosts: allnodes
- gather_facts: no
- become: no
- tasks:
- - set_fact:
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
-
-- include: add-node-prerequisite.yaml
- when: openshift_vers in ['v3_6', 'v3_7']
-
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
- 'noop.yaml',
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml'
- ) }} hosts=new_nodes"
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
- 'noop.yaml',
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/init/main.yml'
- ) }} hosts=new_nodes"
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- name: Map domain names and IP addresses of old and new nodes to each other
- hosts: master, compute, cns, crs, !new_nodes
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
-
-- include: node-setup.yaml
-
-- hosts: allnodes
- gather_facts: no
- become: no
- tasks:
- - name: Make sure dnsmasq is running, enabled and restarted
- service: name=dnsmasq state=restarted enabled=yes
-
-- hosts: localhost
- gather_facts: no
- become: no
- roles:
- - yum-update-and-reboot
-
-- hosts: single_master
- gather_facts: no
- tasks:
- - name: Make sure oc client is responsive
- command: oc status
- retries: 120
- delay: 5
- register: oc_status_result
- until: oc_status_result is succeeded
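A note on the version-gated includes in add-node.yaml above: a playbook-level 'include:' is resolved when the file is parsed, so its path has to point at a loadable playbook even when the accompanying 'when:' later skips it; the 'noop.yaml' branch of the ternary presumably exists so that parsing still succeeds on OCP 3.6/3.7 environments where the openshift-ansible playbook does not exist at that path. The trailing 'hosts=new_nodes' is the old key=value form of passing a variable into the included playbook. A minimal sketch of the same pattern, with a hypothetical 'real-work.yaml' standing in for the external playbook:

- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
    'noop.yaml',
    'real-work.yaml') }} hosts=new_nodes"
  when: openshift_vers not in ['v3_6', 'v3_7']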
diff --git a/deployment/playbooks/clean.yaml b/deployment/playbooks/clean.yaml
deleted file mode 100644
index 68da95ec..00000000
--- a/deployment/playbooks/clean.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- hosts: localhost
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- - instance-groups
-
-- hosts: allnodes
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- - rhsm-unregister
-
-- hosts: localhost
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- tasks:
- - name: Delete all added VMs
- vmware_guest:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- validate_certs: False
- name: "{{ hostvars[item].inventory_hostname }}"
- datacenter: "{{ vcenter_datacenter }}"
- cluster: "{{ vcenter_cluster }}"
- resource_pool: "{{ vcenter_resource_pool }}"
- folder: "/{{ vcenter_datacenter }}/vm/{{ vcenter_folder }}"
- state: absent
- force: true
- with_items: "{{ groups['allnodes'] }}"
-
- - name: Get current user home dir
- shell: 'eval echo "~$USER"'
- register: home_dir
- - name: Set hosts files paths
- set_fact:
- home_hosts_file: "{{ home_dir.stdout_lines[0] + '/.ssh/config' }}"
- system_hosts_file: "/etc/hosts"
- - name: Check 'write' permissions for system hosts file
- stat:
- path: "{{ system_hosts_file }}"
- register: stat_system_hosts
-
- - name: Update system hosts file if writeable
- lineinfile:
- dest: "{{ system_hosts_file }}"
- state: absent
- regexp: "{{ hostvars[item].inventory_hostname }}"
- create: true
- with_items: "{{ groups['allnodes'] }}"
- when: "stat_system_hosts.stat.writeable"
- - name: Update user's SSH hosts file
- lineinfile:
- dest: "{{ home_hosts_file }}"
- state: present
- line: "Host obsolete-{{ item }}"
- regexp: "Host {{ item }}"
- create: true
- mode: '644'
- with_items: "{{ groups['allnodes'] }}"
diff --git a/deployment/playbooks/cleanup-cns.yaml b/deployment/playbooks/cleanup-cns.yaml
deleted file mode 100644
index 5a2d8497..00000000
--- a/deployment/playbooks/cleanup-cns.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- hosts: localhost
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- - instance-groups
-
-- hosts: cns
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- - rhsm-unregister
-
-- hosts: localhost
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- tasks:
- - name: Delete cns VMs
- vmware_guest:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- datacenter: "{{ vcenter_datacenter }}"
- folder: "/{{ vcenter_folder }}"
- name: "{{ item.value.guestname }}"
- state: absent
- force: true
- with_dict: "{{host_inventory}}"
- when: "'cns' in item.value.guestname"
diff --git a/deployment/playbooks/cleanup-crs.yaml b/deployment/playbooks/cleanup-crs.yaml
deleted file mode 100644
index 3d6ee533..00000000
--- a/deployment/playbooks/cleanup-crs.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- hosts: localhost
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- - instance-groups
-
-- hosts: crs
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- - rhsm-unregister
-
-- hosts: localhost
- user: root
- become: false
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- tasks:
- - name: Delete crs VMs
- vmware_guest:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- datacenter: "{{ vcenter_datacenter }}"
- folder: "/{{ vcenter_folder }}"
- name: "{{ item.value.guestname }}"
- state: absent
- force: true
- with_dict: "{{host_inventory}}"
- when: "'crs' in item.value.guestname"
diff --git a/deployment/playbooks/cns-node-setup.yaml b/deployment/playbooks/cns-node-setup.yaml
deleted file mode 100644
index c5293619..00000000
--- a/deployment/playbooks/cns-node-setup.yaml
+++ /dev/null
@@ -1,131 +0,0 @@
----
-- hosts: cns
- gather_facts: yes
- become: no
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
- - instance-groups
- - package-repos
- - vmware-guest-setup
- - cloud-provider-setup
- - docker-storage-setup
- - openshift-volume-quota
- - gluster-ports
-
-# The 'openshift_node_groups' var became required starting with OCP 3.10
-- hosts: allnodes
- gather_facts: no
- become: no
- tasks:
- - set_fact:
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
-
-- include: add-node-prerequisite.yaml
- when: openshift_vers in ['v3_6', 'v3_7']
-
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
- 'noop.yaml',
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml'
- ) }} hosts=new_nodes"
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
- 'noop.yaml',
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/init/main.yml'
- ) }} hosts=new_nodes"
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- name: Map domain names and IP addresses of old and new nodes to each other
- hosts: master, compute, crs
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
-
-- hosts: allnodes
- gather_facts: no
- become: no
- tasks:
- - name: Make sure dnsmasq is running, enabled and restarted
- service: name=dnsmasq state=restarted enabled=yes
-
-- hosts: localhost
- gather_facts: no
- become: no
- roles:
- - yum-update-and-reboot
-
-- hosts: single_master
- gather_facts: no
- tasks:
- - name: Make sure oc client is responsive
- command: oc status
- retries: 120
- delay: 5
- register: oc_status_result
- until: oc_status_result is succeeded
diff --git a/deployment/playbooks/cns-setup.yaml b/deployment/playbooks/cns-setup.yaml
deleted file mode 100644
index ce17cc08..00000000
--- a/deployment/playbooks/cns-setup.yaml
+++ /dev/null
@@ -1,164 +0,0 @@
----
-- hosts: cns
- tasks:
- - name: Install required kernel modules on CNS nodes
- import_role:
- name: openshift_storage_glusterfs
- tasks_from: kernel_modules.yml
-
-- name: Restart dnsmasq to make our custom configs take effect
- hosts: allnodes
- tasks:
- - service:
- name: dnsmasq
- state: restarted
-
-- hosts: single_master
- tasks:
- - name: Perform actions on master node which are required to install CNS
- import_role:
- name: openshift_storage_glusterfs
- vars:
- openshift_storage_glusterfs_name: 'storage'
- openshift_storage_glusterfs_namespace: 'storage'
- openshift_storage_glusterfs_is_native: true
- openshift_storage_glusterfs_storageclass: true
- openshift_storage_glusterfs_block_storageclass: true
- openshift_storage_glusterfs_s3_deploy: false
- openshift_storage_glusterfs_heketi_admin_key: "{{
- (dp_tool_heketi_admin_key.strip() != '') |
- ternary(dp_tool_heketi_admin_key.strip(), omit) }}"
- openshift_storage_glusterfs_heketi_user_key: "{{
- (dp_tool_heketi_user_key.strip() != '') |
- ternary(dp_tool_heketi_user_key.strip(), omit) }}"
- openshift_storage_glusterfs_heketi_topology_load: true
- - name: Allow expanding PVCs using the 'glusterfs' storageclass.
- oc_edit:
- kind: sc
- name: glusterfs-{{ glusterfs_name }}
- content:
- allowVolumeExpansion: true
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- name: Get IP address of the node with router
- hosts: single_master
- tasks:
- - command: "oc get endpoints router -o=custom-columns=:.subsets[*].addresses[0].ip -n default"
- register: router_get
- - set_fact:
- router_ip: "{{ router_get.stdout_lines[1].strip() }}"
- delegate_to: "{{ item }}"
- delegate_facts: True
- with_items: "{{ groups['allnodes'] }}"
-
-- name: Update dnsmasq config with custom domain zone for apps
- hosts: allnodes
- tasks:
- - lineinfile:
- path: /etc/dnsmasq.conf
- line: "address=/.{{ app_dns_prefix }}.{{ dns_zone }}/{{ router_ip }}"
- - service:
- name: dnsmasq
- state: restarted
-
-- hosts: single_master
- tasks:
- - name: Get IPv4 address of the main master node
- command: "python -c \"import yaml ;
- config = yaml.load(open('/etc/origin/master/master-config.yaml', 'r'));
- print(config['kubernetesMasterConfig']['masterIP'])
- \""
- register: master_ipv4
- - set_fact:
- master_ipv4: "{{ master_ipv4.stdout_lines[0] }}"
- - name: Read Heketi dc name
- shell: "oc get dc -n storage | grep heketi | awk '{ print $1}'"
- register: heketi_dc_name_raw
- - name: Read Heketi svc name
- shell: "oc get svc -n storage | grep -e heketi | grep -v heketi-db | awk '{ print $1}'"
- register: heketi_svc_name_raw
- - name: Save Heketi DC and SVC names in separate vars
- set_fact:
- heketi_dc_name: "{{ heketi_dc_name_raw.stdout.strip() }}"
- heketi_svc_name: "{{ heketi_svc_name_raw.stdout.strip() }}"
- - command: "oc patch svc {{ heketi_svc_name }} --namespace storage
- -p '{\"spec\":{\"externalIPs\":[\"{{ master_ipv4 }}\"]}}'"
- run_once: true
-
-# The following play updates the config file that is required
-# by the automated tests from the 'cns-automation' repo
-
-- name: Update 'cns-automation' config file
- hosts: localhost
- tasks:
- - set_fact:
- master_ipv4: "{{ hostvars[groups['single_master'][0]].master_ipv4 }}"
- - yedit:
- src: "{{ cns_automation_config_file_path }}"
- state: present
- edits:
- - key: openshift.storage_project_name
- value: "storage"
- - key: openshift.heketi_config.heketi_dc_name
- value: "{{ hostvars[groups['single_master'][0]].heketi_dc_name }}"
- - key: openshift.heketi_config.heketi_service_name
- value: "{{ hostvars[groups['single_master'][0]].heketi_svc_name }}"
- - key: openshift.heketi_config.heketi_client_node
- value: "{{ master_ipv4 }}"
- - key: openshift.heketi_config.heketi_server_url
- value: "http://{{ master_ipv4 }}:8080"
- - key: openshift.heketi_config.heketi_cli_user
- value: 'admin'
- - key: openshift.heketi_config.heketi_cli_key
- value: "{{ dp_tool_heketi_admin_key }}"
- - key: openshift.dynamic_provisioning.storage_classes
- value:
- file_storage_class:
- provisioner: "kubernetes.io/glusterfs"
- resturl: "http://{{ master_ipv4 }}:8080"
- restuser: "admin"
- secretnamespace: "storage"
- volumenameprefix: "autotests-file"
- block_storage_class:
- provisioner: "gluster.org/glusterblock"
- resturl: "http://{{ master_ipv4 }}:8080"
- restuser: "admin"
- restsecretnamespace: "storage"
- volumenameprefix: "autotests-block"
- hacount: "3"
- chapauthenabled: "true"
- when: cns_automation_config_file_path | length > 0
- run_once: true
-
-- name: Install 'heketi-client' package on all the master nodes
- hosts: master
- gather_facts: no
- tasks:
- - name: Get Heketi POD name on master node
- command: "oc get pods -l glusterfs=heketi-storage-pod --no-headers {{ ''
- }}-o=custom-columns=:.metadata.name --namespace storage"
- register: heketi_pod_name
- - name: Read heketi-client package version from Heketi POD
- shell: "oc exec --namespace storage {{ heketi_pod_name.stdout_lines[0]
- }} -- rpm -q heketi-client --queryformat{{ ''
- }} '%{version}-%{release}\n' | cut -d '.' -f 1,2,3"
- register: heketi_client_version
- - name: Enable Gluster 3 repo on master node
- import_role:
- name: enable-gluster-repo
- - name: Remove existing heketi client from master node if present
- package:
- name: "heketi-client"
- state: absent
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
- - name: Install heketi client on master node for CNS and CRS needs
- package:
- name: "heketi-client-{{heketi_client_version.stdout_lines[0].strip() }}*"
- state: present
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
diff --git a/deployment/playbooks/cns-storage.yaml b/deployment/playbooks/cns-storage.yaml
deleted file mode 100644
index 6df9dbd7..00000000
--- a/deployment/playbooks/cns-storage.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- include: prod-ose-cns.yaml
- tags: ['vms']
-
-- include: cns-node-setup.yaml
- tags: [ 'node-setup']
-
-- include: node-setup.yaml
- tags: [ 'node-setup']
-
-- include: cns-setup.yaml
- tags: [ 'node-setup']
-
-- include: cleanup-cns.yaml
- tags: ['clean']
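cns-storage.yaml above is a thin, tag-driven wrapper, so single phases can be selected at run time with '--tags'. Assuming it is invoked the same way as the other playbooks in this repo, the calls would look roughly like:

$ ansible-playbook -i <inventory> deployment/playbooks/cns-storage.yaml --tags vms
$ ansible-playbook -i <inventory> deployment/playbooks/cns-storage.yaml --tags node-setup
$ ansible-playbook -i <inventory> deployment/playbooks/cns-storage.yaml --tags clean

Note that the 'clean' include lives in the same file, which is presumably why runs are always tag-limited: an untagged run would provision the CNS nodes and then delete them again. crs-storage.yaml below follows the same layout.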
diff --git a/deployment/playbooks/crs-node-setup.yaml b/deployment/playbooks/crs-node-setup.yaml
deleted file mode 100644
index c762f48a..00000000
--- a/deployment/playbooks/crs-node-setup.yaml
+++ /dev/null
@@ -1,123 +0,0 @@
----
-- hosts: crs
- gather_facts: yes
- become: no
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
- - instance-groups
- - package-repos
- - vmware-guest-setup
- - crs-prerequisite
- - gluster-ports
-
-# The 'openshift_node_groups' var became required starting with OCP 3.10
-- hosts: allnodes
- gather_facts: no
- become: no
- tasks:
- - set_fact:
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
-
-- hosts: crs
- gather_facts: no
- become: no
- vars_files:
- - vars/main.yaml
- tasks:
- - name: Install required kernel modules on CRS nodes
- import_role:
- name: openshift_storage_glusterfs
- tasks_from: kernel_modules.yml
-
-- name: Map domain names and IP addresses of old and new nodes to each other
- hosts: master, compute, cns
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
-
-- hosts: allnodes
- gather_facts: no
- become: no
- tasks:
- - name: Make sure dnsmasq is running, enabled and restarted
- service: name=dnsmasq state=restarted enabled=yes
-
-- hosts: localhost
- gather_facts: no
- become: no
- roles:
- - yum-update-and-reboot
-
-- hosts: single_master
- gather_facts: no
- tasks:
- - name: Make sure oc client is responsive
- command: oc status
- retries: 120
- delay: 5
- register: oc_status_result
- until: oc_status_result is succeeded
diff --git a/deployment/playbooks/crs-setup.yaml b/deployment/playbooks/crs-setup.yaml
deleted file mode 100644
index fbba5f37..00000000
--- a/deployment/playbooks/crs-setup.yaml
+++ /dev/null
@@ -1,209 +0,0 @@
----
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
- 'noop.yaml',
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/init/main.yml'
- ) }} hosts=single_master"
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- hosts: single_master
- tasks:
- - name: Label common compute nodes to be suitable for Heketi POD
- oc_label:
- name: '{{ item }}'
- kind: 'node'
- state: 'add'
- labels:
- - key: 'glusterfs'
- value: 'heketi-host'
- - key: 'heketi'
- value: 'heketi-host'
- with_items: "{{ groups[cluster_id + '-compute'] }}"
- ignore_errors: true
-
-# Prepare SSH key pair before CRS installation
-- hosts: localhost
- ignore_errors: no
- tasks:
- - name: Get home dir of the current user
- shell: "getent passwd $(whoami) | cut -d: -f6"
- register: user_home_dir
- - name: Define path for the SSH key
- set_fact:
- crs_ssh_keypath: "{{ user_home_dir.stdout_lines[0].strip() }}/.ssh/crs_nodes_{{
- cluster_id + '_' + (999999999999999 | random | string ) }}"
- - name: Generate SSH key pair for Heketi and CRS interactions
- shell: "yes y| ssh-keygen -b 2048 -t rsa -f {{ crs_ssh_keypath }} -q -N ''"
- args:
- creates: "{{ crs_ssh_keypath }}"
- - name: Read contents of the public SSH key
- command: "cat {{ crs_ssh_keypath }}.pub"
- register: crs_pub_key_raw
- - name: Save public SSH key data to the variable
- set_fact:
- crs_pub_key: "{{ crs_pub_key_raw.stdout_lines[0].strip() }}"
- - name: Copy public SSH key onto CRS nodes
- shell: "echo {{ crs_pub_key }} >> /root/.ssh/authorized_keys"
- delegate_to: "{{ item }}"
- delegate_facts: true
- with_items: "{{ groups[cluster_id + '-crs'] }}"
- - name: Set var with SSH key path for master nodes
- set_fact:
- crs_ssh_keypath: "{{ crs_ssh_keypath }}"
- delegate_to: "{{ item }}"
- delegate_facts: true
- with_items: "{{ groups[cluster_id + '-master'] }}"
-
-# Run CRS installation
-- hosts: single_master
- tasks:
- - name: Perform actions on master node which are required to install CRS
- import_role:
- name: openshift_storage_glusterfs
- vars:
- openshift_storage_glusterfs_name: 'storage'
- openshift_storage_glusterfs_namespace: 'storage'
- openshift_storage_glusterfs_is_native: false
- openshift_storage_glusterfs_heketi_is_native: true
- openshift_storage_glusterfs_heketi_admin_key: "{{
- (dp_tool_heketi_admin_key.strip() != '') |
- ternary(dp_tool_heketi_admin_key.strip(), omit) }}"
- openshift_storage_glusterfs_heketi_user_key: "{{
- (dp_tool_heketi_user_key.strip() != '') |
- ternary(dp_tool_heketi_user_key.strip(), omit) }}"
- openshift_storage_glusterfs_storageclass: true
- openshift_storage_glusterfs_block_storageclass: true
- openshift_storage_glusterfs_s3_deploy: false
- openshift_storage_glusterfs_nodeselector: 'role=compute'
- openshift_storage_glusterfs_heketi_executor: 'ssh'
- openshift_storage_glusterfs_heketi_ssh_keyfile: "{{ crs_ssh_keypath }}"
- - name: Allow expanding PVCs using the 'glusterfs' storageclass.
- oc_edit:
- kind: sc
- name: glusterfs-{{ glusterfs_name }}
- content:
- allowVolumeExpansion: true
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- name: Get IP address of the node with router
- hosts: single_master
- tasks:
- - command: "oc get endpoints router -o=custom-columns=:.subsets[*].addresses[0].ip -n default"
- register: router_get
- - set_fact:
- router_ip: "{{ router_get.stdout_lines[1].strip() }}"
- delegate_to: "{{ item }}"
- delegate_facts: True
- with_items: "{{ groups['allnodes'] }}"
-
-- name: Restart dnsmasq on all the nodes to apply all the changes we made
- hosts: allnodes
- tasks:
- - lineinfile:
- path: /etc/dnsmasq.conf
- line: "address=/.{{ app_dns_prefix }}.{{ dns_zone }}/{{ router_ip }}"
- - service:
- name: dnsmasq
- state: restarted
-
-- hosts: single_master
- tasks:
- - name: Get IPv4 address of the main master node
- command: "python -c \"import yaml ;
- config = yaml.load(open('/etc/origin/master/master-config.yaml', 'r'));
- print(config['kubernetesMasterConfig']['masterIP'])
- \""
- register: master_ipv4
- - set_fact:
- master_ipv4: "{{ master_ipv4.stdout_lines[0] }}"
- - name: Read Heketi dc name
- shell: "oc get dc -n storage | grep heketi | awk '{ print $1}'"
- register: heketi_dc_name_raw
- - name: Read Heketi svc name
- shell: "oc get svc -n storage | grep -e heketi | grep -v heketi-db | awk '{ print $1}'"
- register: heketi_svc_name_raw
- - name: Save Heketi DC and SVC names in separate vars
- set_fact:
- heketi_dc_name: "{{ heketi_dc_name_raw.stdout.strip() }}"
- heketi_svc_name: "{{ heketi_svc_name_raw.stdout.strip() }}"
- - command: "oc patch svc {{ heketi_svc_name }} --namespace storage
- -p '{\"spec\":{\"externalIPs\":[\"{{ master_ipv4 }}\"]}}'"
- run_once: true
-
-# The following play updates the config file that is required
-# by the automated tests from the 'cns-automation' repo
-
-- name: Update 'cns-automation' config file
- hosts: localhost
- tasks:
- - set_fact:
- master_ipv4: "{{ hostvars[groups['single_master'][0]].master_ipv4 }}"
- - yedit:
- src: "{{ cns_automation_config_file_path }}"
- state: present
- edits:
- - key: openshift.storage_project_name
- value: "storage"
- - key: openshift.heketi_config.heketi_dc_name
- value: "{{ hostvars[groups['single_master'][0]].heketi_dc_name }}"
- - key: openshift.heketi_config.heketi_service_name
- value: "{{ hostvars[groups['single_master'][0]].heketi_svc_name }}"
- - key: openshift.heketi_config.heketi_client_node
- value: "{{ master_ipv4 }}"
- - key: openshift.heketi_config.heketi_server_url
- value: "http://{{ master_ipv4 }}:8080"
- - key: openshift.heketi_config.heketi_cli_user
- value: 'admin'
- - key: openshift.heketi_config.heketi_cli_key
- value: "{{ dp_tool_heketi_admin_key }}"
- - key: openshift.dynamic_provisioning.storage_classes
- value:
- file_storage_class:
- provisioner: "kubernetes.io/glusterfs"
- resturl: "http://{{ master_ipv4 }}:8080"
- restuser: "admin"
- secretnamespace: "storage"
- volumenameprefix: "autotests-file"
- block_storage_class:
- provisioner: "gluster.org/glusterblock"
- resturl: "http://{{ master_ipv4 }}:8080"
- restuser: "admin"
- restsecretnamespace: "storage"
- volumenameprefix: "autotests-block"
- hacount: "3"
- chapauthenabled: "true"
- when: cns_automation_config_file_path | length > 0
- run_once: true
-
-- name: Install 'heketi-client' package on all the master nodes
- hosts: master
- gather_facts: no
- tasks:
- - name: Get Heketi POD name on master node
- command: "oc get pods -l glusterfs=heketi-storage-pod --no-headers {{ ''
- }}-o=custom-columns=:.metadata.name --namespace storage"
- register: heketi_pod_name
- - name: Read heketi-client package version from Heketi POD
- shell: "oc exec --namespace storage {{ heketi_pod_name.stdout_lines[0]
- }} -- rpm -q heketi-client --queryformat{{ ''
- }} '%{version}-%{release}\n' | cut -d '.' -f 1,2,3"
- register: heketi_client_version
- - name: Enable Gluster 3 repo on master node
- import_role:
- name: enable-gluster-repo
- - name: Remove existing heketi client from master node if present
- package:
- name: "heketi-client"
- state: absent
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
- - name: Install heketi client on master node for CNS and CRS needs
- package:
- name: "heketi-client-{{heketi_client_version.stdout_lines[0].strip() }}*"
- state: present
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
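The CRS flavour above differs from the CNS one mainly in that Heketi is switched to the 'ssh' executor ('openshift_storage_glusterfs_heketi_executor: ssh') and handed the key pair generated earlier in the play, which is why the public key is appended to /root/.ssh/authorized_keys on every CRS node. Heketi's ssh executor is configured through heketi.json; under these settings the relevant fragment should look roughly like the following sketch (the keyfile path is illustrative of wherever the key ends up mounted inside the native Heketi pod):

{
  "glusterfs": {
    "executor": "ssh",
    "sshexec": {
      "keyfile": "/etc/heketi/private_key",
      "user": "root",
      "port": "22"
    }
  }
}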
diff --git a/deployment/playbooks/crs-storage.yaml b/deployment/playbooks/crs-storage.yaml
deleted file mode 100644
index cee0da69..00000000
--- a/deployment/playbooks/crs-storage.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- include: prod-ose-crs.yaml
- tags: ['vms']
-
-- include: crs-node-setup.yaml
- tags: [ 'node-setup' ]
-
-- include: crs-setup.yaml
- tags: [ 'node-setup']
-
-- include: cleanup-crs.yaml
- tags: ['clean']
diff --git a/deployment/playbooks/gather_logs.yaml b/deployment/playbooks/gather_logs.yaml
deleted file mode 100644
index 33b9114f..00000000
--- a/deployment/playbooks/gather_logs.yaml
+++ /dev/null
@@ -1,883 +0,0 @@
-# Expected vars:
-#
-# - 'config_filepath' - required. It must hold the filepath of the config
-# file used by the automated test cases. This playbook takes all the
-# node info from it.
-#
-# - 'output_artifacts_dir' - optional. The directory where generated and
-# gathered files should be saved.
-#
-# Command to run this playbook:
-#
-# $ tox -e ocp3.11 -- ansible-playbook -i 127.0.0.1, \
-# playbooks/gather_logs.yaml \
-# -e config_filepath=/path/to/the/cns-automation-config.yaml \
-# -e output_artifacts_dir=../cluster_logs/
-
----
-- hosts: localhost
- connection: local
- gather_facts: no
- tasks:
- - name: Process config file and find all the nodes of an OpenShift cluster
- command: "python -c \"import yaml ;
- config = yaml.load(open('{{ config_filepath }}', 'r'));
- print(' '.join(list(config['ocp_servers']['master'].keys())));
- print(' '.join(list(config['ocp_servers']['nodes'].keys())));
- print(' '.join(list(config['gluster_servers'].keys())));
- print(config.get('openshift', config.get('cns',
- {}))['heketi_config']['heketi_server_url']);
- print(config.get('openshift', config.get('cns',
- {}))['heketi_config']['heketi_cli_user']);
- print(config.get('openshift', config.get('cns',
- {}))['heketi_config']['heketi_cli_key'])\""
- register: config_data
- - debug:
- msg: "{{ config_data }}"
- - name: Save config data to the host vars
- set_fact:
- master_nodes: "{{ config_data.stdout_lines[0].split(' ') }}"
- compute_nodes: "{{ config_data.stdout_lines[1].split(' ') }}"
- gluster_nodes: "{{ config_data.stdout_lines[2].split(' ') }}"
- heketi_server_url: "{{ config_data.stdout_lines[3] }}"
- heketi_cli_user: "{{ config_data.stdout_lines[4] }}"
- heketi_cli_key: "{{ config_data.stdout_lines[5] }}"
- - name: Print list of master nodes IP addresses
- debug:
- msg: "{{ master_nodes }}"
- - name: Print list of compute nodes IP addresses
- debug:
- msg: "{{ compute_nodes }}"
- - name: Print list of gluster nodes IP addresses
- debug:
- msg: "{{ gluster_nodes }}"
- - name: Add gathered master IP addresses to the Ansible host list
- add_host:
- hostname: "{{ item }}"
- ansible_host: "{{ item }}"
- groups: logs_ocp_nodes, logs_single_master_node, logs_master_nodes
- with_items: "{{ master_nodes }}"
- - name: Add gathered compute IP addresses to the Ansible host list
- add_host:
- hostname: "{{ item }}"
- ansible_host: "{{ item }}"
- groups: logs_ocp_nodes, logs_compute_nodes
- with_items: "{{ compute_nodes }}"
- - name: Add gathered gluster IP addresses to the Ansible host list
- add_host:
- hostname: "{{ item }}"
- ansible_host: "{{ item }}"
- groups: logs_ocp_nodes, logs_gluster_nodes
- with_items: "{{ gluster_nodes }}"
-
-- hosts: all
- gather_facts: no
- tasks:
- - set_fact:
- output_artifacts_dir: "{{
- (output_artifacts_dir | default('../cluster_logs/')) }}"
- - set_fact:
- output_artifacts_dir: "{{ output_artifacts_dir
- }}{% if output_artifacts_dir[-1] != '/' %}{{ '/' }}{% endif %}"
- - name: Get hostname of the current host
- shell: "hostname"
- register: hostname
- - name: Save hostname to the var
- set_fact:
- current_hostname: "{{ hostname.stdout_lines[0].strip() }}"
- separator_line: "{{ '=' * 79 }}"
- - name: Create output artifacts directory if absent
- delegate_to: localhost
- file:
- path: "{{ output_artifacts_dir }}"
- state: directory
- run_once: yes
- - name: Install 'rsync' package which is required by 'synchronize' module
- yum:
- name: rsync
- state: installed
- ignore_errors: yes
-
-# All nodes SOS reports
-- hosts: logs_ocp_nodes
- gather_facts: no
- tasks:
- - name: Install 'sos' package if absent
- package:
- name: sos
- state: present
- - name: Run sosreport command
- shell: "sosreport --batch --verbose --tmp-dir . --label customuniquelabel"
- register: sosreport_output
- - name: Print the output of the sosreport command
- debug:
- msg: "{{ sosreport_output }}"
- - name: Get name of the generated sos-file
- shell: 'echo -e "{{ sosreport_output.stdout }}" | grep customuniquelabel'
- register: sos_filepath
- - name: Copy generated files to the localhost
- fetch:
- src: "{{ sos_filepath.stdout_lines[0].strip() }}"
- dest: "{{ output_artifacts_dir }}sosreports/"
- flat: yes
- fail_on_missing: yes
-
-- hosts: logs_single_master_node
- gather_facts: no
- vars:
- master_package_list:
- - docker
- - heketi
- master_service_list:
- - docker
- - multipathd
- heketi_pod_package_list:
- - gluster
- - heketi
- gluster_pod_package_list:
- - gluster
- - heketi
- - targetcli
- - gluster-block
- - tcmu-runner
- - python-configshell
- - python-rtslib
- gluster_pod_service_list:
- - glusterd
- - heketi
- - gluster-blockd
- - gluster-block-target
- - tcmu-runner
- - rpcbind
- tasks:
-
- # Master node info
- - name: Get distro version
- shell: "uname -a"
- register: master_linux_kernel_version
- - name: Get Red Hat release info
- shell: "cat /etc/redhat-release"
- register: master_rh_release
- - name: Create grep filter with all the packages we are interested in
- set_fact:
- package_filter: "{{ package_filter | default('grep') + ' -e ' + item }}"
- with_items: "{{ master_package_list }}"
- - name: Get list of installed packages we are interested in
- shell: "rpm -qa | {{ package_filter }}"
- register: master_packages
- - name: Get status of services on OCP Master node
- shell: "systemctl list-units {{ master_service_list | join('.service ') }}.service
- --type=service --all --no-pager --no-legend"
- register: master_services
- - name: Get OpenShift client version
- shell: "(oc version | grep -e 'oc ' -e 'openshift' -e 'kube') ||
- echo failed_to_get_oc_version_info"
- register: master_oc_version
- - name: Get list of OCP nodes
- shell: "oc get nodes -o wide || echo failed_to_get_list_of_nodes"
- register: master_ocp_nodes
- - name: Get info about all the docker images used in OCP cluster
- shell: "(oc get pods --all-namespaces
- -o=custom-columns=:.status.containerStatuses[*].image | grep -v -e '^$' | uniq) ||
- echo failed_to_get_list_of_images"
- register: master_image_info
- - name: Write master data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ output_artifacts_dir }}master_data.yaml"
- state: present
- edits:
- - key: master
- value:
- Linux kernel version: "{{ master_linux_kernel_version.stdout_lines }}"
- Red Hat release info: "{{ master_rh_release.stdout_lines }}"
- List of Packages: "{{ master_packages.stdout_lines }}"
- List of services: "{{ master_services.stdout_lines }}"
- OC Version: "{{ master_oc_version.stdout_lines }}"
- OCP nodes: "{{ master_ocp_nodes.stdout_lines }}"
- Images info: "{{ master_image_info.stdout_lines }}"
-
- # Heketi POD logs, config and DB dump
- - name: Get heketi POD
- shell: "oc get pods --all-namespaces -l heketi --no-headers
- -o=custom-columns=:.metadata.name,:.metadata.namespace"
- register: heketi_pods
- retries: 10
- delay: 6
- until: heketi_pods is succeeded
- - name: DEBUG HEKETI
- debug:
- msg: "{{ heketi_pods }}"
- - block:
- - name: Create var with destination dir path
- set_fact:
- dir_path: "{{ output_artifacts_dir }}heketi_pod/"
- - name: Create compute directory if absent
- delegate_to: localhost
- file:
- path: "{{ dir_path }}"
- state: directory
- - name: Set Heketi POD name and Heketi namespace as vars
- set_fact:
- heketi_pod_name: "{{
- (heketi_pods.stdout_lines[0].split(' ') | list)[0] }}"
- heketi_pod_namespace: "{{
- (heketi_pods.stdout_lines[0].split(' ') | list)[-1] }}"
- - name: Set Heketi pod command prefix
- set_fact:
- heketi_pod_cmd_prefix: "oc exec {{ heketi_pod_name
- }} --namespace {{ heketi_pod_namespace }} --"
- - name: Get the Heketi config from the Heketi POD
- shell: 'echo -e "$({{ heketi_pod_cmd_prefix
- }} cat /etc/heketi/heketi.json ||
- echo error_failed_to_get_the_heketi_config_file)" |
- tee /tmp/heketi_config.json'
- - name: Copy the Heketi config
- fetch:
- src: "/tmp/heketi_config.json"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
- - name: Save Heketi POD logs
- shell: "(oc logs {{ heketi_pod_name }} ||
- echo 'ERROR! Failed to get the Heketi logs.') > /tmp/heketi.log"
- - name: Copy the Heketi logs
- fetch:
- src: "/tmp/heketi.log"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
- - name: Dump the Heketi DB
- shell: 'echo -e "$({{ heketi_pod_cmd_prefix }} heketi-cli --server {{
- hostvars["localhost"]["heketi_server_url"] }} --user {{
- hostvars["localhost"]["heketi_cli_user"] }} --secret {{
- hostvars["localhost"]["heketi_cli_key"]
- }} db dump ||
- echo \{\"error\"\:\"failed_to_get_the_Heketi_db_dump\"\})" |
- python -m json.tool > /tmp/heketi_db_dump.json'
- - name: Copy the Heketi DB dump
- fetch:
- src: "/tmp/heketi_db_dump.json"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
- - name: Get storage release version from Heketi POD
- shell: "{{ heketi_pod_cmd_prefix }} cat /etc/redhat-storage-release ||
- echo failed_to_read_redhat_storage_release_info"
- register: heketi_pod_storage_release_version
- - name: Get info about packages on Heketi POD
- shell: "{{ heketi_pod_cmd_prefix }} rpm -qa | grep -e {{
- heketi_pod_package_list | join(' -e ') }} ||
- echo failed_to_read_list_of_installed_packages"
- register: heketi_pod_packages
- - name: Write Heketi data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ dir_path }}heketi_pod_data.yaml"
- state: present
- edits:
- - key: "Storage_release_version"
- value: "{{ heketi_pod_storage_release_version.stdout_lines }}"
- - key: "List_of_Packages"
- value: "{{ heketi_pod_packages.stdout_lines }}"
- when: "((heketi_pods.stdout_lines | join('')).strip() | length) > 0"
-
- # Gluster PODs
- - name: Get list of Gluster PODs
- shell: "oc get pods --all-namespaces -l glusterfs-node --no-headers
- -o=custom-columns=:.metadata.name,:.metadata.namespace"
- register: gluster_pods
- retries: 10
- delay: 6
- until: gluster_pods is succeeded
- - name: DEBUG GLUSTER
- debug:
- msg: "{{ gluster_pods }}"
- - name: Create var describing the Gluster cluster deployment type
- set_fact:
- is_gluster_containerized: "{{
- ((gluster_pods.stdout_lines | join('')).strip() | length) > 0 }}"
- delegate_to: "{{ item }}"
- delegate_facts: yes
- with_items: "{{ groups['all'] }}"
- - block:
- - name: Create var with destination dir path
- set_fact:
- dir_path: "{{ output_artifacts_dir }}gluster_pods/"
- - name: Create directory if absent
- delegate_to: localhost
- file:
- path: "{{ dir_path }}"
- state: directory
- - name: Define storage namespace
- set_fact:
- storage_namespace: "{{ (gluster_pods.stdout_lines[0].split(
- ' ') | list)[-1] }}"
- - name: Define gluster POD names
- set_fact:
- gluster_pod_names: "{{ (gluster_pod_names | default([])) +
- [(item.split(' ') | list)[0]] }}"
- with_items: "{{ gluster_pods.stdout_lines[0:] }}"
- - debug:
- msg: "{{ gluster_pod_names }}"
-
- - name: Get storage release version from Gluster PODs
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace }} --
- cat /etc/redhat-storage-release) ||
- echo failed_to_get_redhat_storage_release_info"
- with_items: "{{ gluster_pod_names }}"
- register: gluster_pod_storage_release_version_results
- - name: Process gluster PODs storage release versions results
- set_fact:
- gluster_pod_storage_release_version_processed: "{{
- gluster_pod_storage_release_version_processed | default({}) | combine(
- {(item.item.strip().split(' ')[0]): item.stdout_lines},
- recursive=True
- ) }}"
- with_items: "{{ gluster_pod_storage_release_version_results.results }}"
- - name: Get info about packages on Gluster PODs
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace }} --
- rpm -qa | grep -e {{ gluster_pod_package_list | join(' -e ') }}) ||
- echo failed_to_get_packages_info_from_gluster_pod"
- with_items: "{{ gluster_pod_names }}"
- register: gluster_pod_package_list_results
- - name: Process gluster PODs package lists results
- set_fact:
- gluster_pod_package_list_processed: "{{
- gluster_pod_package_list_processed | default({}) | combine(
- {(item.item.strip().split(' ')[0]): item.stdout_lines},
- recursive=True
- ) }}"
- with_items: "{{ gluster_pod_package_list_results.results }}"
- - name: Get info about services on Gluster PODs
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace }} --
- systemctl list-units {{
- gluster_pod_service_list | join('.service ') }}.service
- --type=service --all --no-pager --no-legend) ||
- echo failed_to_get_services_info_from_gluster_pod"
- with_items: "{{ gluster_pod_names }}"
- register: gluster_pod_service_list_results
- - name: Process gluster PODs service lists results
- set_fact:
- gluster_pod_service_list_processed: "{{
- gluster_pod_service_list_processed | default({}) | combine(
- {(item.item.strip().split(' ')[0]): item.stdout_lines},
- recursive=True
- ) }}"
- with_items: "{{ gluster_pod_service_list_results.results }}"
- - name: Write Gluster PODs data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ dir_path }}gluster_pods_packages_and_services_data.yaml"
- state: present
- edits:
- - key: gluster_pods
- value:
- Storage release version: "{{
- gluster_pod_storage_release_version_processed }}"
- List of Packages: "{{ gluster_pod_package_list_processed }}"
- List of Services: "{{ gluster_pod_service_list_processed }}"
-
- - name: Get 'targetcli ls' output
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace }} --
- targetcli ls) || echo failed_to_get_targetcli_ls_output"
- with_items: "{{ gluster_pod_names }}"
- register: gluster_pod_targetcli_ls_results
- - debug:
- msg: "{{ gluster_pod_targetcli_ls_results }}"
- - name: Write Gluster PODs data to the data file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0] }}-targetcli-ls"
- with_items: "{{ gluster_pod_targetcli_ls_results.results }}"
-
- - name: Create gluster log directories on the master node
- file:
- path: "/tmp/gluster/{{ item }}-var_log_glusterfs"
- state: directory
- with_items: "{{ gluster_pod_names }}"
- - name: Copy '/var/log/glusterfs/*' files to the master
- shell: "(oc cp {{ storage_namespace }}/{{ item }}:/var/log/glusterfs
- /tmp/gluster/{{ item }}-var_log_glusterfs) ||
- echo failed_to_copy_var_log_glusterfs_files"
- with_items: "{{ gluster_pod_names }}"
- - name: Copy '/var/log/glusterfs/*' files from the master to the localhost
- synchronize:
- src: "/tmp/gluster/{{ item }}-var_log_glusterfs/"
- dest: "{{ dir_path }}{{ item }}-var_log_glusterfs/"
- mode: pull
- recursive: yes
- use_ssh_args: yes
- with_items: "{{ gluster_pod_names }}"
- ignore_errors: yes
-
- - name: Get 'dmesg -T' info
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- dmesg -T) || echo failed_to_get_dmesg_-T_info"
- with_items: "{{ gluster_pod_names }}"
- register: gluster_pods_dmesg_results
- - name: Write Gluster PODs dmesg data to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0] }}-dmesg"
- with_items: "{{ gluster_pods_dmesg_results.results }}"
-
- - name: Get list of processes and info for processes with 'D' stat
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace }} -- ps aux ;
- oc exec {{ item }} --namespace {{ storage_namespace }} --
- ps -aux | tee /dev/tty | awk {'if ( $8 ~ \"D\" ) print $2'} |
- while read -r pid ;
- do echo -e \"\nRunning '/proc/$pid/stack' command:\";
- cat /proc/$pid/stack ;
- done) || echo failed_to_get_info_about_processes_with_D_stat"
- with_items: "{{ gluster_pod_names }}"
- register: gluster_pod_io_processes_info
- - name: Write Gluster PODs 'I/O' processes info to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0] }}-ps"
- with_items: "{{ gluster_pod_io_processes_info.results }}"
-
- - name: List dirs and files in '/sys/module/dm_multipath'
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- ls -l /sys/module/dm_multipath) ||
- echo failed_to_list_files_in__sys_module_dm_multipath"
- with_items: "{{ gluster_pod_names }}"
- register: sys_module_dm_multipath_results
- - name: Write Gluster PODs 'ls -l /sys/module/dm_multipath' output to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0]
- }}-ls-sys_module_dm_multipath"
- with_items: "{{ sys_module_dm_multipath_results.results }}"
-
- - name: "Run 'lsmod | egrep target_core' command"
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- lsmod | egrep target_core) ||
- echo failed_to_get_lsmod_info_for_target_core"
- with_items: "{{ gluster_pod_names }}"
- register: lsmod_target_core_results
- - name: Write Gluster PODs 'lsmod | egrep target_core' command results to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{
- (item.item.strip().split(' ') | list)[0] }}-lsmod_target_core"
- with_items: "{{ lsmod_target_core_results.results }}"
-
- - name: Get info about devices
- shell: '(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- bash -c "echo -e \"{{ separator_line }}\nlsblk info:\"; lsblk;
- echo -e \"{{ separator_line }}\nPVs info:\"; pvs;
- echo -e \"{{ separator_line }}\nVGs info:\"; vgs;
- echo -e \"{{ separator_line }}\nLVs info:\"; lvs;
- echo -e \"{{ separator_line }}\"") ||
- echo failed_to_get_list_of_pvs_vgs_and_lvs'
- with_items: "{{ gluster_pod_names }}"
- register: lsblk_pvs_vgs_lvs_results
- - name: Write Gluster PODs info about PVs, VGs and LVs to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0]
- }}-lsblk_pvs_vgs_lvs"
- with_items: "{{ lsblk_pvs_vgs_lvs_results.results }}"
-
- - name: Read 'journalctl' output
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- journalctl) || echo failed_to_read_journalctl_output"
- with_items: "{{ gluster_pod_names }}"
- register: journalctl_results
- - name: Write Gluster PODs 'journalctl' output to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0]
- }}-journalctl"
- with_items: "{{ journalctl_results.results }}"
-
- - name: Read 'mount' output
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- mount) || echo failed_to_read_mount_output"
- with_items: "{{ gluster_pod_names }}"
- register: mount_results
- - name: Write Gluster PODs 'mount' output to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ item.stdout }}"
- dest: "{{ dir_path }}{{ (item.item.strip().split(' ') | list)[0] }}-mount"
- with_items: "{{ mount_results.results }}"
-
- - name: Create archive from ' /etc/target/' dir
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- tar -czvf etc_target.tar.gz /etc/target/ ) ||
- echo failed_to_archive__etc_target_dir"
- with_items: "{{ gluster_pod_names }}"
- - name: Copy archive of the '/etc/target/' dir to the master
- shell: "(oc cp {{ storage_namespace }}/{{ item }}:/etc_target.tar.gz
- /tmp/gluster/{{ item }}-etc_target.tar.gz) ||
- echo failed_to_copy_etc_target_file"
- with_items: "{{ gluster_pod_names }}"
- - name: Copy archive of the '/etc/target/' dir to the localhost
- fetch:
- src: "/tmp/gluster/{{ item }}-etc_target.tar.gz"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
- with_items: "{{ gluster_pod_names }}"
-
- - name: Create archive from '/sys/kernel/config/target/' dir
- shell: "(oc exec {{ item }} --namespace {{ storage_namespace
- }} -- tar -czvf sys_kernel_config_target.tar.gz /sys/kernel/config/target/ ) ||
- echo failed_to_archive__sys_kernel_config_target_dir"
- with_items: "{{ gluster_pod_names }}"
- - name: Copy archive of the '/sys/kernel/config/target/' dir to the master
- shell: "(oc cp {{ storage_namespace }}/{{ item }}:/sys_kernel_config_target.tar.gz
- /tmp/gluster/{{ item }}-sys_kernel_config_target.tar.gz) ||
- echo failed_to_copy_sys_kernel_config_target_file"
- with_items: "{{ gluster_pod_names }}"
- - name: Copy archive of the '/sys/kernel/config/target/' dir to the localhost
- fetch:
- src: "/tmp/gluster/{{ item }}-sys_kernel_config_target.tar.gz"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
- with_items: "{{ gluster_pod_names }}"
- when: "is_gluster_containerized"
-
-# Gather info from gluster nodes in case of 'standalone' deployment
-- hosts: logs_gluster_nodes
- gather_facts: no
- vars:
- gluster_package_list:
- - gluster
- - heketi
- - targetcli
- - gluster-block
- - tcmu-runner
- - python-configshell
- - python-rtslib
- gluster_service_list:
- - glusterd
- - heketi
- - gluster-blockd
- - gluster-block-target
- - tcmu-runner
- - rpcbind
- tasks:
- - block:
- - name: Create var with destination dir path
- set_fact:
- dir_path: "{{ output_artifacts_dir }}gluster_nodes/"
- - name: Create directory if absent
- delegate_to: localhost
- file:
- path: "{{ dir_path }}"
- state: directory
- run_once: yes
-
- - name: Get storage release version from Gluster node
- shell: "(cat /etc/redhat-storage-release) ||
- echo failed_to_get_redhat_storage_release_info"
- register: gluster_node_storage_release_version_results
- - name: Get info about packages on Gluster node
- shell: "(rpm -qa | grep -e {{ gluster_package_list | join(' -e ') }}) ||
- echo failed_to_get_packages_info_from_gluster_node"
- register: gluster_node_package_list_results
- - name: Get info about services on Gluster node
- shell: "(systemctl list-units {{
- gluster_service_list | join('.service ') }}.service
- --type=service --all --no-pager --no-legend) ||
- echo failed_to_get_services_info_from_gluster_node"
- register: gluster_node_service_list_results
- - name: Write Gluster node data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ dir_path }}gluster_nodes_packages_and_services_data.yaml"
- state: present
- edits:
- - key: "gluster_node_{{ current_hostname }}"
- value:
- Storage release version: "{{
- gluster_node_storage_release_version_results.stdout }}"
- List of Packages: "{{ gluster_node_package_list_results.stdout_lines }}"
- List of Services: "{{ gluster_node_service_list_results.stdout_lines }}"
-
- - name: Get 'targetcli ls' output
- shell: "targetcli ls || echo failed_to_get_targetcli_ls_output"
- register: gluster_node_targetcli_ls_results
- - name: Write Gluster node data to the data file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ gluster_node_targetcli_ls_results.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-targetcli-ls"
-
- - name: Copy '/var/log/glusterfs/*' files from the current gluster node
- synchronize:
- src: "/var/log/glusterfs/"
- dest: "{{ dir_path }}{{ current_hostname }}-var_log_glusterfs/"
- mode: pull
- recursive: yes
- use_ssh_args: yes
- ignore_errors: yes
-
- - name: Get info about space usage
- shell: '(echo -e "File system disk space usage on the {{
- current_hostname }} node:\n"; df -Th) || echo failed_to_get_df_info'
- register: df_info
- - name: Save disk space usage info into a file
- delegate_to: localhost
- copy:
- content: "{{ df_info.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_df"
-
- - name: Get 'dmesg -T' info
- shell: "dmesg -T || echo failed_toget_dmesg_-T_info"
- register: gluster_node_dmesg_results
- - name: Write Gluster node dmesg data to files
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ gluster_node_dmesg_results.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-dmesg"
-
- - name: Get list of processes and info for processes with 'D' stat
- shell: "( ps aux ;
- ps -aux | tee /dev/tty | awk {'if ( $8 ~ \"D\" ) print $2'} |
- while read -r pid ;
- do echo -e \"\nRunning '/proc/$pid/stack' command:\";
- cat /proc/$pid/stack ;
- done) || echo failed_to_get_info_about_processes_with_D_stat"
- register: gluster_node_io_processes_info
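- # 'D' state means uninterruptible sleep (usually a process stuck in I/O);
- # for each such PID the kernel stack from /proc/<pid>/stack is captured too.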
- - name: Write Gluster node I/O processes info to a file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ gluster_node_io_processes_info.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-ps"
-
- - name: List dirs and files in '/sys/module/dm_multipath'
- shell: "ls -l /sys/module/dm_multipath ||
- echo failed_to_list_files_in__sys_module_dm_multipath"
- register: sys_module_dm_multipath_results
- - name: Write Gluster node 'ls -l /sys/module/dm_multipath' output to a file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ sys_module_dm_multipath_results.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-ls-sys_module_dm_multipath"
-
- - name: "Run 'lsmod | egrep target_core' command"
- shell: "(lsmod | egrep target_core) ||
- echo failed_to_get_lsmod_info_for_target_core"
- register: lsmod_target_core_results
- - name: Write Gluster node 'lsmod | egrep target_core' command results to a file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ lsmod_target_core_results.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-lsmod_target_core"
-
- - name: Get info about devices
- shell: '(echo -e "{{ separator_line }}\nlsblk info:"; lsblk;
- echo -e "{{ separator_line }}\nPVs info:"; pvs;
- echo -e "{{ separator_line }}\nVGs info:"; vgs;
- echo -e "{{ separator_line }}\nLVs info:"; lvs;
- echo -e "{{ separator_line }}\nll /dev/disk/by-path/ip-*:";
- ll /dev/disk/by-path/ip-*; echo {{ separator_line }}) ||
- echo failed_to_get_info'
- register: lsblk_pvs_vgs_lvs
- - name: Save devices info into a file
- delegate_to: localhost
- copy:
- content: "{{ lsblk_pvs_vgs_lvs.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_lsblk_pvs_vgs_lvs"
-
- - name: Read 'journalctl' output
- shell: "journalctl || echo failed_to_read_journalctl_output"
- register: journalctl_results
- - name: Write Gluster node 'journalctl' output to a file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ journalctl_results.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-journalctl"
-
- - name: Read 'mount' output
- shell: "mount || echo failed_to_read_mount_output"
- register: mount_results
- - name: Write Gluster node 'mount' output to a file
- delegate_to: localhost
- no_log: yes
- copy:
- content: "{{ mount_results.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}-mount"
-
- - name: Create archive from '/etc/target/' dir
- shell: "tar -czvf /tmp/gluster/{{ current_hostname
- }}-etc_target.tar.gz /etc/target/ ||
- echo failed_to_archive__etc_target_dir"
- - name: Copy archive of the '/etc/target/' dir to the localhost
- fetch:
- src: "/tmp/gluster/{{ current_hostname }}-etc_target.tar.gz"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
-
- - name: Create archive from '/sys/kernel/config/target/' dir
- shell: "tar -czvf /tmp/gluster/{{ current_hostname
- }}-sys_kernel_config_target.tar.gz /sys/kernel/config/target/ ||
- echo failed_to_archive__sys_kernel_config_target_dir"
- - name: Copy archive of the '/sys/kernel/config/target/' dir to the localhost
- fetch:
- src: "/tmp/gluster/{{ current_hostname }}-sys_kernel_config_target.tar.gz"
- dest: "{{ dir_path }}"
- flat: yes
- fail_on_missing: yes
-
- - name: Create archive from the '/var/log/messages' file
- shell: "tar -czvf var_log_messages.tar.gz /var/log/messages"
- retries: 15
- delay: 2
- register: result
- until: result is succeeded
- ignore_errors: yes
- - name: Copy the archive to the localhost
- fetch:
- src: "var_log_messages.tar.gz"
- dest: "{{ dir_path }}{{ current_hostname }}_var_log_messages.tar.gz"
- flat: yes
- fail_on_missing: yes
- ignore_errors: yes
- when: "not is_gluster_containerized"
-
-# Gather info from compute nodes
-- hosts: logs_compute_nodes
- gather_facts: no
- vars:
- compute_package_list:
- - docker
- - heketi
- - rpcbind
- compute_service_list:
- - docker
- - multipathd
- - rpcbind
- - iscsid
- tasks:
- - name: Create var with destination dir path
- set_fact:
- dir_path: "{{ output_artifacts_dir }}compute_nodes/"
- - name: Create compute directory if absent
- delegate_to: localhost
- file:
- path: "{{ dir_path }}"
- state: directory
- run_once: yes
-
- - name: Create grep filter with all the packages we are interested in
- set_fact:
- package_filter: "{{ package_filter | default('grep') + ' -e ' + item }}"
- with_items: "{{ compute_package_list }}"
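- # The loop above builds a single grep command string, e.g.
- # "grep -e docker -e heketi -e rpcbind", used to filter the 'rpm -qa' output below.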
- - name: Get list of installed packages we are interested in
- shell: "rpm -qa | {{ package_filter }} || echo failed_to_get_info"
- register: compute_packages
- - name: Write compute nodes package list to a file
- delegate_to: localhost
- copy:
- content: "{{ compute_packages.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_packages.yaml"
-
- - name: Get status of services on OCP Compute nodes
- shell: "systemctl list-units {{
- compute_service_list | join('.service ') }}.service --no-pager
- --type=service --all --no-legend || echo failed_to_get_info"
- register: compute_services
- - name: Write compute nodes service list to a file
- delegate_to: localhost
- copy:
- content: "{{ compute_services.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_services.yaml"
-
- - name: Get multipath info
- shell: "(echo 'Multipath config is following:'; cat /etc/multipath.conf;
- echo -e \"{{ separator_line }}\nResults of 'nmultipath -ll' command:\";
- multipath -ll; echo {{ separator_line }}) || echo failed_to_get_info"
- register: multipath_info
- - name: Save multipath info into a file
- delegate_to: localhost
- copy:
- content: "{{ multipath_info.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_multipath_info"
-
- - name: Get info about devices
- shell: '(echo -e "{{ separator_line }}\nlsblk info:"; lsblk;
- echo -e "{{ separator_line }}\nPVs info:"; pvs;
- echo -e "{{ separator_line }}\nVGs info:"; vgs;
- echo -e "{{ separator_line }}\nLVs info:"; lvs;
- echo -e "{{ separator_line }}\nll /dev/disk/by-path/ip-*:";
- ll /dev/disk/by-path/ip-*; echo {{ separator_line }}) ||
- echo failed_to_get_info'
- register: lsblk_pvs_vgs_lvs
- - name: Save devices info into a file
- delegate_to: localhost
- copy:
- content: "{{ lsblk_pvs_vgs_lvs.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_lsblk_pvs_vgs_lvs"
-
- - name: Get info about mounts
- shell: '(echo -e "Mounts on the {{ current_hostname }} node:\n"; mount) ||
- echo failed_to_get_info'
- register: mounts
- - name: Save mounts info into a file
- delegate_to: localhost
- copy:
- content: "{{ mounts.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_mount"
-
- - name: Get info about space usage
- shell: '(echo -e "File system disk space usage on the {{
- current_hostname }} node:\n"; df -Th) || echo failed_to_get_df_info'
- register: df_info
- - name: Save disk space usage info into a file
- delegate_to: localhost
- copy:
- content: "{{ df_info.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_df"
-
- - name: Read 'dmesg -T' info
- shell: "dmesg -T || echo failed_to_get_info"
- register: dmesg_info
- - name: Save dmesg info into a file
- delegate_to: localhost
- copy:
- content: "{{ dmesg_info.stdout }}"
- dest: "{{ dir_path }}{{ current_hostname }}_dmesg"
-
- - name: Create archive from the '/var/log/messages' file
- shell: "tar -czvf var_log_messages.tar.gz /var/log/messages"
- retries: 15
- delay: 2
- register: result
- until: result is succeeded
- ignore_errors: yes
- - name: Copy the archive to the localhost
- fetch:
- src: "var_log_messages.tar.gz"
- dest: "{{ dir_path }}{{ current_hostname }}_var_log_messages.tar.gz"
- flat: yes
- fail_on_missing: yes
- ignore_errors: yes
diff --git a/deployment/playbooks/generate-tests-config.yaml b/deployment/playbooks/generate-tests-config.yaml
deleted file mode 100644
index a4f77f9f..00000000
--- a/deployment/playbooks/generate-tests-config.yaml
+++ /dev/null
@@ -1,140 +0,0 @@
-# Run this playbook this way:
-#
-# $ tox -e ocp3.11 -- ansible-playbook -i ocp-master-node-hostname-or-ip, \
-# playbooks/generate-tests-config.yaml \
-# -e output_filepath=tests-config.yaml \
-# -e additional_devices=/dev/sdf
-#
-# Supported vars:
-# - output_filepath: optional. Defines path for an output tests config file.
-# - additional_devices: optional. Device names like "/dev/sdf" separated by
-# comma. Should be bare devices which can be attached to a Heketi cluster.
-# If it is not specified or empty, then appropriate test cases will be
-# skipped.
-# - master_ip: optional. Will be used as master node IP address if provided.
-#
-# Requirements:
-# - 'yedit' module should be enabled. It will be enabled running this playbook
-# via 'tox' command.
-# - playbook should run only on one host - OpenShift master node.
-#
-# Notes:
-# - tox's env name can be any of the following:
-# 'ocp3.6', 'ocp3.7', 'ocp3.9', 'ocp3.10' or 'ocp3.11'. The criterion is
-# that the 'yedit' module is enabled, which is true in any of those envs.
-
----
-- hosts: all[0]
- gather_facts: no
- tasks:
- - name: Read full hostname of the master node
- shell: "hostname -f"
- register: master_hostname_raw
- - name: Init vars
- set_fact:
- master_hostname: "{{ master_hostname_raw.stdout.strip() }}"
- master_ip: "{{ master_ip | default(hostvars.keys()[0]) }}"
- output_filepath: "{{
- output_filepath | default('../tests-config.yaml') }}"
- additional_devices_list: []
- - name: Process specified additional devices
- set_fact:
- additional_devices_list: "{{ additional_devices_list + [item] }}"
- with_items: "{{ (additional_devices | default('')).split(',') }}"
- when: item.strip() != ''
-
- - name: Read namespace of a Heketi deployment config
- shell: "oc get dc --all-namespaces | grep -e heketi | grep -v registry | awk '{ print $1}'"
- register: heketi_namespace_raw
- - name: Save namespace name in a separate var
- set_fact:
- heketi_namespace: "{{ heketi_namespace_raw.stdout.strip() }}"
- - name: Read Heketi dc name
- shell: "oc get dc -n {{ heketi_namespace
- }} | grep heketi | awk '{ print $1}'"
- register: heketi_dc_name_raw
- - name: Read Heketi svc name
- shell: "oc get svc -n {{ heketi_namespace
- }} | grep -e heketi | grep -v heketi-db | awk '{ print $1}'"
- register: heketi_svc_name_raw
- - name: Save Heketi DC and SVC names in separate vars
- set_fact:
- heketi_dc_name: "{{ heketi_dc_name_raw.stdout.strip() }}"
- heketi_svc_name: "{{ heketi_svc_name_raw.stdout.strip() }}"
- - name: Read Heketi service IP address
- shell: "oc get svc {{ heketi_svc_name }} -n {{ heketi_namespace
- }} -o=custom-columns=:.spec.clusterIP --no-headers"
- register: heketi_ip_raw
- - name: Read Heketi admin's secret
- shell: oc get dc -n {{ heketi_namespace }} {{ heketi_dc_name
- }} -o jsonpath='{.spec.template.spec.containers[0].env[?(@.name==
- "HEKETI_ADMIN_KEY")].value}'
- register: heketi_admin_secret_raw
- - name: Save Heketi admin secret to a var
- set_fact:
- heketi_admin_secret: "{{ heketi_admin_secret_raw.stdout.strip() }}"
- - name: Read Gluster nodes addresses
- shell: "heketi-cli --server=http://{{
- heketi_ip_raw.stdout.strip() }}:8080 --user=admin --secret={{
- heketi_admin_secret }} topology info --json |
- python -c \"from __future__ import print_function; import sys, json;
- topology = json.loads(sys.stdin.readlines()[0]);
- ips = [(n['hostnames']['manage'][0], n['hostnames']['storage'][0])
- for n in topology['clusters'][0]['nodes']];
- [print(ip[0], ip[1]) for ip in ips]\""
- register: gluster_nodes_addresses_raw
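- # The inline Python prints one "<manage-hostname> <storage-ip>" pair per
- # Gluster node from the Heketi topology JSON, e.g. "node1.example.com 10.70.46.11"
- # (values illustrative).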
- - name: Process gluster data and save it to a var
- set_fact:
- gluster_servers_data: "{{ gluster_servers_data | default({}) | combine(
- {item.split(' ')[1]: {
- 'manage': item.split(' ')[0], 'storage': item.split(' ')[1],
- 'additional_devices': additional_devices_list,
- }}
- )}}"
- with_items: "{{ gluster_nodes_addresses_raw.stdout_lines }}"
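- # The resulting dict is keyed by each node's storage IP and carries its
- # 'manage' hostname, 'storage' IP and optional extra devices, i.e. the shape
- # written to the 'gluster_servers' key of the tests config below.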
-
- - name: Save all the calculated data to the tests config file
- delegate_to: localhost
- connection: local
- yedit:
- src: "{{ output_filepath }}"
- state: present
- backup: false
- edits:
- - key: common
- value:
- stop_on_first_failure: false
- - key: gluster_servers
- value: "{{ gluster_servers_data }}"
- - key: ocp_servers.client
- value: "{{ {master_ip: {'hostname': master_hostname}} }}"
- - key: ocp_servers.master
- value: "{{ {master_ip: {'hostname': master_hostname}} }}"
- - key: ocp_servers.nodes
- value: {}
- - key: openshift.storage_project_name
- value: "{{ heketi_namespace }}"
- - key: openshift.dynamic_provisioning.storage_classes.block_storage_class
- value:
- chapauthenabled: "true"
- hacount: "3"
- provisioner: "gluster.org/glusterblock"
- restsecretnamespace: "{{ heketi_namespace }}"
- resturl: "http://{{ master_ip }}:8080"
- restuser: "admin"
- volumenameprefix: "autotests-block"
- - key: openshift.dynamic_provisioning.storage_classes.file_storage_class
- value:
- provisioner: "kubernetes.io/glusterfs"
- resturl: "http://{{ master_ip }}:8080"
- restuser: "admin"
- secretnamespace: "{{ heketi_namespace }}"
- volumenameprefix: "autotests-file"
- - key: openshift.heketi_config
- value:
- heketi_cli_key: "{{ heketi_admin_secret }}"
- heketi_cli_user: "admin"
- heketi_client_node: "{{ master_ip }}"
- heketi_dc_name: "{{ heketi_dc_name }}"
- heketi_server_url: "http://{{ master_ip }}:8080"
- heketi_service_name: "{{ heketi_svc_name }}"
diff --git a/deployment/playbooks/get_ocp_info.yaml b/deployment/playbooks/get_ocp_info.yaml
deleted file mode 100644
index 7046ccc6..00000000
--- a/deployment/playbooks/get_ocp_info.yaml
+++ /dev/null
@@ -1,233 +0,0 @@
----
-# Run this playbook this way:
-# $ ansible-playbook -i ocp-master-node-hostname-or-ip, get_ocp_info.yaml
-
-# Ansible runner machine info
-- hosts: localhost
- gather_facts: no
- tasks:
- - name: Generate name for data file
- set_fact:
- data_file_path: "{{ lookup('env', 'VIRTUAL_ENV') }}/../../ocp_{{
- (groups['all'][0]).replace('.', '_')
- }}_info.yaml"
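- # The file lands two levels above the virtualenv and is named after the
- # inventory host with dots replaced by underscores, e.g.
- # "ocp_10_0_0_1_info.yaml" for a master listed as 10.0.0.1 (illustrative).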
- - name: Print data file name
- debug:
- msg: "Data file path is '{{ data_file_path }}'"
- - name: "[Re]Create file where we are going to store gathered data"
- copy:
- content: ""
- dest: "{{ data_file_path }}"
- mode: 0644
- force: yes
-
- - name: Get Linux kernel version of ansible runner
- shell: "uname -a"
- register: ansible_runner_linux_kernel_version
- - name: Get Red Hat release info for ansible runner
- shell: "cat /etc/redhat-release"
- register: ansible_runner_rh_release
- ignore_errors: yes
- - name: Get ansible-playbook version from ansible runner
- shell: "{{ lookup('env', 'VIRTUAL_ENV') }}/bin/ansible-playbook --version |
- grep '^ansible-playbook' | awk '{print $2}'"
- register: ansible_runner_ansible_playbook_version
- - name: Get 'openshift-ansible' lib version used by ansible runner
- shell: "echo \"openshift-ansible-$(cat {{
- lookup('env', 'VIRTUAL_ENV')
- }}/usr/share/ansible/openshift-ansible/.tito/packages/openshift-ansible | awk '{print $1}')\""
- register: ansible_runner_oc_lib_version
- - name: Write ansible runner data to the data file
- yedit:
- src: "{{ data_file_path }}"
- state: present
- backup: false
- edits:
- - key: 01_ansible_runner
- value:
- Linux kernel version: "{{ ansible_runner_linux_kernel_version.stdout_lines }}"
- Red Hat release info: "{{
- ansible_runner_rh_release.stdout_lines or
- 'File /etc/redhat-release was not found. Not RHEL machine?' }}"
- ansible-playbook version: "{{ ansible_runner_ansible_playbook_version.stdout_lines }}"
- openshift-ansible lib version: "{{ ansible_runner_oc_lib_version.stdout_lines }}"
-
-# === Master node info ===
-- hosts: all[0]
- gather_facts: no
- vars:
- master_package_list:
- - docker
- - heketi
- master_service_list:
- - docker
- - multipathd
- gluster_pod_package_list:
- - gluster
- - heketi
- - targetcli
- - gluster-block
- - tcmu-runner
- - python-configshell
- - python-rtslib
- gluster_pod_service_list:
- - glusterd
- - heketi
- - gluster-blockd
- - gluster-block-target
- - tcmu-runner
- - rpcbind
- heketi_pod_package_list:
- - gluster
- - heketi
- # NOTE(vponomar): we do not process list of Heketi POD services for 2 reasons:
- # 1) No requirement to get status of any of services on Heketi POD.
- # 2) 'systemctl' does not work on it.
- tasks:
- - name: Get Linux kernel version of the master node
- shell: "uname -a"
- register: master_linux_kernel_version
- - name: Get Red Hat release info for the master node
- shell: "cat /etc/redhat-release"
- register: master_rh_release
- - name: Create grep filter with all the packages we are interested in
- set_fact:
- package_filter: "{{ package_filter | default('grep') + ' -e ' + item }}"
- with_items: "{{ master_package_list }}"
- - name: Get list of installed packages we are interested in
- shell: "rpm -qa | {{ package_filter }}"
- register: master_packages
- - name: Get status of services on OCP Master node
- shell: "systemctl list-units {{ master_service_list | join('.service ') }}.service
- --type=service --all --no-pager --no-legend"
- register: master_services
- - name: Get OpenShift client version
- shell: "oc version | grep -e 'oc ' -e 'openshift' -e 'kube'"
- register: master_oc_version
- - name: Get list of OCP nodes
- shell: "oc get nodes"
- register: master_ocp_nodes
- - name: Get info about all the docker images used in OCP cluster
- shell: "oc get pods --all-namespaces
- -o=custom-columns=:.status.containerStatuses[*].image | grep -v -e '^$' | uniq"
- register: master_image_info
- - name: Write master data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ hostvars['localhost']['data_file_path'] }}"
- state: present
- edits:
- - key: 02_master
- value:
- Linux kernel version: "{{ master_linux_kernel_version.stdout_lines }}"
- Red Hat release info: "{{ master_rh_release.stdout_lines }}"
- List of Packages: "{{ master_packages.stdout_lines }}"
- List of services: "{{ master_services.stdout_lines }}"
- OC Version: "{{ master_oc_version.stdout_lines }}"
- OCP nodes: "{{ master_ocp_nodes.stdout_lines }}"
- Images info: "{{ master_image_info.stdout_lines }}"
-
- # Heketi POD
- - name: Get heketi POD
- shell: "oc get pods --all-namespaces -l heketi
- -o=custom-columns=:.metadata.name,:.metadata.namespace"
- register: heketi_pods
- - name: DEBUG HEKETI
- debug:
- msg: "{{ heketi_pods }}"
- - block:
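- # Index 1 is used because the first line of the custom-columns output is a
- # header row; each following line is "<pod-name> <namespace>".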
- - name: Get storage release version from Heketi POD
- shell: "oc exec {{ (heketi_pods.stdout_lines[1].split(' ') | list)[0] }}
- --namespace {{ (heketi_pods.stdout_lines[1].split(' ') | list)[-1] }} --
- cat /etc/redhat-storage-release"
- register: heketi_pod_storage_release_version
- - name: Get info about packages on Heketi POD
- shell: "oc exec {{ (heketi_pods.stdout_lines[1].split(' ') | list)[0] }}
- --namespace {{ (heketi_pods.stdout_lines[1].split(' ') | list)[-1] }} --
- rpm -qa | grep -e {{ heketi_pod_package_list | join(' -e ') }}"
- register: heketi_pod_packages
- - name: Write Heketi data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ hostvars['localhost']['data_file_path'] }}"
- state: present
- edits:
- - key: 03_heketi_pod
- value:
- Storage release version: "{{ heketi_pod_storage_release_version.stdout_lines }}"
- List of Packages: "{{ heketi_pod_packages.stdout_lines }}"
- when: "{{ ((heketi_pods.stdout_lines | join('')).strip() | length) > 0 }}"
-
- # Gluster PODs
- - name: Get list of Gluster PODs
- shell: "oc get pods --all-namespaces -l glusterfs-node
- -o=custom-columns=:.metadata.name,:.metadata.namespace"
- register: gluster_pods
- - name: DEBUG GLUSTER
- debug:
- msg: "{{ gluster_pods }}"
- - block:
- - name: Get storage release version from Gluster PODs
- shell: "oc exec {{ (item.split(' ') | list)[0] }}
- --namespace {{ (item.split(' ') | list)[-1] }} --
- cat /etc/redhat-storage-release"
- with_items: "{{ gluster_pods.stdout_lines[1:] }}"
- register: gluster_pod_storage_release_version_results
- - name: Process gluster PODs storage release versions results
- set_fact:
- gluster_pod_storage_release_version_processed: "{{
- gluster_pod_storage_release_version_processed | default({}) | combine(
- {(item.item.strip().split(' ')[0]): item.stdout_lines},
- recursive=True
- ) }}"
- with_items: "{{ gluster_pod_storage_release_version_results.results }}"
- - name: Get info about packages on Gluster PODs
- shell: "oc exec {{ (item.split(' ') | list)[0] }}
- --namespace {{ (item.split(' ') | list)[-1] }} --
- rpm -qa | grep -e {{ gluster_pod_package_list | join(' -e ') }}"
- with_items: "{{ gluster_pods.stdout_lines[1:] }}"
- register: gluster_pod_package_list_results
- - name: Process gluster PODs package lists results
- set_fact:
- gluster_pod_package_list_processed: "{{
- gluster_pod_package_list_processed | default({}) | combine(
- {(item.item.strip().split(' ')[0]): item.stdout_lines},
- recursive=True
- ) }}"
- with_items: "{{ gluster_pod_package_list_results.results }}"
- - name: Get info about services on Gluster PODs
- shell: "oc exec {{ (item.split(' ') | list)[0] }}
- --namespace {{ (item.split(' ') | list)[-1] }} --
- systemctl list-units {{ gluster_pod_service_list | join('.service ') }}.service
- --type=service --all --no-pager --no-legend"
- with_items: "{{ gluster_pods.stdout_lines[1:] }}"
- register: gluster_pod_service_list_results
- - name: Process gluster PODs service lists results
- set_fact:
- gluster_pod_service_list_processed: "{{
- gluster_pod_service_list_processed | default({}) | combine(
- {(item.item.strip().split(' ')[0]): item.stdout_lines},
- recursive=True
- ) }}"
- with_items: "{{ gluster_pod_service_list_results.results }}"
- - name: Write Gluster PODs data to the data file
- delegate_to: localhost
- yedit:
- src: "{{ hostvars['localhost']['data_file_path'] }}"
- state: present
- edits:
- - key: 04_gluster_pods
- value:
- Storage release version: "{{ gluster_pod_storage_release_version_processed }}"
- List of Packages: "{{ gluster_pod_package_list_processed }}"
- List of Services: "{{ gluster_pod_service_list_processed }}"
- when: "{{ ((gluster_pods.stdout_lines | join('')).strip() | length) > 0 }}"
-
-- hosts: localhost
- gather_facts: no
- tasks:
- - shell: "cat {{ data_file_path }}"
- register: data_file_content
- - name: Print gathered data
- debug:
- msg: "{{ data_file_content.stdout_lines }}"
diff --git a/deployment/playbooks/library/rpm_q.py b/deployment/playbooks/library/rpm_q.py
deleted file mode 120000
index 43f43786..00000000
--- a/deployment/playbooks/library/rpm_q.py
+++ /dev/null
@@ -1 +0,0 @@
-/usr/share/ansible/openshift-ansible/library/rpm_q.py \ No newline at end of file
diff --git a/deployment/playbooks/library/vmware_folder.py b/deployment/playbooks/library/vmware_folder.py
deleted file mode 100644
index 8e1d9665..00000000
--- a/deployment/playbooks/library/vmware_folder.py
+++ /dev/null
@@ -1,268 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2017, Davis Phillips davis.phillips@gmail.com
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-ANSIBLE_METADATA = {
- 'status': ['preview'],
- 'supported_by': 'community',
- 'version': '1.0',
-}
-
-DOCUMENTATION = """
----
-module: vmware_folder
-short_description: Add/remove folders to/from vCenter
-description:
- - This module can be used to add/remove a folder to/from vCenter
-version_added: 2.3
-author: "Davis Phillips (@dav1x)"
-notes:
- - Tested on vSphere 6.5
-requirements:
- - "python >= 2.6"
- - PyVmomi
-options:
- datacenter:
- description:
- - Name of the datacenter to add the host
- required: True
- cluster:
- description:
- - Name of the cluster to add the host
- required: True
- folder:
- description:
- - Folder name to manage
- required: True
- hostname:
- description:
- - ESXi hostname to manage
- required: True
- username:
- description:
- - ESXi username
- required: True
- password:
- description:
- - ESXi password
- required: True
- state:
- description:
- - Add or remove the folder
- default: 'present'
- choices:
- - 'present'
- - 'absent'
-extends_documentation_fragment: vmware.documentation
-"""
-
-EXAMPLES = """
-# Create a folder
- - name: Add a folder to vCenter
- vmware_folder:
- hostname: vcsa_host
- username: vcsa_user
- password: vcsa_pass
- datacenter: datacenter
- cluster: cluster
- folder: folder
- state: present
-"""
-
-RETURN = """
-instance:
- description: metadata about the new folder
- returned: always
- type: dict
- sample: None
-"""
-
-try:
- from pyVmomi import vim, vmodl
- HAS_PYVMOMI = True
-except ImportError:
- HAS_PYVMOMI = False
-
-from ansible.module_utils import basic # noqa
-from ansible.module_utils.vmware import ( # noqa
- connect_to_api,
- vmware_argument_spec,
- find_datacenter_by_name,
- find_cluster_by_name_datacenter,
- wait_for_task,
-)
-
-
-class VMwareFolder(object):
- def __init__(self, module):
- self.module = module
- self.datacenter = module.params['datacenter']
- self.cluster = module.params['cluster']
- self.folder = module.params['folder']
- self.hostname = module.params['hostname']
- self.username = module.params['username']
- self.password = module.params['password']
- self.state = module.params['state']
- self.dc_obj = None
- self.cluster_obj = None
- self.host_obj = None
- self.folder_obj = None
- self.folder_name = None
- self.folder_expanded = None
- self.folder_full_path = []
- self.content = connect_to_api(module)
-
- def find_host_by_cluster_datacenter(self):
- self.dc_obj = find_datacenter_by_name(
- self.content, self.datacenter)
- self.cluster_obj = find_cluster_by_name_datacenter(
- self.dc_obj, self.cluster)
-
- for host in self.cluster_obj.host:
- if host.name == self.hostname:
- return host, self.cluster
-
- return None, self.cluster
-
- def select_folder(self, host):
- fold_obj = None
- self.folder_expanded = self.folder.split("/")
- last_e = self.folder_expanded.pop()
- fold_obj = self.get_obj([vim.Folder], last_e)
- return fold_obj
-
- def get_obj(self, vimtype, name, return_all=False):
- obj = list()
- container = self.content.viewManager.CreateContainerView(
- self.content.rootFolder, vimtype, True)
-
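- # The container view enumerates every managed object of the requested type
- # under the root folder; entries are matched by display name or managed
- # object ID below.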
- for c in container.view:
- if name in [c.name, c._GetMoId()]:
- if return_all is False:
- return c
- break
- else:
- obj.append(c)
-
- if len(obj) > 0:
- return obj
- else:
- # for backwards-compat
- return None
-
- def process_state(self):
- try:
- folder_states = {
- 'absent': {
- 'present': self.state_remove_folder,
- 'absent': self.state_exit_unchanged,
- },
- 'present': {
- 'present': self.state_exit_unchanged,
- 'absent': self.state_add_folder,
- }
- }
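- # Dispatch: outer key is the requested state from the module params, inner
- # key is the current state reported by check_folder_state().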
- folder_states[self.state][self.check_folder_state()]()
- except vmodl.RuntimeFault as runtime_fault:
- self.module.fail_json(msg=runtime_fault.msg)
- except vmodl.MethodFault as method_fault:
- self.module.fail_json(msg=method_fault.msg)
- except Exception as e:
- self.module.fail_json(msg=str(e))
-
- def state_exit_unchanged(self):
- self.module.exit_json(changed=False)
-
- def state_remove_folder(self):
- changed = True
- result = None
- self.folder_expanded = self.folder.split("/")
- f = self.folder_expanded.pop()
- task = self.get_obj([vim.Folder], f).Destroy()
-
- try:
- success, result = wait_for_task(task)
- except Exception:
- self.module.fail_json(
- msg="Failed to remove folder '%s'" % self.folder)
-
- self.module.exit_json(changed=changed, result=str(result))
-
- def state_add_folder(self):
- changed = True
-
- self.dc_obj = find_datacenter_by_name(
- self.content, self.datacenter)
- self.cluster_obj = find_cluster_by_name_datacenter(
- self.dc_obj, self.cluster)
- self.folder_expanded = self.folder.split("/")
- index = 0
- for f in self.folder_expanded:
- if not self.get_obj([vim.Folder], f):
- if index == 0:
- # First object gets created on the datacenter
- self.dc_obj.vmFolder.CreateFolder(name=f)
- else:
- parent_f = self.get_obj(
- [vim.Folder], self.folder_expanded[index - 1])
- parent_f.CreateFolder(name=f)
- index = index + 1
-
- self.module.exit_json(changed=changed)
-
- def check_folder_state(self):
-
- self.host_obj, self.cluster_obj = (
- self.find_host_by_cluster_datacenter())
- self.folder_obj = self.select_folder(self.host_obj)
-
- if self.folder_obj is None:
- return 'absent'
- else:
- return 'present'
-
-
-def main():
- argument_spec = vmware_argument_spec()
- argument_spec.update(dict(datacenter=dict(required=True, type='str'),
- cluster=dict(required=True, type='str'),
- folder=dict(required=True, type='str'),
- hostname=dict(required=True, type='str'),
- username=dict(required=True, type='str'),
- password=dict(
- required=True, type='str', no_log=True),
- state=dict(
- default='present',
- choices=['present', 'absent'], type='str')))
-
- module = basic.AnsibleModule(
- argument_spec=argument_spec, supports_check_mode=True)
-
- if not HAS_PYVMOMI:
- module.fail_json(msg='pyvmomi is required for this module')
-
- vmware_folder = VMwareFolder(module)
- vmware_folder.process_state()
-
-
-if __name__ == '__main__':
- main()
diff --git a/deployment/playbooks/library/vmware_resource_pool.py b/deployment/playbooks/library/vmware_resource_pool.py
deleted file mode 100644
index 0c9ebbd2..00000000
--- a/deployment/playbooks/library/vmware_resource_pool.py
+++ /dev/null
@@ -1,361 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2017, Davis Phillips davis.phillips@gmail.com
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-ANSIBLE_METADATA = {
- 'status': ['preview'],
- 'supported_by': 'community',
- 'version': '1.0',
-}
-
-DOCUMENTATION = """
----
-module: vmware_resource_pool
-short_description: Add/remove resource pools to/from vCenter
-description:
- - This module can be used to add/remove a resource pool to/from vCenter
-version_added: 2.3
-author: "Davis Phillips (@dav1x)"
-notes:
- - Tested on vSphere 6.5
-requirements:
- - "python >= 2.6"
- - PyVmomi
-options:
- datacenter:
- description:
- - Name of the datacenter to add the host
- required: True
- cluster:
- description:
- - Name of the cluster to add the host
- required: True
- resource_pool:
- description:
- - Resource pool name to manage
- required: True
- hostname:
- description:
- - ESXi hostname to manage
- required: True
- username:
- description:
- - ESXi username
- required: True
- password:
- description:
- - ESXi password
- required: True
- cpu_expandable_reservations:
- description:
- - In a resource pool with an expandable reservation,
- the reservation on a resource pool can grow beyond
- the specified value.
- default: True
- cpu_reservation:
- description:
- - Amount of resource that is guaranteed available to
- the virtual machine or resource pool.
- default: 0
- cpu_limit:
- description:
- - The utilization of a virtual machine/resource pool will not
- exceed this limit, even if there are available resources.
- default: -1 (No limit)
- cpu_shares:
- description:
- - CPU shares are used in case of resource contention.
- choices:
- - high
- - custom
- - low
- - normal
- default: normal
- mem_expandable_reservations:
- description:
- - In a resource pool with an expandable reservation,
- the reservation on a resource pool can grow beyond
- the specified value.
- default: True
- mem_reservation:
- description:
- - Amount of resource that is guaranteed available to
- the virtual machine or resource pool.
- default: 0
- mem_limit:
- description:
- - The utilization of a virtual machine/resource pool will not
- exceed this limit, even if there are available resources.
- default: -1 (No limit)
- mem_shares:
- description:
- - Memory shares are used in case of resource contention.
- choices:
- - high
- - custom
- - low
- - normal
- default: normal
- state:
- description:
- - Add or remove the resource pool
- default: 'present'
- choices:
- - 'present'
- - 'absent'
-extends_documentation_fragment: vmware.documentation
-"""
-
-EXAMPLES = """
-# Create a resource pool
- - name: Add resource pool to vCenter
- vmware_resource_pool:
- hostname: vcsa_host
- username: vcsa_user
- password: vcsa_pass
- datacenter: datacenter
- cluster: cluster
- resource_pool: resource_pool
- mem_shares: normal
- mem_limit: -1
- mem_reservation: 0
- mem_expandable_reservations: True
- cpu_shares: normal
- cpu_limit: -1
- cpu_reservation: 0
- cpu_expandable_reservations: True
- state: present
-"""
-
-RETURN = """
-instance:
- description: metadata about the new resource pool
- returned: always
- type: dict
- sample: None
-"""
-
-try:
- from pyVmomi import vim, vmodl
- HAS_PYVMOMI = True
-except ImportError:
- HAS_PYVMOMI = False
-
-from ansible.module_utils import basic # noqa
-from ansible.module_utils.vmware import ( # noqa
- get_all_objs,
- connect_to_api,
- vmware_argument_spec,
- find_datacenter_by_name,
- find_cluster_by_name_datacenter,
- wait_for_task,
-)
-
-
-class VMwareResourcePool(object):
- def __init__(self, module):
- self.module = module
- self.datacenter = module.params['datacenter']
- self.cluster = module.params['cluster']
- self.resource_pool = module.params['resource_pool']
- self.hostname = module.params['hostname']
- self.username = module.params['username']
- self.password = module.params['password']
- self.state = module.params['state']
- self.mem_shares = module.params['mem_shares']
- self.mem_limit = module.params['mem_limit']
- self.mem_reservation = module.params['mem_reservation']
- self.mem_expandable_reservations = (
- module.params['mem_expandable_reservations'])
- self.cpu_shares = module.params['cpu_shares']
- self.cpu_limit = module.params['cpu_limit']
- self.cpu_reservation = module.params['cpu_reservation']
- self.cpu_expandable_reservations = (
- module.params['cpu_expandable_reservations'])
- self.dc_obj = None
- self.cluster_obj = None
- self.host_obj = None
- self.resource_pool_obj = None
- self.content = connect_to_api(module)
-
- def find_host_by_cluster_datacenter(self):
- self.dc_obj = find_datacenter_by_name(
- self.content, self.datacenter)
- self.cluster_obj = find_cluster_by_name_datacenter(
- self.dc_obj, self.cluster)
-
- for host in self.cluster_obj.host:
- if host.name == self.hostname:
- return host, self.cluster
-
- return None, self.cluster
-
- def select_resource_pool(self, host):
- pool_obj = None
-
- resource_pools = get_all_objs(self.content, [vim.ResourcePool])
-
- pool_selections = self.get_obj(
- [vim.ResourcePool], self.resource_pool, return_all=True)
- if pool_selections:
- for p in pool_selections:
- if p in resource_pools:
- pool_obj = p
- break
- return pool_obj
-
- def get_obj(self, vimtype, name, return_all=False):
- obj = list()
- container = self.content.viewManager.CreateContainerView(
- self.content.rootFolder, vimtype, True)
-
- for c in container.view:
- if name in [c.name, c._GetMoId()]:
- if return_all is False:
- return c
- break
- else:
- obj.append(c)
-
- if len(obj) > 0:
- return obj
- else:
- # for backwards-compat
- return None
-
- def process_state(self):
- try:
- rp_states = {
- 'absent': {
- 'present': self.state_remove_rp,
- 'absent': self.state_exit_unchanged,
- },
- 'present': {
- 'present': self.state_exit_unchanged,
- 'absent': self.state_add_rp,
- }
- }
-
- rp_states[self.state][self.check_rp_state()]()
-
- except vmodl.RuntimeFault as runtime_fault:
- self.module.fail_json(msg=runtime_fault.msg)
- except vmodl.MethodFault as method_fault:
- self.module.fail_json(msg=method_fault.msg)
- except Exception as e:
- self.module.fail_json(msg=str(e))
-
- def state_exit_unchanged(self):
- self.module.exit_json(changed=False)
-
- def state_remove_rp(self):
- changed = True
- result = None
- resource_pool = self.select_resource_pool(self.host_obj)
- try:
- task = self.resource_pool_obj.Destroy()
- success, result = wait_for_task(task)
-
- except Exception:
- self.module.fail_json(
- msg="Failed to remove resource pool '%s' '%s'" % (
- self.resource_pool, resource_pool))
- self.module.exit_json(changed=changed, result=str(result))
-
- def state_add_rp(self):
- changed = True
-
- rp_spec = vim.ResourceConfigSpec()
- cpu_alloc = vim.ResourceAllocationInfo()
- cpu_alloc.expandableReservation = self.cpu_expandable_reservations
- cpu_alloc.limit = int(self.cpu_limit)
- cpu_alloc.reservation = int(self.cpu_reservation)
- cpu_alloc_shares = vim.SharesInfo()
- cpu_alloc_shares.level = self.cpu_shares
- cpu_alloc.shares = cpu_alloc_shares
- rp_spec.cpuAllocation = cpu_alloc
- mem_alloc = vim.ResourceAllocationInfo()
- mem_alloc.limit = int(self.mem_limit)
- mem_alloc.expandableReservation = self.mem_expandable_reservations
- mem_alloc.reservation = int(self.mem_reservation)
- mem_alloc_shares = vim.SharesInfo()
- mem_alloc_shares.level = self.mem_shares
- mem_alloc.shares = mem_alloc_shares
- rp_spec.memoryAllocation = mem_alloc
-
- self.dc_obj = find_datacenter_by_name(
- self.content, self.datacenter)
- self.cluster_obj = find_cluster_by_name_datacenter(
- self.dc_obj, self.cluster)
- rootResourcePool = self.cluster_obj.resourcePool
- rootResourcePool.CreateResourcePool(self.resource_pool, rp_spec)
-
- self.module.exit_json(changed=changed)
-
- def check_rp_state(self):
-
- self.host_obj, self.cluster_obj = (
- self.find_host_by_cluster_datacenter())
- self.resource_pool_obj = self.select_resource_pool(self.host_obj)
-
- if self.resource_pool_obj is None:
- return 'absent'
- else:
- return 'present'
-
-
-def main():
- argument_spec = vmware_argument_spec()
- argument_spec.update(dict(datacenter=dict(required=True, type='str'),
- cluster=dict(required=True, type='str'),
- resource_pool=dict(required=True, type='str'),
- hostname=dict(required=True, type='str'),
- username=dict(required=True, type='str'),
- password=dict(
- required=True, type='str', no_log=True),
- mem_shares=dict(
- type='str', default="normal",
- choices=['high', 'custom', 'normal', 'low']),
- mem_limit=dict(type='int', default="-1"),
- mem_reservation=dict(type='int', default="0"),
- mem_expandable_reservations=dict(
- type='bool', default="True"),
- cpu_shares=dict(
- type='str', default="normal",
- choices=['high', 'custom', 'normal', 'low']),
- cpu_limit=dict(type='int', default="-1"),
- cpu_reservation=dict(type='int', default="0"),
- cpu_expandable_reservations=dict(
- type='bool', default="True"),
- state=dict(
- default='present',
- choices=['present', 'absent'], type='str')))
-
- module = basic.AnsibleModule(
- argument_spec=argument_spec, supports_check_mode=True)
-
- if not HAS_PYVMOMI:
- module.fail_json(msg='pyvmomi is required for this module')
-
- vmware_rp = VMwareResourcePool(module)
- vmware_rp.process_state()
-
-
-if __name__ == '__main__':
- main()
diff --git a/deployment/playbooks/node-setup.yaml b/deployment/playbooks/node-setup.yaml
deleted file mode 100644
index c8e5916e..00000000
--- a/deployment/playbooks/node-setup.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
----
-- include: "scaleup.yaml"
- vars:
- debug_level: 2
- openshift_debug_level: "{{ debug_level }}"
- openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
- osm_controller_args:
- osm_api_server_args:
- openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
- openshift_master_access_token_max_seconds: 2419200
- openshift_master_api_port: "{{ console_port }}"
- openshift_master_console_port: "{{ console_port }}"
- osm_cluster_network_cidr: 172.16.0.0/16
- openshift_registry_selector: "role=compute"
- openshift_router_selector: "role=compute"
- openshift_node_local_quota_per_fsgroup: 512Mi
- openshift_master_cluster_method: native
- openshift_cloudprovider_kind: vsphere
- openshift_cloudprovider_vsphere_host: "{{ vcenter_host }}"
- openshift_cloudprovider_vsphere_username: "{{ vcenter_username }}"
- openshift_cloudprovider_vsphere_password: "{{ vcenter_password }}"
- openshift_cloudprovider_vsphere_datacenter: "{{ vcenter_datacenter }}"
- openshift_cloudprovider_vsphere_datastore: "{{ vcenter_datastore }}"
- openshift_cloudprovider_vsphere_folder: "{{ vcenter_folder }}"
- os_sdn_network_plugin_name: "{{ openshift_sdn }}"
- deployment_type: "{{ deployment_type }}"
- load_balancer_hostname: "{{ lb_host }}"
- openshift_master_cluster_hostname: "{{ load_balancer_hostname }}"
- openshift_master_cluster_public_hostname: "{{ load_balancer_hostname }}"
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- # 'openshift_node_groups' is required for OCP3.10
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
diff --git a/deployment/playbooks/noop.yaml b/deployment/playbooks/noop.yaml
deleted file mode 100644
index 94173aed..00000000
--- a/deployment/playbooks/noop.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- ignore_errors: no
- tasks:
- - debug:
- msg: "No operation TASK for placeholder playbook."
diff --git a/deployment/playbooks/ocp-configure.yaml b/deployment/playbooks/ocp-configure.yaml
deleted file mode 100644
index c5123e6a..00000000
--- a/deployment/playbooks/ocp-configure.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: localhost
- gather_facts: yes
- vars_files:
- - vars/main.yaml
- roles:
- # Group systems
- - instance-groups
-
-- hosts: single_master
- gather_facts: yes
- vars_files:
- - vars/main.yaml
- roles:
- - instance-groups
- - storage-class-configure
diff --git a/deployment/playbooks/ocp-end-to-end.yaml b/deployment/playbooks/ocp-end-to-end.yaml
deleted file mode 100644
index 58f0ca01..00000000
--- a/deployment/playbooks/ocp-end-to-end.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- include: setup.yaml
- tags: ['setup']
-
-- include: prod.yaml
- tags: ['prod']
-
-- include: ocp-install.yaml
- tags: ['ocp-install']
-
-- include: ocp-configure.yaml
- tags: ['ocp-configure']
-
-- include: clean.yaml
- tags: ['clean']
diff --git a/deployment/playbooks/ocp-install.yaml b/deployment/playbooks/ocp-install.yaml
deleted file mode 100644
index 43b92c2f..00000000
--- a/deployment/playbooks/ocp-install.yaml
+++ /dev/null
@@ -1,365 +0,0 @@
----
-- hosts: localhost
- gather_facts: yes
- ignore_errors: yes
- vars_files:
- - vars/main.yaml
- roles:
- # Group systems
- - instance-groups
-
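-# OCP 3.6/3.7 use the bundled prerequisite.yaml; newer releases use the
-# prerequisites.yml shipped with openshift-ansible inside the virtualenv.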
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) | ternary(
- 'prerequisite.yaml',
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml'
- ) }}"
- vars:
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- # 'openshift_node_groups' is required for OCP3.10
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
-
-- name: Call openshift includes for OCP3.6 and OCP3.7 installer
- include: "{{
- lookup('env', 'VIRTUAL_ENV')
- }}/usr/share/ansible/openshift-ansible/playbooks/{{
- (openshift_vers in ['v3_6', 'v3_7']) |
- ternary('byo/config.yml', 'deploy_cluster.yml')
- }}"
- vars:
- openshift_release: "v3.{{ openshift_vers.split('_')[-1] }}"
- debug_level: 2
- console_port: 8443
- openshift_debug_level: "{{ debug_level }}"
- openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
- # NOTE(vponomar): following two can be changed to "true" when
- # https://github.com/openshift/openshift-ansible/issues/6086 is fixed
- openshift_enable_service_catalog: false
- template_service_broker_install: false
- osm_controller_args:
- cloud-provider:
- - "vsphere"
- cloud-config:
- - "/etc/origin/cloudprovider/vsphere.conf"
- osm_api_server_args:
- cloud-provider:
- - "vsphere"
- cloud-config:
- - "/etc/origin/cloudprovider/vsphere.conf"
- openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
- openshift_master_access_token_max_seconds: 2419200
- openshift_hosted_router_replicas: 1
- openshift_hosted_registry_replicas: 1
- openshift_master_api_port: "{{ console_port }}"
- openshift_master_console_port: "{{ console_port }}"
- openshift_node_local_quota_per_fsgroup: 512Mi
- osm_cluster_network_cidr: 172.16.0.0/16
- osm_use_cockpit: false
- osm_default_node_selector: "role=compute"
- openshift_registry_selector: "role=compute"
- openshift_override_hostname_check: true
- openshift_router_selector: "role=compute"
- openshift_master_cluster_method: native
- openshift_cloudprovider_kind: vsphere
- openshift_cloudprovider_vsphere_host: "{{ vcenter_host }}"
- openshift_cloudprovider_vsphere_username: "{{ vcenter_username }}"
- openshift_cloudprovider_vsphere_password: "{{ vcenter_password }}"
- openshift_cloudprovider_vsphere_datacenter: "{{ vcenter_datacenter }}"
- openshift_cloudprovider_vsphere_datastore: "{{ vcenter_datastore }}"
- openshift_cloudprovider_vsphere_folder: "{{ vcenter_folder }}"
- wildcard_zone: "{{ app_dns_prefix }}.{{ dns_zone }}"
- osm_default_subdomain: "{{ wildcard_zone }}"
- openshift_master_default_subdomain: "{{osm_default_subdomain}}"
- deployment_type: "{{ deployment_type }}"
- load_balancer_hostname: "{{ lb_host }}"
- openshift_master_cluster_hostname: "{{ load_balancer_hostname }}"
- openshift_master_cluster_public_hostname: "{{ load_balancer_hostname }}"
- os_sdn_network_plugin_name: "{{ openshift_sdn }}"
- openshift_master_identity_providers:
- - name: 'allow_all'
- kind: 'AllowAllPasswordIdentityProvider'
- login: True
- challenge: True
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- # 'openshift_node_groups' is required for OCP3.10
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- when: openshift_vers in ['v3_6', 'v3_7']
-
-- name: "Call openshift includes for OCP3.9+ installer"
- include: "{{
- lookup('env', 'VIRTUAL_ENV')
- }}/usr/share/ansible/openshift-ansible/playbooks/{{
- (openshift_vers in ['v3_6', 'v3_7']) |
- ternary('byo/config.yml', 'deploy_cluster.yml')
- }}"
- vars:
- openshift_release: "v3.{{ openshift_vers.split('_')[-1] }}"
- debug_level: 2
- console_port: 8443
- openshift_debug_level: "{{ debug_level }}"
- openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
- # NOTE(vponomar): following two can be changed to "true" when
- # https://github.com/openshift/openshift-ansible/issues/6086 is fixed
- openshift_enable_service_catalog: false
- template_service_broker_install: false
- osm_controller_args:
- feature-gates:
- - "ExpandPersistentVolumes=true"
- cloud-provider:
- - "vsphere"
- cloud-config:
- - "/etc/origin/cloudprovider/vsphere.conf"
- osm_api_server_args:
- feature-gates:
- - "ExpandPersistentVolumes=true"
- cloud-provider:
- - "vsphere"
- cloud-config:
- - "/etc/origin/cloudprovider/vsphere.conf"
- openshift_master_admission_plugin_config:
- PersistentVolumeClaimResize:
- configuration:
- apiVersion: v1
- disable: false
- kind: DefaultAdmissionConfig
- openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}"
- openshift_master_access_token_max_seconds: 2419200
- openshift_hosted_router_replicas: 1
- openshift_hosted_registry_replicas: 1
- openshift_master_api_port: "{{ console_port }}"
- openshift_master_console_port: "{{ console_port }}"
- openshift_node_local_quota_per_fsgroup: 512Mi
- osm_cluster_network_cidr: 172.16.0.0/16
- osm_use_cockpit: false
- osm_default_node_selector: "role=compute"
- openshift_registry_selector: "role=compute"
- openshift_override_hostname_check: true
- openshift_router_selector: "role=compute"
- openshift_master_cluster_method: native
- openshift_cloudprovider_kind: vsphere
- openshift_cloudprovider_vsphere_host: "{{ vcenter_host }}"
- openshift_cloudprovider_vsphere_username: "{{ vcenter_username }}"
- openshift_cloudprovider_vsphere_password: "{{ vcenter_password }}"
- openshift_cloudprovider_vsphere_datacenter: "{{ vcenter_datacenter }}"
- openshift_cloudprovider_vsphere_datastore: "{{ vcenter_datastore }}"
- openshift_cloudprovider_vsphere_folder: "{{ vcenter_folder }}"
- wildcard_zone: "{{ app_dns_prefix }}.{{ dns_zone }}"
- osm_default_subdomain: "{{ wildcard_zone }}"
- openshift_master_default_subdomain: "{{osm_default_subdomain}}"
- deployment_type: "{{ deployment_type }}"
- load_balancer_hostname: "{{ lb_host }}"
- openshift_master_cluster_hostname: "{{ load_balancer_hostname }}"
- openshift_master_cluster_public_hostname: "{{ load_balancer_hostname }}"
- os_sdn_network_plugin_name: "{{ openshift_sdn }}"
- openshift_master_identity_providers:
- - name: 'allow_all'
- kind: 'AllowAllPasswordIdentityProvider'
- login: True
- challenge: True
- openshift_crio_docker_gc_node_selector:
- runtime: crio
- # 'openshift_node_groups' is required for OCP3.10
- openshift_node_groups:
- - name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
- - name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
- - name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- - name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
- - name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
- when: openshift_vers not in ['v3_6', 'v3_7']
-
-- hosts: allnodes
- gather_facts: no
- ignore_errors: no
- tasks:
- - service:
- name: dnsmasq
- state: restarted
-
-- name: Run yum_update command on all the nodes and then reboot them
- hosts: localhost
- gather_facts: no
- roles:
- - yum-update-and-reboot
-
-- hosts: single_master
- gather_facts: no
- tasks:
- - name: Make sure oc client is responsive
- command: oc status
- retries: 120
- delay: 5
- register: oc_status_result
- until: oc_status_result is succeeded
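For context on the CRI-O node groups defined above: each entry under 'edits' maps a dotted key onto a list value in the node configuration that openshift-ansible generates, so a node placed in the node-config-compute-crio group should (roughly, per my reading of those edits) end up with kubelet settings along these lines:

kubeletArguments:
  container-runtime:
  - remote
  container-runtime-endpoint:
  - /var/run/crio/crio.sock
  image-service-endpoint:
  - /var/run/crio/crio.sock
  runtime-request-timeout:
  - 10m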
diff --git a/deployment/playbooks/prerequisite.yaml b/deployment/playbooks/prerequisite.yaml
deleted file mode 100644
index 5c7cc399..00000000
--- a/deployment/playbooks/prerequisite.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- hosts: cluster_hosts
- gather_facts: yes
- become: yes
- vars_files:
- - vars/main.yaml
- roles:
- - instance-groups
- - package-repos
-
-- hosts: cluster_hosts
- gather_facts: no
- vars_files:
- - vars/main.yaml
- become: yes
- roles:
- - prerequisites
-
-- hosts: master
- gather_facts: yes
- vars_files:
- - vars/main.yaml
- become: yes
- roles:
- - master-prerequisites
- - etcd-storage
diff --git a/deployment/playbooks/prod-ose-cns.yaml b/deployment/playbooks/prod-ose-cns.yaml
deleted file mode 100644
index 80a85f11..00000000
--- a/deployment/playbooks/prod-ose-cns.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- hosts: localhost
- connection: local
- gather_facts: yes
- become: no
- vars_files:
- - vars/main.yaml
- roles:
- # Group systems
- - create-vm-cns-prod-ose
- - setup-custom-domain-names-for-ansible-runner
diff --git a/deployment/playbooks/prod-ose-crs.yaml b/deployment/playbooks/prod-ose-crs.yaml
deleted file mode 100644
index aa9537ab..00000000
--- a/deployment/playbooks/prod-ose-crs.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- hosts: localhost
- connection: local
- gather_facts: yes
- become: no
- vars_files:
- - vars/main.yaml
- roles:
- # Group systems
- - create-vm-crs-prod-ose
- - setup-custom-domain-names-for-ansible-runner
diff --git a/deployment/playbooks/prod.yaml b/deployment/playbooks/prod.yaml
deleted file mode 100644
index 04be066b..00000000
--- a/deployment/playbooks/prod.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- hosts: localhost
- vars_files:
- - vars/main.yaml
- roles:
- - create-vm-prod-ose
- - setup-custom-domain-names-for-ansible-runner
-
-- name: Fulfill OSE3 prerequisites on production hosts
- hosts: production_group
- vars_files:
- - vars/main.yaml
- roles:
- - setup-custom-domain-names
- - package-repos
- - vmware-guest-setup
- - cloud-provider-setup
- - docker-storage-setup
- - openshift-volume-quota
diff --git a/deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml b/deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml
deleted file mode 100644
index 1b93ce22..00000000
--- a/deployment/playbooks/roles/cloud-provider-setup/tasks/main.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: create /etc/origin/cloudprovider
- file:
- state: directory
- path: "{{ vsphere_conf_dir }}"
-
-- name: create the vsphere.conf file
- template:
- src: "{{ role_path }}/templates/vsphere.conf.j2"
- dest: /etc/origin/cloudprovider/vsphere.conf
- owner: root
- group: root
- mode: 0644
diff --git a/deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2 b/deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2
deleted file mode 100644
index 8abe6e8c..00000000
--- a/deployment/playbooks/roles/cloud-provider-setup/templates/vsphere.conf.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-[Global]
-user = "{{ vcenter_username }}"
-password = "{{ vcenter_password }}"
-server = "{{ vcenter_host }}"
-port = 443
-insecure-flag = 1
-datacenter = {{ vcenter_datacenter }}
-datastore = {{ vcenter_datastore }}
-working-dir = /{{ vcenter_datacenter }}/vm/{{ vcenter_folder }}/
-[Disk]
-scsicontrollertype = pvscsi
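For reference, rendered with purely hypothetical values (user admin@vsphere.local, host vcsa.example.com, datacenter DC1, datastore DS1, folder ocp-cluster), the template above produces a vsphere.conf along these lines:

[Global]
user = "admin@vsphere.local"
password = "not-a-real-password"
server = "vcsa.example.com"
port = 443
insecure-flag = 1
datacenter = DC1
datastore = DS1
working-dir = /DC1/vm/ocp-cluster/
[Disk]
scsicontrollertype = pvscsi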
diff --git a/deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml b/deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml
deleted file mode 100644
index 81511c01..00000000
--- a/deployment/playbooks/roles/cloud-provider-setup/vars/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-vsphere_conf_dir: /etc/origin/cloudprovider
-vsphere_conf: "{{vsphere_conf_dir }}/vsphere.conf"
diff --git a/deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml
deleted file mode 100644
index 392b5da1..00000000
--- a/deployment/playbooks/roles/create-vm-add-prod-ose/tasks/main.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Mark the following nodes to be added to the 'new_nodes' group
- set_fact:
- is_add_nodes: true
-
-- name: Import common node creation role
- import_role:
- name: create-vm-prod-ose
diff --git a/deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml
deleted file mode 100644
index e01f1dd0..00000000
--- a/deployment/playbooks/roles/create-vm-cns-prod-ose/tasks/main.yaml
+++ /dev/null
@@ -1,142 +0,0 @@
----
-- name: Define set of main disks (system and heketi)
- set_fact:
- disks_info: "{{ disks_info | default([
- {'size_gb': 60, 'type': 'thin', 'datastore': vcenter_datastore},
- {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore},
- {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore}])
- }} + {{
- [{'size_gb': (item.strip() | int),
- 'type': container_storage_disk_type,
- 'datastore': vcenter_datastore}]
- }}"
- with_items: "{{ container_storage_disks.split(',') }}"
-
-- name: Define set of additional disks which will be just attached to nodes
- set_fact:
- additional_disks_info: "{{ additional_disks_info | default([]) }} + {{
- [{'size_gb': (item.strip() | int),
- 'type': container_storage_disk_type,
- 'datastore': vcenter_datastore}]
- }}"
- with_items: "{{ additional_disks_to_storage_nodes.split(',') }}"
-
-- name: Create CNS production VMs on vCenter
- vmware_guest:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- validate_certs: False
- name: "{{ item.value.guestname }}"
- cluster: "{{ vcenter_cluster}}"
- datacenter: "{{ vcenter_datacenter }}"
- resource_pool: "{{ vcenter_resource_pool }}"
- template: "{{vcenter_template_name}}"
- state: poweredon
- wait_for_ip_address: true
- folder: "/{{ vcenter_folder }}"
- annotation: "{{ item.value.tag }}"
- disk: "{{ disks_info }} + {{ additional_disks_info }}"
- hardware:
- memory_mb: 32768
- networks: "[{'name': '{{ vm_network }}', 'type': 'dhcp' }]"
- customization:
- domain: "{{dns_zone}}"
- dns_suffix: "{{dns_zone}}"
- hostname: "{{ item.value.guestname}}"
- with_dict: "{{host_inventory}}"
- when: "item.value.guesttype in ['cns', ]"
- async: "{{ 6 * 600 }}"
- poll: 0
- register: async_vms_creation
-
-- name: Check async status of VMs creation
- async_status:
- jid: "{{ async_result_item.ansible_job_id }}"
- with_items: "{{ async_vms_creation.results }}"
- loop_control:
- loop_var: "async_result_item"
- register: async_poll_results
- until: async_poll_results.finished
- retries: "{{ 6 * 100 }}"
-
-- name: Read info of newly created VMs
- vmware_guest_tools_wait:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- folder: "/{{ vcenter_folder }}"
- validate_certs: False
- uuid: "{{ item.instance.hw_product_uuid }}"
- with_items: "{{ async_poll_results.results }}"
- register: facts
-
-- name: Map node names and their IP addresses
- set_fact:
- ip4addrs: "{{ ip4addrs | default({}) | combine(
- {item.instance.hw_name: (
- item.instance.hw_eth0.ipaddresses | ipv4 | first)},
- recursive=True) }}"
- hostnames_for_reboot: "{{
- (hostnames_for_reboot | default([])) +
- [(item.instance.hw_eth0.ipaddresses | ipv4 | first)] }}"
- with_items: "{{ facts.results }}"
-
-- name: Define glusterfs devices
- set_fact:
- glusterfs_devices: "{{ glusterfs_devices | default([]) }} +
- {{ ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0]] }}"
- with_indexed_items: "{{ disks_info[3::] }}"
-
-- name: Define glusterfs additional devices
- set_fact:
- glusterfs_additional_devices: "{{
- glusterfs_additional_devices | default([])
- }} + {{
- ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0 + (glusterfs_devices|length)]]
- }}"
- with_indexed_items: "{{ additional_disks_info }}"
-
-- name: Add CNS production VMs to inventory
- add_host:
- hostname: "{{ item.value.guestname }}"
- ansible_fqdn: "{{ item.value.guestname }}.{{ dns_zone }}"
- ansible_ssh_host: "{{ ip4addrs[item.value.guestname] }}"
- groups: "{{ item.value.tag }}, new_nodes, storage, cns, glusterfs"
- openshift_node_group_name: "node-config-storage"
- # Following vars are for 'openshift_storage_glusterfs' role from
- # 'openshift/openshift-ansible' repo
- glusterfs_devices: "{{ glusterfs_devices }}"
- glusterfs_hostname: "{{ item.value.guestname }}"
- glusterfs_ip: "{{ ip4addrs[item.value.guestname] }}"
- glusterfs_zone: "{{ ip4addrs[item.value.guestname].split('.')[-2::] | join('') | int }}"
- with_dict: "{{ host_inventory }}"
- when: "item.value.guesttype in ['cns', ]"
-
-# Following updates config file
-# which is required for automated tests from 'glusterfs-containers-tests' repo
-
-- name: Combine data about gluster servers for 'glusterfs-containers-tests' config file
- set_fact:
- gluster_servers: "{{
- gluster_servers | default({}) | combine({
- ip4addrs[item.value.guestname]: {
- 'manage': item.value.guestname,
- 'storage': ip4addrs[item.value.guestname],
- 'additional_devices': glusterfs_additional_devices,
- }
- })
- }}"
- with_dict: "{{ host_inventory }}"
- when:
- - item.value.guesttype in ['cns', ]
- - cns_automation_config_file_path | length > 0
-
-- name: Update 'glusterfs-containers-tests' config file
- yedit:
- src: "{{ cns_automation_config_file_path }}"
- state: present
- edits:
- - key: gluster_servers
- value: "{{ gluster_servers }}"
- when: gluster_servers is defined
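A note on the device naming above: the role assumes the three "main" disks (system plus the two heketi/docker disks) show up in the guest as /dev/sda-/dev/sdc, so gluster disks get letters starting at 'd'. A minimal, self-contained sketch of the same indexing, assuming hypothetical sizes container_storage_disks="100,100" and additional_disks_to_storage_nodes="200":

---
- hosts: localhost
  gather_facts: no
  vars:
    storage_disk_sizes: [100, 100]   # stands in for container_storage_disks
    additional_disk_sizes: [200]     # stands in for additional_disks_to_storage_nodes
  tasks:
    - name: Name the main gluster disks, starting at /dev/sdd
      set_fact:
        glusterfs_devices: "{{ (glusterfs_devices | default([])) +
          ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0]] }}"
      with_indexed_items: "{{ storage_disk_sizes }}"
    - name: Name the additional disks, continuing after the main ones
      set_fact:
        glusterfs_additional_devices: "{{ (glusterfs_additional_devices | default([])) +
          ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0 + (glusterfs_devices | length)]] }}"
      with_indexed_items: "{{ additional_disk_sizes }}"
    - name: Show the computed device names
      debug:
        msg: "{{ glusterfs_devices + glusterfs_additional_devices }}"  # ['/dev/sdd', '/dev/sde', '/dev/sdf']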
diff --git a/deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml
deleted file mode 100644
index 05aa63bb..00000000
--- a/deployment/playbooks/roles/create-vm-crs-prod-ose/tasks/main.yaml
+++ /dev/null
@@ -1,143 +0,0 @@
----
-- name: Define set of main disks (system and heketi)
- set_fact:
- disks_info: "{{ disks_info | default([
- {'size_gb': 60, 'type': 'thin', 'datastore': vcenter_datastore},
- {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore},
- {'size_gb': 40, 'type': 'thin', 'datastore': vcenter_datastore}])
- }} + {{
- [{'size_gb': (item.strip() | int),
- 'type': container_storage_disk_type,
- 'datastore': vcenter_datastore}]
- }}"
- with_items: "{{ container_storage_disks.split(',') }}"
-
-- name: Define set of additional disks which will be just attached to nodes
- set_fact:
- additional_disks_info: "{{ additional_disks_info | default([]) }} + {{
- [{'size_gb': (item.strip() | int),
- 'type': container_storage_disk_type,
- 'datastore': vcenter_datastore}]
- }}"
- with_items: "{{ additional_disks_to_storage_nodes.split(',') }}"
-
-- name: Create CRS production VMs on vCenter
- vmware_guest:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- validate_certs: False
- name: "{{ item.value.guestname }}"
- cluster: "{{ vcenter_cluster}}"
- datacenter: "{{ vcenter_datacenter }}"
- resource_pool: "{{ vcenter_resource_pool }}"
- template: "{{vcenter_template_name}}"
- state: poweredon
- wait_for_ip_address: true
- folder: "/{{ vcenter_folder }}"
- annotation: "{{ cluster_id }}-crs"
- disk: "{{ disks_info }} + {{ additional_disks_info }}"
- hardware:
- memory_mb: 32768
- networks: "[{'name': '{{ vm_network }}', 'type': 'dhcp' }]"
- customization:
- domain: "{{dns_zone}}"
- dns_suffix: "{{dns_zone}}"
- hostname: "{{ item.value.guestname}}"
- with_dict: "{{host_inventory}}"
- when: "item.value.guesttype in ['crs', ]"
- async: "{{ 6 * 600 }}"
- poll: 0
- register: async_vms_creation
-
-- name: Check async status of VMs creation
- async_status:
- jid: "{{ async_result_item.ansible_job_id }}"
- with_items: "{{ async_vms_creation.results }}"
- loop_control:
- loop_var: "async_result_item"
- register: async_poll_results
- until: async_poll_results.finished
- retries: "{{ 6 * 100 }}"
-
-- name: Read info of newly created VMs
- vmware_guest_tools_wait:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- folder: "/{{ vcenter_folder }}"
- validate_certs: False
- uuid: "{{ item.instance.hw_product_uuid }}"
- with_items: "{{ async_poll_results.results }}"
- register: facts
-
-- name: Map node names and their IP addresses
- set_fact:
- ip4addrs: "{{ ip4addrs | default({}) | combine(
- {item.instance.hw_name: (
- item.instance.hw_eth0.ipaddresses | ipv4 | first)},
- recursive=True) }}"
- hostnames_for_reboot: "{{
- (hostnames_for_reboot | default([])) +
- [(item.instance.hw_eth0.ipaddresses | ipv4 | first)] }}"
- with_items: "{{ facts.results }}"
-
-- name: Define glusterfs devices
- set_fact:
- glusterfs_devices: "{{ glusterfs_devices | default([]) }} +
- {{ ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0]] }}"
- with_indexed_items: "{{ disks_info[3::] }}"
-
-- name: Define glusterfs additional devices
- set_fact:
- glusterfs_additional_devices: "{{
- glusterfs_additional_devices | default([])
- }} + {{
- ['/dev/sd' + 'defghijklmnopqrstuvwxyz'[item.0 + (glusterfs_devices|length)]]
- }}"
- with_indexed_items: "{{ additional_disks_info }}"
-
-- name: Add CRS production VMs to inventory
- add_host:
- hostname: "{{ item.value.guestname }}"
- ansible_fqdn: "{{ item.value.guestname }}.{{ dns_zone }}"
- ansible_ssh_host: "{{ ip4addrs[item.value.guestname] }}"
- openshift_node_group_name: "node-config-storage"
- # old groups are: crs, production_group, {{cluster-id}}-crs
- groups: "{{ cluster_id }}-crs, crs, storage, glusterfs"
- # Following vars are for 'openshift_storage_glusterfs' role from
- # 'openshift/openshift-ansible' repo
- glusterfs_devices: "{{ glusterfs_devices }}"
- glusterfs_hostname: "{{ item.value.guestname }}"
- glusterfs_ip: "{{ ip4addrs[item.value.guestname] }}"
- glusterfs_zone: "{{ ip4addrs[item.value.guestname].split('.')[-2::] | join('') | int }}"
- with_dict: "{{ host_inventory }}"
- when: "item.value.guesttype in ['crs', ]"
-
-# Following updates config file
-# which is required for automated tests from 'glusterfs-containers-tests' repo
-
-- name: Combine data about gluster servers for 'glusterfs-containers-tests' config file
- set_fact:
- gluster_servers: "{{
- gluster_servers | default({}) | combine({
- ip4addrs[item.value.guestname]: {
- 'manage': item.value.guestname,
- 'storage': ip4addrs[item.value.guestname],
- 'additional_devices': glusterfs_additional_devices,
- }
- })
- }}"
- with_dict: "{{ host_inventory }}"
- when:
- - item.value.guesttype in ['crs', ]
- - cns_automation_config_file_path | length > 0
-
-- name: Update 'glusterfs-containers-tests' config file
- yedit:
- src: "{{ cns_automation_config_file_path }}"
- state: present
- edits:
- - key: gluster_servers
- value: "{{ gluster_servers }}"
- when: gluster_servers is defined
diff --git a/deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml b/deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml
deleted file mode 100644
index a0124348..00000000
--- a/deployment/playbooks/roles/create-vm-prod-ose/tasks/main.yaml
+++ /dev/null
@@ -1,157 +0,0 @@
----
-- name: Determine whether the following nodes need to be added to the "new_nodes" group
- set_fact:
- is_add_nodes: "{{ is_add_nodes | default(false) }}"
-
-- name: Define memory and disk parameters per node type
- set_fact:
- host_data:
- master:
- memory: 16384
- disk:
- - {'size_gb': 60, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
- - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
- - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
- - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
- compute:
- memory: "{{ ('cns' in container_storage) | ternary(32768, 8192) }}"
- disk:
- - {'size_gb': 60, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
- - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
- - {'size_gb': 40, 'type': 'thin', 'datastore': "{{ vcenter_datastore }}"}
-
-- name: Create production VMs on vCenter
- vmware_guest:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- validate_certs: False
- name: "{{ item.value.guestname }}"
- cluster: "{{ vcenter_cluster}}"
- datacenter: "{{ vcenter_datacenter }}"
- resource_pool: "{{ vcenter_resource_pool }}"
- template: "{{vcenter_template_name}}"
- state: poweredon
- wait_for_ip_address: true
- folder: "/{{ vcenter_folder }}"
- annotation: "{{ item.value.tag }}"
- disk: "{{ host_data[item.value.guesttype].disk }}"
- hardware:
- memory_mb: "{{ host_data[item.value.guesttype].memory }}"
- networks: "[{'name': '{{ vm_network }}', 'type': 'dhcp' }]"
- customization:
- domain: "{{dns_zone}}"
- dns_suffix: "{{ dns_zone }}"
- hostname: "{{ item.value.guestname }}"
- with_dict: "{{ host_inventory }}"
- when: "item.value.guesttype in ['compute', 'master']"
- async: "{{ 6 * 600 }}"
- poll: 0
- register: async_vms_creation
-
-- name: Check async status of VMs creation
- async_status:
- jid: "{{ async_result_item.ansible_job_id }}"
- with_items: "{{ async_vms_creation.results }}"
- loop_control:
- loop_var: "async_result_item"
- register: async_poll_results
- until: async_poll_results.finished
- retries: "{{ 6 * 100 }}"
-
-- name: Read info of newly created VMs
- vmware_guest_tools_wait:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- folder: "/{{ vcenter_folder }}"
- validate_certs: False
- uuid: "{{ item.instance.hw_product_uuid }}"
- with_items: "{{ async_poll_results.results }}"
- register: facts
-
-- name: Map node names and their IP addresses
- set_fact:
- ip4addrs: "{{ ip4addrs | default({}) | combine(
- {item.instance.hw_name: (
- item.instance.hw_eth0.ipaddresses | ipv4 | first)},
- recursive=True) }}"
- hostnames_for_reboot: "{{
- (hostnames_for_reboot | default([])) +
- [(item.instance.hw_eth0.ipaddresses | ipv4 | first)] }}"
- with_items: "{{ facts.results }}"
-
-- name: Add production VMs to inventory
- add_host:
- hostname: "{{ item.value.guestname }}"
- ansible_fqdn: "{{ item.value.guestname }}.{{ dns_zone }}"
- ansible_ssh_host: "{{ ip4addrs[item.value.guestname] }}"
- groups: "{{ item.value.tag }}, production_group{{ is_add_nodes | ternary(', new_nodes', '')}}"
- openshift_node_group_name: "{{
- (item.value.guesttype == 'master') | ternary('node-config-master',
- 'node-config-compute') }}"
- with_dict: "{{ host_inventory }}"
- when: "item.value.guesttype in ['compute', 'master']"
-
-# Following updates config file
-# which is required for automated tests from 'glusterfs-containers-tests' repo
-
-- name: Gather data about existing master nodes for tests config file
- set_fact:
- ocp_master_and_client_nodes: "{{
- ocp_master_and_client_nodes | default({}) | combine({
- (
- ((
- (hostvars[item].guest | default({'net': [{
- 'network': vm_network,
- 'ipaddress': [
- ip4addrs[hostvars[item].inventory_hostname_short]
- ]
- }]})).net | selectattr('network', 'equalto', vm_network)
- ) | list)[0].ipaddress | ipv4 | first
- ): {
- 'hostname': hostvars[item].inventory_hostname_short,
- }
- })
- }}"
- with_items: "{{ groups[cluster_id + '-master'] }}"
- when: cns_automation_config_file_path | length > 0
-
-- name: Gather data about existing compute nodes for tests config file
- set_fact:
- ocp_compute_nodes: "{{
- ocp_compute_nodes | default({}) | combine({
- (
- ((
- (hostvars[item].guest | default({'net': [{
- 'network': vm_network,
- 'ipaddress': [
- ip4addrs[hostvars[item].inventory_hostname_short]
- ]
- }]})).net | selectattr('network', 'equalto', vm_network)
- ) | list)[0].ipaddress | ipv4 | first
- ): {
- 'hostname': hostvars[item].inventory_hostname_short,
- }
- })
- }}"
- with_items: "{{ groups[cluster_id + '-compute'] | default([]) }} "
- when: cns_automation_config_file_path | length > 0
-
-- name: Update 'glusterfs-containers-tests' config file
- yedit:
- src: "{{ cns_automation_config_file_path }}"
- state: present
- edits:
- - key: ocp_servers
- value:
- master: "{{ ocp_master_and_client_nodes }}"
- client: "{{ ocp_master_and_client_nodes }}"
- nodes: "{{ ocp_compute_nodes }}"
- - key: openshift.heketi_config.heketi_client_node
- value: "{{ ocp_master_and_client_nodes.keys()[0] }}"
- - key: openshift.heketi_config.heketi_server_url
- value: "http://{{ ocp_master_and_client_nodes.keys()[0] }}:8080"
- when:
- - ocp_master_and_client_nodes is defined
- - ocp_compute_nodes is defined
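As a rough illustration of what the yedit edits above leave in the 'glusterfs-containers-tests' config file, assuming one master and two compute nodes with made-up addresses:

ocp_servers:
  master:
    192.0.2.10:
      hostname: master-0
  client:
    192.0.2.10:
      hostname: master-0
  nodes:
    192.0.2.20:
      hostname: compute-0
    192.0.2.21:
      hostname: compute-1
openshift:
  heketi_config:
    heketi_client_node: 192.0.2.10
    heketi_server_url: http://192.0.2.10:8080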
diff --git a/deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml b/deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml
deleted file mode 100644
index dfe5e649..00000000
--- a/deployment/playbooks/roles/crs-prerequisite/tasks/main.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- name: Clear yum cache
- command: "yum clean all"
- ignore_errors: true
-
-- name: Install required common rpms
- package:
- name: "{{ item }}"
- state: latest
- with_items:
- - 'iptables'
- - 'iptables-services'
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
-
-- name: Enable Gluster 3 repo
- import_role:
- name: enable-gluster-repo
-
-- name: Install required Gluster 3 rpms
- package:
- name: "{{ item }}"
- state: latest
- with_items:
- - 'redhat-storage-server'
- - 'heketi-client'
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
-
-- name: Install gluster-block package
- package:
- name: "{{ item }}"
- state: latest
- with_items:
- - 'gluster-block'
- retries: 5
- delay: 5
- ignore_errors: yes
-
-- name: Stop firewalld
- service:
- name: firewalld
- state: stopped
- enabled: no
-
-- name: Start Glusterd and iptables
- service:
- name: "{{ item }}"
- state: started
- enabled: true
- with_items:
- - iptables
- - glusterd
-
-- name: Start gluster-blockd service
- service:
- name: "{{ item }}"
- state: started
- enabled: true
- with_items:
- - gluster-blockd
- ignore_errors: yes
diff --git a/deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml b/deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml
deleted file mode 100644
index 062f543a..00000000
--- a/deployment/playbooks/roles/docker-storage-setup/defaults/main.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-docker_dev: "/dev/sdb"
-docker_vg: "docker-vol"
-docker_data_size: "95%VG"
-docker_dm_basesize: "3G"
-container_root_lv_name: "dockerlv"
-container_root_lv_mount_path: "/var/lib/docker"
diff --git a/deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml b/deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml
deleted file mode 100644
index 70c04802..00000000
--- a/deployment/playbooks/roles/docker-storage-setup/tasks/main.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- name: remove any existing docker-storage config file
- file:
- path: /etc/sysconfig/docker-storage
- state: absent
- when: not (openshift_use_crio | default(false) | bool)
-
-- block:
- - name: create the docker-storage config file
- template:
- src: "{{ role_path }}/templates/docker-storage-setup-overlayfs.j2"
- dest: /etc/sysconfig/docker-storage-setup
- owner: root
- group: root
- mode: 0644
- when:
- - ansible_distribution_version | version_compare('7.4', '>=')
- - ansible_distribution == "RedHat"
- - not (openshift_use_crio | default(false) | bool)
-
-- block:
- - name: create the docker-storage-setup config file
- template:
- src: "{{ role_path }}/templates/docker-storage-setup-dm.j2"
- dest: /etc/sysconfig/docker-storage-setup
- owner: root
- group: root
- mode: 0644
- when:
- - ansible_distribution_version | version_compare('7.4', '<')
- - ansible_distribution == "RedHat"
- - not (openshift_use_crio | default(false) | bool)
-
-- name: start docker
- service:
- name: docker
- state: started
- enabled: true
- when: not (openshift_use_crio | default(false) | bool)
diff --git a/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2 b/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2
deleted file mode 100644
index b5869fef..00000000
--- a/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-dm.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-DEVS="{{ docker_dev }}"
-VG="{{ docker_vg }}"
-DATA_SIZE="{{ docker_data_size }}"
-EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ docker_dm_basesize }}"
diff --git a/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2 b/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2
deleted file mode 100644
index 61ba30af..00000000
--- a/deployment/playbooks/roles/docker-storage-setup/templates/docker-storage-setup-overlayfs.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-DEVS="{{ docker_dev }}"
-VG="{{ docker_vg }}"
-DATA_SIZE="{{ docker_data_size }}"
-STORAGE_DRIVER=overlay2
-CONTAINER_ROOT_LV_NAME="{{ container_root_lv_name }}"
-CONTAINER_ROOT_LV_MOUNT_PATH="{{ container_root_lv_mount_path }}"
-CONTAINER_ROOT_LV_SIZE=100%FREE \ No newline at end of file
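With the role defaults shown earlier (docker_dev=/dev/sdb, docker_vg=docker-vol, and so on), the overlayfs template above renders to roughly:

DEVS="/dev/sdb"
VG="docker-vol"
DATA_SIZE="95%VG"
STORAGE_DRIVER=overlay2
CONTAINER_ROOT_LV_NAME="dockerlv"
CONTAINER_ROOT_LV_MOUNT_PATH="/var/lib/docker"
CONTAINER_ROOT_LV_SIZE=100%FREE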
diff --git a/deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml b/deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml
deleted file mode 100644
index 7236d77d..00000000
--- a/deployment/playbooks/roles/enable-gluster-repo/tasks/main.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: Enable main Gluster 3 repo with GA packages
- command: "subscription-manager repos --enable=rh-gluster-3-for-rhel-7-server-rpms"
-# when: gluster_puddle_repo == ''
-
-- name: Create additional repo with downstream packages for Gluster 3
- yum_repository:
- name: "downstream-rh-gluster-3-for-rhel-7-server-rpms"
- baseurl: "{{ gluster_puddle_repo }}"
- description: "Downstream repo with development versions of packages for Gluster 3"
- enabled: "yes"
- gpgcheck: "no"
- sslverify: "no"
- cost: 990
- when: gluster_puddle_repo != ''
diff --git a/deployment/playbooks/roles/etcd-storage/tasks/main.yaml b/deployment/playbooks/roles/etcd-storage/tasks/main.yaml
deleted file mode 100644
index fe13dc17..00000000
--- a/deployment/playbooks/roles/etcd-storage/tasks/main.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: Create openshift volume group
- lvg: vg=etcd_vg pvs=/dev/sdd
-
-- name: Create lvm volumes
- lvol: vg=etcd_vg lv=etcd_lv size=95%FREE state=present shrink=no
-
-- name: Create local partition on lvm lv
- filesystem:
- fstype: xfs
- dev: /dev/etcd_vg/etcd_lv
-
-- name: Create the /var/lib/etcd mount point directory
- file: path=/var/lib/etcd state=directory mode=0755
-
-- name: Mount the partition
- mount:
- name: /var/lib/etcd
- src: /dev/etcd_vg/etcd_lv
- fstype: xfs
- state: present
-
-- name: Remount new partition
- command: "mount -a"
diff --git a/deployment/playbooks/roles/gluster-ports/defaults/main.yaml b/deployment/playbooks/roles/gluster-ports/defaults/main.yaml
deleted file mode 100644
index fadcb096..00000000
--- a/deployment/playbooks/roles/gluster-ports/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-gluster_ports: ['24007', '24008', '2222', '49152:49664', '24010', '3260', '111']
-crs_ports: ['8080']
diff --git a/deployment/playbooks/roles/gluster-ports/tasks/main.yaml b/deployment/playbooks/roles/gluster-ports/tasks/main.yaml
deleted file mode 100644
index a3f0565b..00000000
--- a/deployment/playbooks/roles/gluster-ports/tasks/main.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- name: open gluster ports
- iptables:
- chain: INPUT
- destination_port: "{{ item }}"
- jump: ACCEPT
- ctstate: NEW
- protocol: tcp
- action: insert
- match: tcp
- with_items: "{{ gluster_ports }}"
- when: groups['storage'] is defined and groups['storage'] != []
- register: rule
-
-- name: save iptables
- shell: iptables-save > /etc/sysconfig/iptables
- when: rule|changed
-
-- name: open gluster ports
- iptables:
- chain: INPUT
- destination_port: "{{ item }}"
- ctstate: NEW
- jump: ACCEPT
- protocol: tcp
- action: insert
- match: tcp
- with_items: "{{ crs_ports }}"
- when: groups['crs'] is defined and groups['crs'] != []
- register: heketi
-
-- name: save iptables
- shell: iptables-save > /etc/sysconfig/iptables
- when: heketi|changed
diff --git a/deployment/playbooks/roles/instance-groups/tasks/main.yaml b/deployment/playbooks/roles/instance-groups/tasks/main.yaml
deleted file mode 100644
index f0f3c0f9..00000000
--- a/deployment/playbooks/roles/instance-groups/tasks/main.yaml
+++ /dev/null
@@ -1,152 +0,0 @@
----
-# create rhsm_user, rhsm_password, rhsm_subscription_pool and
-# rhsm_server for functionality with older rhsm_user
-- name: Set deprecated fact for rhel_subscription_user
- set_fact:
- rhsm_user: "{{ rhel_subscription_user }}"
- when: rhel_subscription_user is defined
-
-- name: Set deprecated fact for rhel_subscription_pass
- set_fact:
- rhsm_password: "{{ rhel_subscription_pass }}"
- when: rhel_subscription_pass is defined
-
-- name: Set deprecated fact for rhel_subscription_pool
- set_fact:
- rhsm_pool: "{{ rhel_subscription_pool }}"
- when: rhel_subscription_pool is defined
-
-- name: Add masters to requisite groups
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, masters, etcd, nodes, cluster_hosts, master, OSEv3
- openshift_node_group_name: "node-config-master{{
- (openshift_use_crio | default(false) | bool) | ternary('-crio', '') }}"
- with_items: "{{ groups[cluster_id + '-master'] }}"
- when:
- - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-- name: Add masters to requisite groups
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, masters, etcd, nodes, cluster_hosts, master, OSEv3
- openshift_node_group_name: "node-config-master"
- openshift_node_labels:
- role: master
- node-role.kubernetes.io/master: true
- with_items: "{{ groups[cluster_id + '-master'] }}"
- when:
- - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-
-- name: Add a master to the single master group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: single_master
- openshift_node_group_name: "node-config-master{{
- (openshift_use_crio | default(false) | bool) | ternary('-crio', '') }}"
- with_items: "{{ groups[cluster_id + '-master'][0] }}"
- when:
- - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-- name: Add a master to the single master group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: single_master
- openshift_node_group_name: "node-config-master"
- openshift_node_labels:
- role: master
- node-role.kubernetes.io/master: true
- with_items: "{{ groups[cluster_id + '-master'][0] }}"
- when:
- - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-
-- name: Add compute instances to host group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, nodes, cluster_hosts, schedulable_nodes, compute, OSEv3
- openshift_node_group_name: "node-config-compute{{
- (openshift_use_crio | default(false) | bool) | ternary('-crio', '') }}"
- with_items: "{{ groups[cluster_id + '-compute'] }}"
- when:
- - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-- name: Add compute instances to host group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, nodes, cluster_hosts, schedulable_nodes, compute, OSEv3
- openshift_node_group_name: "node-config-compute"
- openshift_node_labels:
- role: compute
- node-role.kubernetes.io/compute: true
- node-role.kubernetes.io/infra: true
- with_items: "{{ groups[cluster_id + '-compute'] }}"
- when:
- - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-
-- name: Add new node instances to host group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, new_nodes
- openshift_node_group_name: "node-config-compute{{
- (openshift_use_crio | default(false) | bool) | ternary('-crio', '') }}"
- with_items: "{{ groups.tag_provision_node | default([]) }}"
- when:
- - add_node is defined
- - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-- name: Add new node instances to host group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, new_nodes
- openshift_node_group_name: "node-config-compute"
- openshift_node_labels:
- role: "{{ node_type }}"
- node-role.kubernetes.io/compute: true
- node-role.kubernetes.io/infra: true
- with_items: "{{ groups.tag_provision_node | default([]) }}"
- when:
- - add_node is defined
- - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-
-- name: Add cns instances to allnodes
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, OSEv3
- openshift_node_group_name: "node-config-storage{{
- (openshift_use_crio | default(false) | bool) | ternary('-crio', '') }}"
- with_items: "{{ groups[cluster_id + '-storage'] | default([]) }}"
-
-- name: Add crs instances to allnodes
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: allnodes, OSEv3
- openshift_node_group_name: "node-config-storage"
- with_items: "{{ groups[cluster_id + '-crs'] | default([]) }}"
-
-- name: Add cns instances to host group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: nodes, cluster_hosts, schedulable_nodes, storage
- openshift_node_group_name: "node-config-storage{{
- (openshift_use_crio | default(false) | bool) | ternary('-crio', '') }}"
- with_items: "{{ groups[cluster_id + '-storage'] }}"
- when:
- - "'cns' in container_storage and add_node is defined and 'storage' in node_type"
- - "openshift_vers not in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-- name: Add cns instances to host group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: nodes, cluster_hosts, schedulable_nodes, storage
- openshift_node_labels:
- role: storage
- node-role.kubernetes.io/storage: true
- openshift_node_group_name: "node-config-storage"
- with_items: "{{ groups[cluster_id + '-storage'] }}"
- when:
- - "'cns' in container_storage and add_node is defined and 'storage' in node_type"
- - "openshift_vers in ['v3_6', 'v3_7', 'v3_9', 'v3_10']"
-
-- name: Add crs nodes to the storage group
- add_host:
- name: "{{ hostvars[item].inventory_hostname }}"
- groups: storage, crs
- openshift_node_group_name: "node-config-storage"
- with_items: "{{ groups[cluster_id + '-crs'] }}"
- when:
- - "'crs' in container_storage and add_node is defined and 'storage' in node_type"
diff --git a/deployment/playbooks/roles/master-prerequisites/tasks/main.yaml b/deployment/playbooks/roles/master-prerequisites/tasks/main.yaml
deleted file mode 100644
index de9230d1..00000000
--- a/deployment/playbooks/roles/master-prerequisites/tasks/main.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Install git
- package:
- name: git
- state: latest
- when: not (openshift.common.is_atomic | default(openshift_is_atomic)) | bool
diff --git a/deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml b/deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml
deleted file mode 100644
index cd74c20e..00000000
--- a/deployment/playbooks/roles/openshift-volume-quota/defaults/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-local_volumes_device: "/dev/sdc"
-local_volumes_fstype: "xfs"
-local_volumes_fsopts: "gquota"
-local_volumes_path: "/var/lib/origin/openshift.local.volumes"
diff --git a/deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml b/deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml
deleted file mode 100644
index df58fe80..00000000
--- a/deployment/playbooks/roles/openshift-volume-quota/tasks/main.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- name: Create filesystem for /var/lib/origin/openshift.local.volumes
- filesystem:
- fstype: "{{ local_volumes_fstype }}"
- dev: "{{ local_volumes_device }}"
-
-- name: Create local volumes directory
- file:
- path: "{{ local_volumes_path }}"
- state: directory
- recurse: yes
-
-- name: Create fstab entry
- mount:
- name: "{{ local_volumes_path }}"
- src: "{{ local_volumes_device }}"
- fstype: "{{ local_volumes_fstype }}"
- opts: "{{ local_volumes_fsopts }}"
- state: present
-
-- name: Mount fstab entry
- mount:
- name: "{{ local_volumes_path }}"
- src: "{{ local_volumes_device }}"
- fstype: "{{ local_volumes_fstype }}"
- opts: "{{ local_volumes_fsopts }}"
- state: mounted
diff --git a/deployment/playbooks/roles/package-repos/tasks/main.yaml b/deployment/playbooks/roles/package-repos/tasks/main.yaml
deleted file mode 100644
index 3492a9e4..00000000
--- a/deployment/playbooks/roles/package-repos/tasks/main.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-- name: Import RHSM role
- import_role:
- name: rhsm
-
-- name: Evaluate OCP repo name
- set_fact:
- tmp_ose_repo_name: "rhel-7-server-ose-3.{{ openshift_vers.split('_')[-1] }}-rpms"
-
-- name: Disable OpenShift 3.X GA repo
- command: "subscription-manager repos --disable={{ tmp_ose_repo_name }}"
- when: (ose_puddle_repo != '') or ('crs' in group_names)
-
-- name: Create additional repo with downstream packages for OpenShift 3.X
- yum_repository:
- name: "downstream-{{ tmp_ose_repo_name }}"
- baseurl: "{{ ose_puddle_repo }}"
- description: "Downstream repo with development versions of packages for OpenShift"
- enabled: "{{ (ose_puddle_repo != '') | ternary('yes', 'no') }}"
- gpgcheck: "no"
- sslverify: "no"
- cost: 900
- when: (ose_puddle_repo != '') and ('crs' not in group_names)
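A quick sanity check of the repo-name expression above; assuming a hypothetical openshift_vers of 'v3_11', it evaluates to 'rhel-7-server-ose-3.11-rpms':

- hosts: localhost
  gather_facts: no
  vars:
    openshift_vers: v3_11
  tasks:
    - name: Show the evaluated OCP repo name
      debug:
        msg: "rhel-7-server-ose-3.{{ openshift_vers.split('_')[-1] }}-rpms"  # rhel-7-server-ose-3.11-rpms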
diff --git a/deployment/playbooks/roles/prerequisites/defaults/main.yaml b/deployment/playbooks/roles/prerequisites/defaults/main.yaml
deleted file mode 100644
index 1705ee4f..00000000
--- a/deployment/playbooks/roles/prerequisites/defaults/main.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-openshift_required_packages:
-- iptables
-- iptables-services
-- NetworkManager
-- docker{{ '-' + docker_version if docker_version is defined else '' }}
diff --git a/deployment/playbooks/roles/prerequisites/library/openshift_facts.py b/deployment/playbooks/roles/prerequisites/library/openshift_facts.py
deleted file mode 120000
index e0061bb7..00000000
--- a/deployment/playbooks/roles/prerequisites/library/openshift_facts.py
+++ /dev/null
@@ -1 +0,0 @@
-/usr/share/ansible/openshift-ansible/roles/openshift_facts/library/openshift_facts.py \ No newline at end of file
diff --git a/deployment/playbooks/roles/prerequisites/library/rpm_q.py b/deployment/playbooks/roles/prerequisites/library/rpm_q.py
deleted file mode 100644
index afc261ba..00000000
--- a/deployment/playbooks/roles/prerequisites/library/rpm_q.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2015, Tobias Florek <tob@butter.sh>
-# Licensed under the terms of the MIT License
-"""
-An ansible module to query the RPM database. For use, when yum/dnf are not
-available.
-"""
-
-# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
-from ansible.module_utils.basic import * # noqa: F403
-
-DOCUMENTATION = """
----
-module: rpm_q
-short_description: Query the RPM database
-author: Tobias Florek
-options:
- name:
- description:
- - The name of the package to query
- required: true
- state:
- description:
- - Whether the package is supposed to be installed or not
- choices: [present, absent]
- default: present
-"""
-
-EXAMPLES = """
-- rpm_q: name=ansible state=present
-- rpm_q: name=ansible state=absent
-"""
-
-RPM_BINARY = '/bin/rpm'
-
-
-def main():
- """
- Checks rpm -q for the named package and returns the installed packages
- or None if not installed.
- """
- module = AnsibleModule( # noqa: F405
- argument_spec=dict(
- name=dict(required=True),
- state=dict(default='present', choices=['present', 'absent'])
- ),
- supports_check_mode=True
- )
-
- name = module.params['name']
- state = module.params['state']
-
- # pylint: disable=invalid-name
- rc, out, err = module.run_command([RPM_BINARY, '-q', name])
-
- installed = out.rstrip('\n').split('\n')
-
- if rc != 0:
- if state == 'present':
- module.fail_json(msg="%s is not installed" % name,
- stdout=out, stderr=err, rc=rc)
- else:
- module.exit_json(changed=False)
- elif state == 'present':
- module.exit_json(changed=False, installed_versions=installed)
- else:
- module.fail_json(msg="%s is installed", installed_versions=installed)
-
-
-if __name__ == '__main__':
- main()
diff --git a/deployment/playbooks/roles/prerequisites/tasks/main.yaml b/deployment/playbooks/roles/prerequisites/tasks/main.yaml
deleted file mode 100644
index a2686796..00000000
--- a/deployment/playbooks/roles/prerequisites/tasks/main.yaml
+++ /dev/null
@@ -1,84 +0,0 @@
----
-- name: Gather facts
- openshift_facts:
- role: common
-
-- block:
- - name: Clear yum cache
- command: "yum clean all"
- ignore_errors: true
-
- - name: Install the required rpms
- package:
- name: "{{ item }}"
- state: latest
- with_items: "{{ openshift_required_packages }}"
-
- - name: Start NetworkManager and network
- service:
- name: "{{ item }}"
- state: restarted
- enabled: true
- with_items:
- - NetworkManager
- - network
-
- - name: Determine if firewalld is installed
- rpm_q:
- name: "firewalld"
- state: present
- register: firewalld_installed
- failed_when: false
-
- - name: Stop firewalld
- service:
- name: firewalld
- state: stopped
- enabled: false
- when:
- - "{{ firewalld_installed.installed_versions | default([]) | length > 0 }}"
-
- - name: Start iptables
- service:
- name: iptables
- state: started
- enabled: true
-
- - name: Start docker
- service:
- name: docker
- state: started
- enabled: true
-
- when: not (openshift.common.is_atomic | default(openshift_is_atomic)) | bool
-
-# Fail as early as possible if Atomic and old version of Docker
-- block:
- - name: Determine Atomic Host Docker Version
- shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"'
- register: l_atomic_docker_version
-
- - assert:
- msg: Installation on Atomic Host requires Docker 1.12 or later. Attempting to patch.
- that:
- - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
-
- rescue:
- - name: Patching Atomic instances
- shell: atomic host upgrade
- register: patched
-
- - name: Reboot when patched
- shell: sleep 5 && shutdown -r now "Reboot due to Atomic Patching"
- async: 1
- poll: 0
- ignore_errors: true
- when: patched.changed
-
- - name: Wait for hosts to be back
- pause:
- seconds: 60
- delegate_to: 127.0.0.1
- when: patched.changed
-
- when: (openshift.common.is_atomic | default(openshift_is_atomic)) | bool
diff --git a/deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml b/deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml
deleted file mode 100644
index 9b9f3b21..00000000
--- a/deployment/playbooks/roles/rhsm-unregister/rhsm-unregister/tasks/main.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Is the host already registered?
- command: "subscription-manager list"
- register: subscribed
- ignore_errors: yes
-
- - name: Unregister host
- redhat_subscription:
- state: absent
- when: "'Subscribed' in subscribed.stdout"
- ignore_errors: yes
-
- when: ansible_distribution == "RedHat"
diff --git a/deployment/playbooks/roles/rhsm/defaults/main.yaml b/deployment/playbooks/roles/rhsm/defaults/main.yaml
deleted file mode 100644
index 3207411f..00000000
--- a/deployment/playbooks/roles/rhsm/defaults/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-openshift_required_repos:
-- 'rhel-7-server-rpms'
-- 'rhel-7-server-extras-rpms'
-- 'rhel-7-fast-datapath-rpms'
diff --git a/deployment/playbooks/roles/rhsm/tasks/main.yaml b/deployment/playbooks/roles/rhsm/tasks/main.yaml
deleted file mode 100644
index f793fb2f..00000000
--- a/deployment/playbooks/roles/rhsm/tasks/main.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- block:
- - name: Allow rhsm a longer timeout to help out with subscription-manager
- lineinfile:
- dest: /etc/rhsm/rhsm.conf
- line: 'server_timeout=600'
- insertafter: '^proxy_password ='
-
- - name: Is the system already registered?
- command: "subscription-manager version"
- register: subscribed
-
- - name: Unregister system if registered
- import_role:
- name: rhsm-unregister
- when:
- - "'not registered' not in subscribed.stdout"
-
- - name: Register system using Red Hat Subscription Manager
- redhat_subscription:
- state: present
- username: "{{ rhsm_user | default(omit) }}"
- password: "{{ rhsm_password | default(omit) }}"
- pool: "{{ rhsm_pool | default(omit) }}"
- server_hostname: "{{ rhsm_satellite | default(omit) }}"
- when:
- - "'not registered' in subscribed.stdout"
- - rhsm_user is defined
- - rhsm_user|trim != ''
- register: rhn
- until: rhn|success
- retries: 5
-
- - name: Obtain currently enabled repos
- shell: 'subscription-manager repos --list-enabled | sed -ne "s/^Repo ID:[^a-zA-Z0-9]*\(.*\)/\1/p"'
- register: enabled_repos
-
- - name: Disable repositories that should not be enabled
- shell: "subscription-manager repos --disable={{ item }}"
- with_items:
- - "{{ enabled_repos.stdout_lines | difference(openshift_required_repos) }}"
- when: provider is not defined
-
- - name: Enable specified repositories not already enabled
- command: "subscription-manager repos --enable={{ item }}"
- with_items:
- - "{{ openshift_required_repos | difference(enabled_repos.stdout_lines) }}"
-
- when: ansible_distribution == "RedHat"
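To make the enable/disable logic above concrete, here is a small standalone sketch: with the default openshift_required_repos from roles/rhsm/defaults and a host that (hypothetically) has 'rhel-7-server-rpms' and 'rhel-7-server-rt-rpms' enabled, the two difference calls behave as follows:

- hosts: localhost
  gather_facts: no
  vars:
    openshift_required_repos:
      - rhel-7-server-rpms
      - rhel-7-server-extras-rpms
      - rhel-7-fast-datapath-rpms
    enabled_repos_stdout_lines:
      - rhel-7-server-rpms
      - rhel-7-server-rt-rpms
  tasks:
    - name: Repos that would be disabled
      debug:
        msg: "{{ enabled_repos_stdout_lines | difference(openshift_required_repos) }}"  # ['rhel-7-server-rt-rpms']
    - name: Repos that would be enabled
      debug:
        msg: "{{ openshift_required_repos | difference(enabled_repos_stdout_lines) }}"  # ['rhel-7-server-extras-rpms', 'rhel-7-fast-datapath-rpms']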
diff --git a/deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml b/deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml
deleted file mode 100644
index e9e06809..00000000
--- a/deployment/playbooks/roles/setup-custom-domain-names-for-ansible-runner/tasks/main.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-# NOTE(vponomar): here we use 2 different sources of IP addresses:
-# 1) hostvars[item].guest.net exists for old nodes that haven't been created
-# by this playbook run. Such nodes have detailed info in hostvars.
-# 2) hostvars[item].ansible_ssh_host is always the correct IP address for newly
-# created nodes. For such nodes we pick it when variant 1 does not work.
-- name: Save matched hosts to temporary var
- set_fact:
- current_cluster_hosts: "{{
- current_cluster_hosts | default([]) | union([{
- 'name_short': hostvars[item].inventory_hostname_short,
- 'name': hostvars[item].inventory_hostname,
- 'net': (hostvars[item].guest | default({})).net | default(
- [{'network': vm_network,
- 'ipaddress': [hostvars[item].ansible_ssh_host]}])
- }]) }}"
- with_items: "{{ groups.all | select('match', ocp_hostname_prefix) | list }}"
-
-- name: Gather current cluster IP addresses
- set_fact:
- current_cluster_ips: "{{
- current_cluster_ips | default({}) | combine({
- (item.1.ipaddress | ipv4 | first): [item.0.name_short, item.0.name]
- }) }}"
- with_subelements: ["{{ current_cluster_hosts }}", net]
- when: "item.1.network == vm_network"
-
-- name: Get current user home dir
- shell: 'eval echo "~$USER"'
- register: home_dir
-- name: Set hosts files paths
- set_fact:
- home_hosts_file: "{{ home_dir.stdout_lines[0] + '/.ssh/config' }}"
- system_hosts_file: "/etc/hosts"
-- name: Check 'write' permissions for system hosts file
- stat:
- path: "{{ system_hosts_file }}"
- register: stat_system_hosts
-
-- name: Update system hosts file if writeable
- block:
- - name: Delete old left-overs if exist
- lineinfile:
- dest: "{{ system_hosts_file }}"
- regexp: '{{ item.name_short }}'
- state: absent
- create: true
- with_items: "{{ current_cluster_hosts }}"
- - name: Add domain name mapping of new cluster nodes to the system hosts file
- lineinfile:
- dest: "{{ system_hosts_file }}"
- line: '{{ item.key }} {{ item.value.0 }} {{ item.value.1 }}'
- create: true
- with_dict: "{{ current_cluster_ips }}"
- when: "stat_system_hosts.stat.writeable"
-
-- name: Update user's SSH hosts file
- block:
- - name: Delete old left-overs if exist
- lineinfile:
- path: "{{ home_hosts_file }}"
- state: absent
- regexp: "{{ item.key }}"
- create: true
- mode: '644'
- with_dict: "{{ current_cluster_ips }}"
- - name: Write line with option group
- lineinfile:
- dest: "{{ home_hosts_file }}"
- state: present
- line: "Host {{ item.value.0 }} {{ item.value.1 }}"
- create: true
- mode: '644'
- with_dict: "{{ current_cluster_ips }}"
- - name: Write line with hostname option
- lineinfile:
- dest: "{{ home_hosts_file }}"
- state: present
- line: " HostName {{ item.key }}"
- insertafter: "Host {{ item.value.0 }} {{ item.value.1 }}"
- create: true
- mode: '644'
- with_dict: "{{ current_cluster_ips }}"
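For a hypothetical node with short name master-0, FQDN master-0.example.com and IP 192.0.2.10, the three lineinfile tasks above leave a stanza like this in the runner's ~/.ssh/config:

Host master-0 master-0.example.com
  HostName 192.0.2.10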
diff --git a/deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml b/deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml
deleted file mode 100644
index d53fa43f..00000000
--- a/deployment/playbooks/roles/setup-custom-domain-names/tasks/main.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: Import role with update of /etc/hosts file
- import_role:
- name: setup-custom-domain-names-for-ansible-runner
-
-- name: Create directory for dnsmasq config file if absent
- file:
- dest: /etc/dnsmasq.d
- state: directory
- mode: 0644
-
-- name: Create custom dnsmasq config file for current cluster
- file:
- dest: '/etc/dnsmasq.d/openshift-cluster-{{ cluster_id }}.conf'
- state: touch
-
-- name: Remove stale data from custom dnsmasq config file if it exists
- lineinfile:
- dest: '/etc/dnsmasq.d/openshift-cluster-{{ cluster_id }}.conf'
- regexp: "{{ item.value.0 }}"
- state: absent
- with_dict: "{{ current_cluster_ips }}"
-
-- name: Write data to custom dnsmasq config file
- lineinfile:
- dest: '/etc/dnsmasq.d/openshift-cluster-{{ cluster_id }}.conf'
- line: "address=/{{ item.value.0 }}/{{ item.key }}\naddress=/{{ item.value.1 }}/{{ item.key }}"
- state: present
- with_dict: "{{ current_cluster_ips }}"
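With the same hypothetical node, the per-cluster dnsmasq config file ends up with address records such as:

address=/master-0/192.0.2.10
address=/master-0.example.com/192.0.2.10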
diff --git a/deployment/playbooks/roles/storage-class-configure/tasks/main.yaml b/deployment/playbooks/roles/storage-class-configure/tasks/main.yaml
deleted file mode 100644
index d42484e0..00000000
--- a/deployment/playbooks/roles/storage-class-configure/tasks/main.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-- name: Copy cloud provider storage class file
- template:
- src: cloud-provider-storage-class.yaml.j2
- dest: ~/cloud-provider-storage-class.yaml
-
-- name: Copy cloud provider storage class file to single master
- fetch:
- src: ~/cloud-provider-storage-class.yaml
- dest: ~/cloud-provider-storage-class.yaml
- flat: yes
-
-- name: Switch to default project
- command: oc project default
-
-- name: Check to see if storage class is already created
- command: "oc get storageclass"
- register: storage_class
-
-- name: Create storage class
- command: "oc create -f ~/cloud-provider-storage-class.yaml"
- when: "'{{ vcenter_datastore }}' not in storage_class.stdout"
diff --git a/deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2 b/deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2
deleted file mode 100644
index e31d53a4..00000000
--- a/deployment/playbooks/roles/storage-class-configure/templates/cloud-provider-storage-class.yaml.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-kind: StorageClass
-apiVersion: storage.k8s.io/v1
-metadata:
- name: "{{ vcenter_datastore }}"
-provisioner: kubernetes.io/vsphere-volume
-parameters:
- diskformat: zeroedthick
- datastore: "{{ vcenter_datastore }}"
diff --git a/deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml b/deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml
deleted file mode 100644
index 67898e0c..00000000
--- a/deployment/playbooks/roles/vmware-guest-setup/handlers/main.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: restart chronyd
- service: name=chronyd state=restarted
-
-- name: restart networking
- service: name=networking state=restarted
diff --git a/deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml b/deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml
deleted file mode 100644
index e640b861..00000000
--- a/deployment/playbooks/roles/vmware-guest-setup/tasks/main.yaml
+++ /dev/null
@@ -1,89 +0,0 @@
----
-- name: Determine if Atomic
- stat: path=/run/ostree-booted
- register: s
- changed_when: false
- check_mode: no
-
-- name: Init the is_atomic fact
- set_fact:
- is_atomic: false
-
-- name: Set the is_atomic fact
- set_fact:
- is_atomic: true
- when: s.stat.exists
-
-- block:
- - name: Install 'sos' package
- yum:
- name: sos
- state: installed
- ignore_errors: yes
- - name: be sure all pre-req packages are installed
- yum: name={{item}} state=installed
- with_items:
- - open-vm-tools
- - PyYAML
- - perl
- - python-ipaddress
- - net-tools
- - chrony
- - python-six
- - iptables
- - iptables-services
- - dnsmasq
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
- - name: Install docker
- yum: name={{item}} state=installed
- with_items:
- - docker{{ '-' + docker_version if docker_version is defined else '' }}
- retries: 5
- delay: 5
- register: result
- until: result is succeeded
- when: not (openshift_use_crio | default(false) | bool)
-
- - name: be sure openvmtools is running and enabled
- service: name=vmtoolsd state=started enabled=yes
- when:
- - not is_atomic | bool
- - ansible_distribution == "RedHat"
-
-- name: be sure chrony is configured
- template: src=chrony.conf.j2 dest=/etc/chrony.conf
- notify:
- - restart chronyd
-
-- name: set link to localtime
- command: timedatectl set-timezone {{timezone}}
-
-- name: be sure chronyd is running and enabled
- service: name=chronyd state=started enabled=yes
-
-- block:
- - name: (Atomic) Remove extra docker lv from root vg
- lvol:
- lv: docker-pool
- vg: atomicos
- state: absent
- force: yes
- - name: (Atomic) Grow root lv to fill vg
- lvol:
- lv: root
- vg: atomicos
- size: +100%FREE
- - name: (Atomic) Grow root fs to match lv
- filesystem:
- dev: /dev/mapper/atomicos-root
- fstype: xfs
- resizefs: yes
- - name: (Atomic) Force Ansible to re-gather disk facts
- setup:
- filter: 'ansible_mounts'
- when:
- - is_atomic | bool
- - ansible_distribution == "RedHat"
diff --git a/deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2 b/deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2
deleted file mode 100644
index b8020cb0..00000000
--- a/deployment/playbooks/roles/vmware-guest-setup/templates/chrony.conf.j2
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file is managed by Ansible
-
-server 0.rhel.pool.ntp.org
-server 1.rhel.pool.ntp.org
-server 2.rhel.pool.ntp.org
-server 3.rhel.pool.ntp.org
-
-driftfile /var/lib/chrony/drift
-makestep 10 3
-
-keyfile /etc/chrony.keys
-commandkey 1
-generatecommandkey
-
-noclientlog
-logchange 0.5
-
-logdir /var/log/chrony
-log measurements statistics tracking
diff --git a/deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml b/deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml
deleted file mode 100644
index a951d622..00000000
--- a/deployment/playbooks/roles/vmware-guest-setup/vars/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-locale: en_US.UTF-8
-timezone: UTC
diff --git a/deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml b/deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml
deleted file mode 100644
index 826ff498..00000000
--- a/deployment/playbooks/roles/yum-update-and-reboot/tasks/main.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-# NOTE(vponomar): this role should not be run from nodes
-# which are going to be rebooted.
----
-
-- block:
- - name: Check that hostnames_for_reboot var is set and is not an empty list
- fail:
- msg: "Role 'yum-update-and-reboot' expects 'hostnames_for_reboot' var
- to be set as a list of hostnames which should be rebooted."
- when: "(hostnames_for_reboot is not defined) or hostnames_for_reboot | length < 1"
-
- - name: Run yum_update command
- command: "yum update -y {{ (openshift_vers in ['v3_6', 'v3_7']) |
- ternary('--exclude=*docker*', '') }}"
- delegate_to: "{{ item }}"
- with_items: "{{ hostnames_for_reboot }}"
-
- - name: Reboot machine to apply all major changes to the system if exist
- shell: "sleep 3 ; /sbin/shutdown -r now 'Reboot triggered by Ansible'"
- async: 1
- poll: 0
- ignore_errors: true
- delegate_to: "{{ item }}"
- with_items: "{{ hostnames_for_reboot }}"
-
- - name: Wait for machine to go down
- wait_for:
- host: "{{ item }}"
- port: 22
- delay: 0
- timeout: 180
- connect_timeout: 5
- state: stopped
- with_items: "{{ hostnames_for_reboot }}"
-
- - name: Wait for machine to go up
- wait_for:
- host: "{{ item }}"
- port: 22
- delay: 0
- timeout: 360
- connect_timeout: 5
- state: started
- with_items: "{{ hostnames_for_reboot }}"
-
- - name: Sleep for some time to let services start up in time
- shell: "sleep 60"
- when: "disable_yum_update_and_reboot is undefined or not (disable_yum_update_and_reboot | bool)"
diff --git a/deployment/playbooks/scaleup.yaml b/deployment/playbooks/scaleup.yaml
deleted file mode 100644
index 4a21eadc..00000000
--- a/deployment/playbooks/scaleup.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- include: "{{ (openshift_vers in ['v3_6', 'v3_7']) |
- ternary(
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/' +
- 'byo/openshift-node/scaleup.yml',
- 'noop.yaml')
- }}"
-
-- include: "{{ (openshift_vers in ['v3_9']) |
- ternary(
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/' +
- 'openshift-node/scaleup.yml',
- 'noop.yaml')
- }}"
-
-# NOTE(vponomar): following playbooks are what we need from
-# 'playbooks/openshift-node/scaleup.yml' playbook in OCP3.10 and OCP3.11
-# It may be changed for OCP3.11+ versions.
-- include: "{{ (openshift_vers not in ['v3_6', 'v3_7', 'v3_9']) |
- ternary(
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/' +
- 'openshift-node/private/bootstrap.yml',
- 'noop.yaml')
- }}"
-
-- include: "{{ (openshift_vers not in ['v3_6', 'v3_7', 'v3_9']) |
- ternary(
- lookup('env', 'VIRTUAL_ENV') +
- '/usr/share/ansible/openshift-ansible/playbooks/' +
- 'openshift-node/private/join.yml',
- 'noop.yaml')
- }}"
diff --git a/deployment/playbooks/setup.yaml b/deployment/playbooks/setup.yaml
deleted file mode 100644
index 2166c2fc..00000000
--- a/deployment/playbooks/setup.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- hosts: localhost
- user: root
- become: false
- vars_files:
- - vars/main.yaml
- tasks:
- - name: "Create resource pool on vCenter"
- vmware_resource_pool:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- datacenter: "{{ vcenter_datacenter }}"
- cluster: "{{ vcenter_cluster}}"
- resource_pool: "{{ vcenter_resource_pool }}"
- state: "present"
- validate_certs: False
- - name: "Create folder structure on vCenter"
- vmware_folder:
- hostname: "{{ vcenter_host }}"
- username: "{{ vcenter_username }}"
- password: "{{ vcenter_password }}"
- datacenter: "{{ vcenter_datacenter }}"
- cluster: "{{ vcenter_cluster}}"
- folder: "{{ vcenter_folder }}"
- state: "present"
- validate_certs: False
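Both tasks use the bundled vmware_resource_pool and vmware_folder modules from deployment/playbooks/library/. Since the play targets localhost, the vCenter connection variables can come from vars/main.yaml or be passed as extra vars; a hypothetical invocation with placeholder values:

    ansible-playbook deployment/playbooks/setup.yaml \
        -e vcenter_host=vcenter.example.com \
        -e vcenter_username=administrator@vsphere.local \
        -e vcenter_password=changeme \
        -e vcenter_datacenter=dc1 \
        -e vcenter_cluster=cluster1 \
        -e vcenter_resource_pool=ocp-pool \
        -e vcenter_folder=ocp-vms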
diff --git a/deployment/playbooks/vars/main.yaml b/deployment/playbooks/vars/main.yaml
deleted file mode 100644
index 0b5a95af..00000000
--- a/deployment/playbooks/vars/main.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
----
-# OpenShift variables
-openshift_master_cluster_hostname: "{{ lb_host }}"
-openshift_master_cluster_public_hostname: "{{ lb_host }}"
-console_port: 8443
-openshift_vers: "{{ openshift_vers | default('v3_6')}}"
-openshift_major_version: "{{ openshift_vers.split('_')[-1] }}"
-openshift_ansible_branch: release-3.{{ openshift_major_version }}
-openshift_required_repos:
-- rhel-7-server-rpms
-- rhel-7-server-extras-rpms
-- rhel-7-server-ose-3.{{ openshift_major_version }}-rpms
-- rhel-7-fast-datapath-rpms
-openshift_crio_docker_gc_node_selector:
- runtime: crio
-# 'openshift_node_groups' is required for OCP3.10
-openshift_node_groups:
-- name: node-config-master
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- edits: []
-- name: node-config-master-crio
- labels:
- - 'node-role.kubernetes.io/master=true'
- - 'role=master'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
-- name: node-config-compute
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- edits: []
-- name: node-config-compute-crio
- labels:
- - 'node-role.kubernetes.io/compute=true'
- - 'node-role.kubernetes.io/infra=true'
- - 'role=compute'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]
-- name: node-config-storage
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- edits: []
-- name: node-config-storage-crio
- labels:
- - 'node-role.kubernetes.io/storage=true'
- - 'role=storage'
- - 'runtime=cri-o'
- edits:
- - key: kubeletArguments.container-runtime
- value: ["remote"]
- - key: kubeletArguments.container-runtime-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.image-service-endpoint
- value: ["/var/run/crio/crio.sock"]
- - key: kubeletArguments.runtime-request-timeout
- value: ["10m"]