
Merge remote-tracking branch 'upstream/master' into get_kuryr_services

Cloud User · 6 years ago · commit b03ebc4361
80 changed files with 645 additions and 425 deletions
  1. .papr-master-ha.inventory (+8 -1)
  2. .papr.all-in-one.inventory (+6 -1)
  3. .papr.inventory (+11 -3)
  4. .papr.sh (+2 -0)
  5. inventory/dynamic/gcp/group_vars/all/00_defaults.yml (+6 -0)
  6. inventory/hosts.example (+12 -8)
  7. inventory/hosts.glusterfs.registry-only.example (+4 -4)
  8. inventory/hosts.glusterfs.storage-and-registry.example (+7 -7)
  9. inventory/hosts.localhost (+5 -1)
  10. inventory/hosts.openstack (+2 -2)
  11. playbooks/azure/openshift-cluster/build_base_image.yml (+6 -10)
  12. playbooks/azure/openshift-cluster/build_node_image.yml (+5 -9)
  13. playbooks/azure/openshift-cluster/launch.yml (+3 -7)
  14. playbooks/azure/openshift-cluster/provisioning_vars.yml.example (+8 -3)
  15. playbooks/azure/openshift-cluster/tag_image_as_valid.yml (+9 -0)
  16. playbooks/azure/openshift-cluster/tasks/create_image_from_vm.yml (+12 -2)
  17. playbooks/azure/openshift-cluster/tasks/provision_instance.yml (+0 -4)
  18. playbooks/common/openshift-cluster/upgrades/init.yml (+7 -0)
  19. playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml (+0 -2)
  20. playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml (+0 -2)
  21. playbooks/container-runtime/private/setup_storage.yml (+3 -0)
  22. playbooks/gcp/openshift-cluster/openshift_node_group.yml (+23 -0)
  23. playbooks/init/sanity_checks.yml (+4 -1)
  24. playbooks/openshift-hosted/private/upgrade.yml (+2 -0)
  25. playbooks/openshift-management/private/config-sebool.yml (+11 -0)
  26. playbooks/openshift-management/private/config.yml (+2 -0)
  27. playbooks/openshift-master/openshift_node_group.yml (+12 -0)
  28. playbooks/openshift-master/private/additional_config.yml (+21 -0)
  29. playbooks/openshift-master/private/config.yml (+0 -3)
  30. playbooks/openshift-node/private/configure_bootstrap.yml (+0 -2)
  31. playbooks/openshift-node/private/restart.yml (+8 -3)
  32. playbooks/openstack/OWNERS (+2 -0)
  33. playbooks/openstack/configuration.md (+18 -17)
  34. playbooks/openstack/inventory.py (+4 -8)
  35. playbooks/openstack/sample-inventory/group_vars/OSEv3.yml (+21 -1)
  36. playbooks/openstack/sample-inventory/group_vars/all.yml (+0 -13)
  37. roles/container_runtime/defaults/main.yml (+1 -0)
  38. roles/container_runtime/tasks/extra_storage_setup.yml (+17 -0)
  39. roles/container_runtime/tasks/package_crio.yml (+0 -1)
  40. roles/kuryr/OWNERS (+2 -0)
  41. roles/lib_utils/action_plugins/node_group_checks.py (+136 -0)
  42. roles/lib_utils/action_plugins/sanity_checks.py (+4 -2)
  43. roles/openshift_aws/defaults/main.yml (+10 -5)
  44. roles/openshift_aws/tasks/setup_master_group.yml (+1 -0)
  45. roles/openshift_aws/tasks/setup_scale_group_facts.yml (+1 -0)
  46. roles/openshift_cloud_provider/tasks/vsphere.yml (+0 -10)
  47. roles/openshift_cluster_monitoring_operator/tasks/install.yaml (+1 -1)
  48. roles/openshift_control_plane/defaults/main.yml (+0 -1)
  49. roles/openshift_facts/defaults/main.yml (+15 -0)
  50. roles/openshift_gcp/defaults/main.yml (+5 -0)
  51. roles/openshift_gcp/tasks/setup_scale_group_facts.yml (+25 -25)
  52. roles/openshift_hosted_templates/files/v3.10/enterprise/registry-console.yaml (+0 -13)
  53. roles/openshift_hosted_templates/files/v3.10/origin/registry-console.yaml (+0 -13)
  54. roles/openshift_logging_fluentd/tasks/label_and_wait.yaml (+0 -22)
  55. roles/openshift_logging_fluentd/tasks/main.yaml (+11 -1)
  56. roles/openshift_logging_fluentd/templates/fluentd_label.sh.j2 (+0 -8)
  57. roles/openshift_manage_node/tasks/config.yml (+0 -16)
  58. roles/openshift_monitor_availability/tasks/install_monitor_app_create.yaml (+1 -1)
  59. roles/openshift_node/tasks/upgrade.yml (+6 -1)
  60. roles/openshift_node/tasks/upgrade/bootstrap_changes.yml (+2 -19)
  61. roles/openshift_node/tasks/upgrade/restart.yml (+1 -1)
  62. roles/openshift_node_group/defaults/main.yml (+3 -16)
  63. roles/openshift_node_group/files/sync.yaml (+2 -3)
  64. roles/openshift_node_group/tasks/bootstrap.yml (+0 -1)
  65. roles/openshift_node_group/tasks/bootstrap_config.yml (+2 -7)
  66. roles/openshift_node_group/tasks/check_for_config.yml (+7 -0)
  67. roles/openshift_node_group/tasks/check_for_configs.yml (+7 -0)
  68. roles/openshift_node_group/tasks/create_config.yml (+94 -103)
  69. roles/openshift_node_group/tasks/fetch_config.yml (+14 -0)
  70. roles/openshift_node_group/tasks/main.yml (+3 -3)
  71. roles/openshift_node_group/tasks/upgrade.yml (+3 -3)
  72. roles/openshift_node_group/templates/node-config.yaml.j2 (+4 -1)
  73. roles/openshift_node_group/vars/main.yml (+3 -3)
  74. roles/openshift_openstack/defaults/main.yml (+0 -6)
  75. roles/openshift_openstack/tasks/generate-dns.yml (+4 -10)
  76. roles/openshift_openstack/templates/heat_stack.yaml.j2 (+3 -8)
  77. roles/openshift_openstack/templates/heat_stack_server.yaml.j2 (+5 -5)
  78. roles/openshift_service_catalog/defaults/main.yml (+3 -0)
  79. roles/openshift_service_catalog/tasks/start.yml (+1 -1)
  80. roles/openshift_web_console/files/console-template.yaml (+4 -1)

+ 8 - 1
.papr-master-ha.inventory

@@ -15,6 +15,13 @@ openshift_portal_net=172.30.0.0/16
 openshift_enable_service_catalog=false
 debug_level=4
 
+my_node_group1_labels=['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true']
+my_node_group1={'name': 'node-config-all-in-one', 'labels': {{ my_node_group1_labels }} }
+
+openshift_node_groups=[{{ my_node_group1 }}]
+
+openshift_node_group_name="node-config-all-in-one"
+
 [all:vars]
 # bootstrap configs
 openshift_master_bootstrap_auto_approve=true
@@ -33,6 +40,6 @@ ocp-master2
 ocp-master3
 
 [nodes]
-ocp-master1 openshift_schedulable=true openshift_node_labels="{'node-role.kubernetes.io/infra':'true'}"
+ocp-master1 openshift_schedulable=true
 ocp-master2
 ocp-master3

+ 6 - 1
.papr.all-in-one.inventory

@@ -15,6 +15,11 @@ openshift_portal_net=172.30.0.0/16
 openshift_enable_service_catalog=false
 debug_level=4
 
+my_node_group1_labels=['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true']
+my_node_group1={'name': 'node-config-all-in-one', 'labels': {{ my_node_group1_labels }} }
+
+openshift_node_groups=[{{ my_node_group1 }}]
+
 [all:vars]
 # bootstrap configs
 openshift_master_bootstrap_auto_approve=true
@@ -29,4 +34,4 @@ ocp-master
 ocp-master
 
 [nodes]
-ocp-master openshift_schedulable=true openshift_node_labels="{'node-role.kubernetes.io/infra':'true'}" ansible_host="{{ lookup('env', 'RHCI_ocp_master_IP') }}"
+ocp-master openshift_schedulable=true ansible_host="{{ lookup('env', 'RHCI_ocp_master_IP') }}" openshift_node_group_name="node-config-all-in-one"

+ 11 - 3
.papr.inventory

@@ -14,6 +14,14 @@ openshift_check_min_host_memory_gb=1.9
 openshift_portal_net=172.30.0.0/16
 debug_level=4
 
+my_node_group1_labels=['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true']
+my_node_group1={'name': 'node-config-infra-master', 'labels': {{ my_node_group1_labels }} }
+
+my_node_group2_labels=['node-role.kubernetes.io/compute=true']
+my_node_group2={'name': 'node-config-compute', 'labels': {{ my_node_group2_labels }} }
+
+openshift_node_groups=[{{ my_node_group1 }}, {{ my_node_group2 }}]
+
 [all:vars]
 # bootstrap configs
 openshift_master_bootstrap_auto_approve=true
@@ -28,6 +36,6 @@ ocp-master
 ocp-master
 
 [nodes]
-ocp-master openshift_schedulable=true openshift_node_labels="{'node-role.kubernetes.io/infra':'true'}"
-ocp-node1
-ocp-node2
+ocp-master openshift_schedulable=true openshift_node_group_name="node-config-infra-master"
+ocp-node1 openshift_node_group_name="node-config-compute"
+ocp-node2 openshift_node_group_name="node-config-compute"
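
The pattern above is what the rest of this change assumes: each entry in openshift_node_groups supplies a name plus a list of labels, and every host in [nodes] points at one of those names via openshift_node_group_name. A minimal sketch with the default groups (hypothetical hostnames, for illustration only):

```ini
[OSEv3:vars]
# Relies on the default openshift_node_groups defined in
# roles/openshift_facts/defaults/main.yml (node-config-master,
# node-config-infra, node-config-compute).

[nodes]
master1.example.com openshift_node_group_name="node-config-master"
infra1.example.com  openshift_node_group_name="node-config-infra"
node1.example.com   openshift_node_group_name="node-config-compute"
```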

+ 2 - 0
.papr.sh

@@ -80,6 +80,8 @@ fi
 # Run upgrade playbook
 if [[ "${PAPR_RUN_UPDATE}" != "0" ]]; then
   update_version="$(echo $target_branch | sed 's/\./_/')"
+  # Create basic node-group configmaps for upgrade
+  ansible-playbook -vvv -i $PAPR_INVENTORY $PAPR_EXTRAVARS playbooks/openshift-master/openshift_node_group.yml
   ansible-playbook -vvv -i $PAPR_INVENTORY playbooks/byo/openshift-cluster/upgrades/v${update_version}/upgrade.yml
 fi
 

+ 6 - 0
inventory/dynamic/gcp/group_vars/all/00_defaults.yml

@@ -35,3 +35,9 @@ openshift_node_sdn_mtu: 1410
 osm_cluster_network_cidr: 172.16.0.0/16
 osm_host_subnet_length: 9
 openshift_portal_net: 172.30.0.0/16
+
+# masters and infra are the same in CI
+openshift_gcp_node_group_mapping:
+  masters: 'node-config-master'
+  infra: 'node-config-master'
+  compute: 'node-config-compute'

+ 12 - 8
inventory/hosts.example

@@ -12,8 +12,8 @@ ose3-master[1:3].test.example.com
 
 [nodes]
 ose3-master[1:3].test.example.com
-ose3-infra[1:2].test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
-ose3-node[1:2].test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
+ose3-infra[1:2].test.example.com
+ose3-node[1:2].test.example.com
 
 [nfs]
 ose3-master1.test.example.com
@@ -144,6 +144,12 @@ debug_level=2
 # Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
 # docker_upgrade=False
 
+# Specify a list of block devices to be formatted and mounted on the nodes
+# during prerequisites.yml. For each hash, "device", "path", "filesystem" are
+# required. To add devices only on certain classes of node, redefine
+# container_runtime_extra_storage as a group var.
+#container_runtime_extra_storage='[{"device":"/dev/vdc","path":"/var/lib/origin/openshift.local.volumes","filesystem":"xfs","options":"gquota"}]'
+
 # Enable etcd debug logging, defaults to false
 # etcd_debug=true
 # Set etcd log levels by package
@@ -367,12 +373,11 @@ debug_level=2
 #
 # An OpenShift router will be created during install if there are
 # nodes present with labels matching the default router selector,
-# "node-role.kubernetes.io/infra=true". Set openshift_node_labels per node as needed in
-# order to label nodes.
+# "node-role.kubernetes.io/infra=true".
 #
 # Example:
 # [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
+# node.example.com openshift_node_group_name="node-config-infra"
 #
 # Router selector (optional)
 # Router will only be created if nodes matching this label are present.
@@ -418,12 +423,11 @@ debug_level=2
 #
 # An OpenShift registry will be created during install if there are
 # nodes present with labels matching the default registry selector,
-# "node-role.kubernetes.io/infra=true". Set openshift_node_labels per node as needed in
-# order to label nodes.
+# "node-role.kubernetes.io/infra=true".
 #
 # Example:
 # [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
+# node.example.com openshift_node_group_name="node-config-infra"
 #
 # Registry selector (optional)
 # Registry will only be created if nodes matching this label are present.
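
The new container_runtime_extra_storage knob can also be scoped to a subset of hosts by redefining it as a group variable, as the comment above suggests. A minimal sketch in YAML (hypothetical group_vars file, same data as the example string):

```yaml
# group_vars/nodes.yml (hypothetical path)
container_runtime_extra_storage:
- device: /dev/vdc
  path: /var/lib/origin/openshift.local.volumes
  filesystem: xfs
  options: gquota
```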

+ 4 - 4
inventory/hosts.glusterfs.registry-only.example

@@ -31,16 +31,16 @@ openshift_deployment_type=origin
 openshift_hosted_registry_storage_kind=glusterfs
 
 [masters]
-master
+master openshift_node_group_name="node-config-master"
 
 [nodes]
 # masters should be schedulable to run web console pods
 master  openshift_schedulable=True
 # A hosted registry, by default, will only be deployed on nodes labeled
 # "node-role.kubernetes.io/infra=true".
-node0   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node1   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node2   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node0   openshift_node_group_name="node-config-infra"
+node1   openshift_node_group_name="node-config-infra"
+node2   openshift_node_group_name="node-config-infra"
 
 [etcd]
 master

+ 7 - 7
inventory/hosts.glusterfs.storage-and-registry.example

@@ -36,17 +36,17 @@ master
 
 [nodes]
 # masters should be schedulable to run web console pods
-master  openshift_schedulable=True
+master  openshift_node_group_name="node-config-master" openshift_schedulable=True
 # It is recommended to not use a single cluster for both general and registry
 # storage, so two three-node clusters will be required.
-node0   openshift_schedulable=True
-node1   openshift_schedulable=True
-node2   openshift_schedulable=True
+node0   openshift_node_group_name="node-config-compute"
+node1   openshift_node_group_name="node-config-compute"
+node2   openshift_node_group_name="node-config-compute"
 # A hosted registry, by default, will only be deployed on nodes labeled
 # "node-role.kubernetes.io/infra=true".
-node3   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node4   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node5   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node3   openshift_node_group_name="node-config-infra"
+node4   openshift_node_group_name="node-config-infra"
+node5   openshift_node_group_name="node-config-infra"
 
 [etcd]
 master

+ 5 - 1
inventory/hosts.localhost

@@ -13,6 +13,9 @@ openshift_portal_net=172.30.0.0/16
 # localhost likely doesn't meet the minimum requirements
 openshift_disable_check=disk_availability,memory_availability
 
+openshift_node_groups=[{'name': 'node-config-all-in-one', 'labels': ['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true']}]
+
+
 [masters]
 localhost ansible_connection=local
 
@@ -20,4 +23,5 @@ localhost ansible_connection=local
 localhost ansible_connection=local
 
 [nodes]
-localhost ansible_connection=local openshift_node_labels="{'node-role.kubernetes.io/infra': 'true'}"
+# openshift_node_group_name should refer to a dictionary with matching key of name in list openshift_node_groups.
+localhost ansible_connection=local openshift_node_group_name="node-config-all-in-one"

+ 2 - 2
inventory/hosts.openstack

@@ -33,5 +33,5 @@ jdetiber-etcd.usersys.redhat.com
 #ose3-lb-ansible.test.example.com
 
 [nodes]
-jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
-jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_group_name="node-config-master"
+jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_group_name="node-config-compute"

+ 6 - 10
playbooks/azure/openshift-cluster/build_base_image.yml

@@ -2,11 +2,12 @@
 - hosts: localhost
   gather_facts: no
   tasks:
+  - name: calculate input image
+    command: az image list -g "{{ openshift_azure_input_image_ns }}" --query "[?starts_with(name, '{{ openshift_azure_input_image_prefix }}-') && tags.valid=='true'] | sort_by(@, &name) | [-1]"
+    register: input_image
+
   - name: provision resource group
     import_tasks: tasks/provision_instance.yml
-    vars:
-      image_prefix: "{{ openshift_azure_input_image_prefix }}"
-      image_resource_group: "{{ openshift_azure_input_image_ns }}"
 
 - hosts: nodes
   tasks:
@@ -34,20 +35,15 @@
 - hosts: localhost
   gather_facts: no
   tasks:
-  - name: get current date/time
-    shell: TZ=Etc/UTC date +%Y%m%d%H%M
-    register: now
-
-  - set_fact:
-      image_name: "{{ openshift_azure_output_image_prefix }}-{{ now.stdout }}"
-
   - name: create image
     import_tasks: tasks/create_image_from_vm.yml
     vars:
       image_resource_group: "{{ openshift_azure_output_image_ns }}"
+      image_name: "{{ openshift_azure_output_image_name }}"
       image_tags:
         root_image: "{{ (input_image.stdout | from_json).name }}"
         kernel: "{{ hostvars[groups['nodes'][0]]['ansible_kernel'] }}"
+        valid: true
 
   - name: create blob
     import_tasks: tasks/create_blob_from_vm.yml
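
The "calculate input image" command, moved here out of provision_instance.yml, selects the newest image in the input resource group whose name starts with the configured prefix and whose tags include valid=true. Roughly, with illustrative values:

```sh
# Example values only; the playbook templates these from
# openshift_azure_input_image_ns and openshift_azure_input_image_prefix.
az image list -g images \
  --query "[?starts_with(name, 'centos7-base-') && tags.valid=='true'] | sort_by(@, &name) | [-1]"
# Because image names end in a timestamp (e.g. centos7-base-201806071412),
# sorting by name and taking the last element returns the newest valid image.
```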

+ 5 - 9
playbooks/azure/openshift-cluster/build_node_image.yml

@@ -2,11 +2,13 @@
 - hosts: localhost
   gather_facts: no
   tasks:
+  - name: calculate input image
+    command: az image list -g "{{ openshift_azure_input_image_ns }}" --query "[?starts_with(name, '{{ openshift_azure_input_image_prefix }}-') && tags.valid=='true'] | sort_by(@, &name) | [-1]"
+    register: input_image
+
   - name: provision resource group
     import_tasks: tasks/provision_instance.yml
     vars:
-      image_prefix: "{{ openshift_azure_input_image_prefix }}"
-      image_resource_group: "{{ openshift_azure_input_image_ns }}"
       create_args: --data-disk-sizes-gb 128
 
   - set_fact:
@@ -73,20 +75,14 @@
 - hosts: localhost
   gather_facts: no
   tasks:
-  - name: get current date/time
-    shell: TZ=Etc/UTC date +%Y%m%d%H%M
-    register: now
-
   - set_fact:
       openshift_rpm: "{{ hostvars[groups['nodes'][0]]['yum'].results | selectattr('name', 'match', '^(origin|atomic-openshift)$') | first }}"
 
-  - set_fact:
-      image_name: "{{ openshift_azure_output_image_prefix }}-{{ openshift_rpm.version | regex_replace('^(\\d+\\.\\d+).*', '\\1') }}-{{ now.stdout }}"
-
   - name: create image
     import_tasks: tasks/create_image_from_vm.yml
     vars:
       image_resource_group: "{{ openshift_azure_output_image_ns }}"
+      image_name: "{{ openshift_azure_output_image_name }}"
       image_tags:
         base_image: "{{ (input_image.stdout | from_json).name }}"
         kernel: "{{ hostvars[groups['nodes'][0]]['ansible_kernel'] }}"

+ 3 - 7
playbooks/azure/openshift-cluster/launch.yml

@@ -5,10 +5,6 @@
   - import_role:
       name: lib_utils
 
-  - name: calculate input image
-    command: az image list -g "{{ openshift_azure_input_image_ns }}" --query "[?starts_with(name, '{{ openshift_azure_input_image_prefix }}-') && tags.valid=='true'] | sort_by(@, &name) | [-1]"
-    register: input_image
-
   - name: create temporary directory
     tempfile:
       state: directory
@@ -51,17 +47,17 @@
       - key: properties.masterProfile.dnsPrefix
         value: "a{{ 16 | lib_utils_oo_random_word }}a"
       - key: properties.masterProfile.imageReference.name
-        value: "{{ (input_image.stdout | from_json).name }}"
+        value: "{{ openshift_azure_input_image_name }}"
       - key: properties.masterProfile.imageReference.resourceGroup
         value: "{{ openshift_azure_input_image_ns }}"
       # agentpool compute
       - key: properties.agentPoolProfiles[0].imageReference.name
-        value: "{{ (input_image.stdout | from_json).name }}"
+        value: "{{ openshift_azure_input_image_name }}"
       - key: properties.agentPoolProfiles[0].imageReference.resourceGroup
         value: "{{ openshift_azure_input_image_ns }}"
       # agentpool infra
       - key: properties.agentPoolProfiles[1].imageReference.name
-        value: "{{ (input_image.stdout | from_json).name }}"
+        value: "{{ openshift_azure_input_image_name }}"
       - key: properties.agentPoolProfiles[1].imageReference.resourceGroup
         value: "{{ openshift_azure_input_image_ns }}"
       # linuxprofile

+ 8 - 3
playbooks/azure/openshift-cluster/provisioning_vars.yml.example

@@ -9,14 +9,19 @@ openshift_azure_resource_location: eastus
 # input image resource group
 openshift_azure_input_image_ns: images
 
-# input image prefix, e.g. centos7-root or centos7-base
+# input image prefix, needed by base and node image building playbooks,
+# e.g. centos7-root or centos7-base
 openshift_azure_input_image_prefix:
 
+# complete name of input image, needed by launch.yml playbook,
+# e.g. centos7-3.10-201806071434
+openshift_azure_input_image_name:
+
 # output image resource group
 openshift_azure_output_image_ns: images
 
-# output image prefix, e.g. centos7-base or centos7
-openshift_azure_output_image_prefix:
+# complete name of output image, e.g. centos7-base-201806071412 or centos7-3.10-201806071434
+openshift_azure_output_image_name:
 
 # ssh public key for VMs created by playbook; private key must be accessible to
 # ansible

+ 9 - 0
playbooks/azure/openshift-cluster/tag_image_as_valid.yml

@@ -0,0 +1,9 @@
+---
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: add valid tag to the image
+    shell: >
+      jsonrtag=$(az resource show -g '{{ openshift_azure_input_image_ns }}' -n '{{ openshift_azure_input_image_name }}' --resource-type 'Microsoft.Compute/images' --query tags);
+      rt=$(echo $jsonrtag | tr -d '"{},' | sed 's/: /=/g');
+      az resource tag --tags $rt 'valid=true' -g '{{ openshift_azure_input_image_ns }}' -n '{{ openshift_azure_input_image_name }}' --resource-type 'Microsoft.Compute/images'
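
A usage sketch for this new playbook; the image namespace and name are illustrative (the name format follows provisioning_vars.yml.example):

```sh
ansible-playbook playbooks/azure/openshift-cluster/tag_image_as_valid.yml \
  -e openshift_azure_input_image_ns=images \
  -e openshift_azure_input_image_name=centos7-3.10-201806071434
```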

+ 12 - 2
playbooks/azure/openshift-cluster/tasks/create_image_from_vm.yml

@@ -31,9 +31,19 @@
     --source "{{ (vm.stdout | from_json).storageProfile.osDisk.managedDisk.id }}"
     --os-type Linux
 
-- name: calculate tags
+- name: get input image tags
+  command: az image show -g "{{ openshift_azure_input_image_ns }}" -n "{{ (input_image.stdout | from_json).name }}"
+  register: input_image_tags
+
+- name: remove valid tag from input image tags
+  set_fact:
+    input_image_tags_no_valid: "{{ input_image_tags_no_valid | default({}) | combine({item.key: item.value}) }}"
+  when: item.key not in ['valid']
+  with_dict: "{{ (input_image_tags.stdout | from_json).tags }}"
+
+- name: calculate final tags
   set_fact:
-    final_tags: "{{ (input_image.stdout | from_json).tags | combine(image_tags) }}"
+    final_tags: "{{ input_image_tags_no_valid | combine(image_tags) }}"
 
 - name: tag image
   command: >

+ 0 - 4
playbooks/azure/openshift-cluster/tasks/provision_instance.yml

@@ -20,10 +20,6 @@
     virtual_network: vnet
     address_prefix: 192.168.0.0/24
 
-- name: calculate input image
-  command: az image list -g "{{ image_resource_group }}" --query "[?starts_with(name, '{{ image_prefix }}-') && tags.valid=='true'] | sort_by(@, &name) | [-1]"
-  register: input_image
-
 - name: create vm
   command: >
     az vm create

+ 7 - 0
playbooks/common/openshift-cluster/upgrades/init.yml

@@ -9,6 +9,13 @@
 - import_playbook: ../../../init/base_packages.yml
 - import_playbook: ../../../init/cluster_facts.yml
 
+- name: Ensure essential node configmaps are present
+  hosts: oo_first_master
+  tasks:
+  - import_role:
+      name: openshift_node_group
+      tasks_from: check_for_configs.yml
+
 - name: Ensure firewall is not switched during upgrade
   hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
   vars:

+ 0 - 2
playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

@@ -57,8 +57,6 @@
   - import_role:
       name: openshift_node
       tasks_from: upgrade.yml
-    vars:
-      r_node_dynamic_config_name: "{{ openshift_node_group_name | default('node-config-compute') }}"
 
   # Run the upgrade hook prior to make the node schedulable again.
   - debug: msg="Running node upgrade hook {{ openshift_node_upgrade_hook }}"

+ 0 - 2
playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml

@@ -93,8 +93,6 @@
   - import_role:
       name: openshift_node
       tasks_from: upgrade
-    vars:
-      r_node_dynamic_config_name: "{{ openshift_node_group_name | default('node-config-master') }}"
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:

+ 3 - 0
playbooks/container-runtime/private/setup_storage.yml

@@ -19,3 +19,6 @@
       when:
         - container_runtime_docker_storage_type|default('') == "overlay2"
         - openshift_docker_is_node_or_master | bool
+    - import_role:
+        name: container_runtime
+        tasks_from: extra_storage_setup.yml

+ 23 - 0
playbooks/gcp/openshift-cluster/openshift_node_group.yml

@@ -0,0 +1,23 @@
+# This playbook installs onto a provisioned cluster
+---
+- hosts: localhost
+  connection: local
+  tasks:
+  - name: place all scale groups into Ansible groups
+    include_role:
+      name: openshift_gcp
+      tasks_from: setup_scale_group_facts.yml
+    vars:
+      all_nodes: true
+
+- import_playbook: ../../init/main.yml
+  vars:
+    l_init_fact_hosts: "oo_masters_to_config"
+    l_openshift_version_set_hosts: "all:!all"
+    l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
+
+- name: Setup node-group configmaps
+  hosts: oo_first_master
+  tasks:
+  - import_role:
+      name: openshift_node_group

+ 4 - 1
playbooks/init/sanity_checks.yml

@@ -3,6 +3,7 @@
 - name: Verify Requirements
   hosts: oo_first_master
   roles:
+  - role: openshift_facts
   - role: lib_utils
   tasks:
   # sanity_checks is a custom action plugin defined in lib_utils.
@@ -13,4 +14,6 @@
   - name: Run variable sanity checks
     sanity_checks:
       check_hosts: "{{ l_sanity_check_hosts | default(groups['oo_all_hosts']) }}"
-    run_once: True
+  # node_group_checks is a custom action plugin defined in lib_utils.
+  - name: Validate openshift_node_groups and openshift_node_group_name
+    node_group_checks: {}

+ 2 - 0
playbooks/openshift-hosted/private/upgrade.yml

@@ -9,6 +9,8 @@
   - import_role:
       name: openshift_hosted
       tasks_from: upgrade_routers.yml
+    when: openshift_hosted_manage_router | default(True) | bool
   - import_role:
       name: openshift_hosted
       tasks_from: upgrade_registry.yml
+    when: openshift_hosted_manage_registry | default(True) | bool

+ 11 - 0
playbooks/openshift-management/private/config-sebool.yml

@@ -0,0 +1,11 @@
+---
+- name: Enable sebool container_manage_cgroup
+  hosts: oo_nodes_to_config
+  gather_facts: false
+  become: yes
+  tasks:
+  - name: Setting sebool container_manage_cgroup
+    seboolean:
+      name: container_manage_cgroup
+      state: yes
+      persistent: yes

+ 2 - 0
playbooks/openshift-management/private/config.yml

@@ -13,6 +13,8 @@
           status: "In Progress"
           start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
+- import_playbook: config-sebool.yml
+
 - name: Setup CFME
   hosts: oo_first_master
   pre_tasks:

+ 12 - 0
playbooks/openshift-master/openshift_node_group.yml

@@ -0,0 +1,12 @@
+---
+- import_playbook: ../init/main.yml
+  vars:
+    l_init_fact_hosts: "oo_masters_to_config"
+    l_openshift_version_set_hosts: "all:!all"
+    l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
+
+- name: Setup node-group configmaps
+  hosts: oo_first_master
+  tasks:
+  - import_role:
+      name: openshift_node_group

+ 21 - 0
playbooks/openshift-master/private/additional_config.yml

@@ -40,6 +40,27 @@
   - role: flannel_register
     when: openshift_use_flannel | default(false) | bool
 
+- name: configure vsphere svc account
+  hosts: oo_first_master
+  tasks:
+  - import_role:
+      name: openshift_cloud_provider
+      tasks_from: vsphere-svc
+    when:
+    - openshift_cloudprovider_kind is defined
+    - openshift_cloudprovider_kind == 'vsphere'
+    - openshift_version | version_compare('3.9', '>=')
+
+- name: update vsphere provider master config
+  hosts: oo_masters_to_config
+  tasks:
+  - import_role:
+      name: openshift_cloud_provider
+      tasks_from: update-vsphere
+    when:
+    - openshift_cloudprovider_kind is defined
+    - openshift_cloudprovider_kind == 'vsphere'
+
 - name: Master Additional Install Checkpoint End
   hosts: all
   gather_facts: false

+ 0 - 3
playbooks/openshift-master/private/config.yml

@@ -84,9 +84,6 @@
     import_role:
       name: openshift_node_group
       tasks_from: bootstrap_config
-    vars:
-      r_node_dynamic_config_name: "{{ openshift_node_group_name | default('node-config-master') }}"
-      r_node_dynamic_config_force: True
 
   roles:
   - role: openshift_master_facts

+ 0 - 2
playbooks/openshift-node/private/configure_bootstrap.yml

@@ -12,7 +12,5 @@
     import_role:
       name: openshift_node_group
       tasks_from: bootstrap_config
-    vars:
-      r_node_dynamic_config_name: "{{ openshift_node_group_name | default('node-config-compute') }}"
   - set_fact:
       openshift_is_bootstrapped: True

+ 8 - 3
playbooks/openshift-node/private/restart.yml

@@ -40,7 +40,12 @@
     register: node_output
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_config
-    until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
-    # Give the node two minutes to come back online.
-    retries: 24
+    until:
+    - node_output.results is defined
+    - node_output.results.returncode is defined
+    - node_output.results.results is defined
+    - node_output.results.returncode == 0
+    - node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
+    # Give the node three minutes to come back online.
+    retries: 36
     delay: 5

+ 2 - 0
playbooks/openstack/OWNERS

@@ -1,8 +1,10 @@
 # approval == this is a good idea /approve
 approvers:
+  - luis5tb
   - tomassedovic
   - tzumainn
 # review == this code is good /lgtm
 reviewers:
+  - luis5tb
   - tomassedovic
   - tzumainn

+ 18 - 17
playbooks/openstack/configuration.md

@@ -56,15 +56,6 @@ In `inventory/group_vars/all.yml`:
   * `openshift_openstack_lb_hostname` Defaults to `lb`.
   * `openshift_openstack_etcd_hostname` Defaults to `etcd`.
 * `openshift_openstack_external_network_name` OpenStack network providing external connectivity.
-* `openshift_openstack_cluster_node_labels` Custom labels for openshift cluster node groups; currently supports app and infra node groups.
-The default value of this variable sets `region: primary` to app nodes and `region: infra` to infra nodes. An example of setting a customized label:
-
-```
-openshift_openstack_cluster_node_labels:
-  app:
-    mylabel: myvalue
-```
-
 * `openshift_openstack_provision_user_commands` Allows users to execute shell commands via cloud-init for all of the created Nova servers in the Heat stack, before they are available for SSH connections. Note that you should use [custom Ansible playbooks](./post-install.md#run-custom-post-provision-actions) whenever possible. User specified shell commands for cloud-init need to be either strings or lists:
 
 ```
@@ -386,18 +377,28 @@ On the other hand, there is a multi driver support to enable hybrid
 deployments with different pools drivers. In order to enable the kuryr
 `multi-pool` driver support, we need to also tag the nodes with their
 corresponding `pod_vif` labels so that the right kuryr pool driver is used
-for each VM/node. To do that, uncomment:
+for each VM/node.
+
+To do that, set this in `inventory/group_vars/OSEv3.yml`:
 
 ```yaml
 kuryr_openstack_pool_driver: multi
 
-openshift_openstack_cluster_node_labels:
-  app:
-    region: primary
-    pod_vif: nested-vlan
-  infra:
-    region: infra
-    pod_vif: nested-vlan
+openshift_node_groups:
+  - name: node-config-master
+    labels:
+      - 'node-role.kubernetes.io/master=true'
+    edits: []
+  - name: node-config-infra
+    labels:
+      - 'node-role.kubernetes.io/infra=true'
+      - 'pod_vif=nested-vlan'
+    edits: []
+  - name: node-config-compute
+    labels:
+      - 'node-role.kubernetes.io/compute=true'
+      - 'pod_vif=nested-vlan'
+    edits: []
 ```
 
 

+ 4 - 8
playbooks/openstack/inventory.py

@@ -9,7 +9,6 @@ environment.
 
 from __future__ import print_function
 
-from collections import Mapping
 import json
 import os
 
@@ -105,13 +104,8 @@ def _get_hostvars(server, docker_storage_mountpoints):
     if server.metadata['host-type'] == 'cns':
         hostvars['glusterfs_devices'] = ['/dev/nvme0n1']
 
-    node_labels = server.metadata.get('node_labels')
-    # NOTE(shadower): the node_labels value must be a dict not string
-    if not isinstance(node_labels, Mapping):
-        node_labels = json.loads(node_labels)
-
-    if node_labels:
-        hostvars['openshift_node_labels'] = node_labels
+    group_name = server.metadata.get('openshift_node_group_name')
+    hostvars['openshift_node_group_name'] = group_name
 
     # check for attached docker storage volumes
     if 'os-extended-volumes:volumes_attached' in server:
@@ -174,6 +168,8 @@ def build_inventory():
         except KeyError:
             pass  # Internal LB not specified
 
+        inventory['localhost']['openshift_openstack_private_api_ip'] = \
+            stout.get('private_api_ip')
         inventory['localhost']['openshift_openstack_public_api_ip'] = \
             stout.get('public_api_ip')
         inventory['localhost']['openshift_openstack_public_router_ip'] = \

+ 21 - 1
playbooks/openstack/sample-inventory/group_vars/OSEv3.yml

@@ -21,11 +21,31 @@ openshift_master_default_subdomain: "apps.{{ (openshift_openstack_clusterid|trim
 # domain the OpenShift cluster is configured, though.
 openshift_master_cluster_public_hostname: "console.{{ (openshift_openstack_clusterid|trim == '') | ternary(openshift_openstack_public_dns_domain, openshift_openstack_clusterid + '.' + openshift_openstack_public_dns_domain) }}"
 
-osm_default_node_selector: 'region=primary'
 
 openshift_hosted_router_wait: True
 openshift_hosted_registry_wait: True
 
+
+## Kuryr label configuration
+#kuryr_openstack_pool_driver: multi
+#
+#openshift_node_groups:
+#  - name: node-config-master
+#    labels:
+#      - 'node-role.kubernetes.io/master=true'
+#    edits: []
+#  - name: node-config-infra
+#    labels:
+#      - 'node-role.kubernetes.io/infra=true'
+#      - 'pod_vif=nested-vlan'
+#    edits: []
+#  - name: node-config-compute
+#    labels:
+#      - 'node-role.kubernetes.io/compute=true'
+#      - 'pod_vif=nested-vlan'
+#    edits: []
+
+
 ## Openstack credentials
 #openshift_cloudprovider_kind: openstack
 #openshift_cloudprovider_openstack_auth_url: "{{ lookup('env','OS_AUTH_URL') }}"

+ 0 - 13
playbooks/openstack/sample-inventory/group_vars/all.yml

@@ -184,19 +184,6 @@ ansible_user: openshift
 # NOTE: this is for testing only! Your data will be gone once the VM disappears!
 # openshift_openstack_ephemeral_volumes: false
 
-# # OpenShift node labels
-# # - in order to customise node labels for app and/or infra group, set the
-# #   openshift_openstack_cluster_node_labels variable
-# # - to enable the multi-pool driver support at Kuryr the driver for the pod
-#  #  pod vif need to be added as a node label
-#openshift_openstack_cluster_node_labels:
-#  app:
-#    region: primary
-#    pod_vif: nested-vlan
-#  infra:
-#    region: infra
-#    pod_vif: nested-vlan
-
 ## cloud config
 openshift_openstack_disable_root: true
 openshift_openstack_user: openshift

+ 1 - 0
roles/container_runtime/defaults/main.yml

@@ -65,6 +65,7 @@ docker_storage_extra_options:
 - "{{ '--storage-opt overlay2.size=' ~ docker_storage_size if container_runtime_docker_storage_setup_device is defined and container_runtime_docker_storage_setup_device != '' else '' }}"
 - "--graph={{ docker_storage_path}}"
 
+container_runtime_extra_storage: []
 
 # Set local versions of facts that must be in json format for container-daemon.json
 # NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson

+ 17 - 0
roles/container_runtime/tasks/extra_storage_setup.yml

@@ -0,0 +1,17 @@
+---
+- name: Create file system on extra volume device
+  filesystem:
+    fstype: "{{ item.filesystem }}"
+    dev: "{{ item.device }}"
+    force: "{{ item.force|default(omit) }}"
+  with_items: "{{ container_runtime_extra_storage }}"
+
+
+- name: Create mount entry for extra volume
+  mount:
+    path: "{{ item.path }}"
+    src: "{{ item.device }}"
+    fstype: "{{ item.filesystem }}"
+    opts: "{{ item.options|default(omit) }}"
+    state: mounted
+  with_items: "{{ container_runtime_extra_storage }}"
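
For each list entry, these two tasks behave roughly like the following shell commands (sketch for the /dev/vdc example in inventory/hosts.example; the mount module additionally persists the entry in /etc/fstab, and the filesystem module skips devices that are already formatted unless force is set):

```sh
mkfs -t xfs /dev/vdc
mount -o gquota /dev/vdc /var/lib/origin/openshift.local.volumes
```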

+ 0 - 1
roles/container_runtime/tasks/package_crio.yml

@@ -40,7 +40,6 @@
     crio_pkgs:
       - "cri-o"
       - "cri-tools"
-      - "podman"
 
 - name: Remove CRI-O default configuration files
   file:

+ 2 - 0
roles/kuryr/OWNERS

@@ -1,8 +1,10 @@
 # approval == this is a good idea /approve
 approvers:
+  - luis5tb
   - tomassedovic
   - tzumainn
 # review == this code is good /lgtm
 reviewers:
+  - luis5tb
   - tomassedovic
   - tzumainn

+ 136 - 0
roles/lib_utils/action_plugins/node_group_checks.py

@@ -0,0 +1,136 @@
+"""
+Ansible action plugin to ensure inventory variables are set
+appropriately related to openshift_node_group_name
+"""
+from ansible.plugins.action import ActionBase
+from ansible import errors
+
+# Runs on first master
+# Checks each openshift_node_group_name is found in openshift_node_groups
+# Checks that master label is present in one of those groups
+# Checks that node label is present in one of those groups
+
+
+def get_or_fail(group, key):
+    """Find a key in a group dictionary or fail"""
+    res = group.get(key)
+    if res is None:
+        msg = "Each group in openshift_node_groups must have {} key".format(key)
+        raise errors.AnsibleModuleError(msg)
+    return res
+
+
+def validate_labels(labels_found):
+    """Ensure mandatory_labels are found in the labels we found, labels_found"""
+    mandatory_labels = ('node-role.kubernetes.io/master=true',
+                        'node-role.kubernetes.io/infra=true')
+    for item in mandatory_labels:
+        if item not in labels_found:
+            msg = ("At least one group in openshift_node_groups requires the"
+                   " {} label").format(item)
+            raise errors.AnsibleModuleError(msg)
+
+
+def process_group(group, groups_found, labels_found):
+    """Validate format of each group in openshift_node_groups"""
+    name = get_or_fail(group, 'name')
+    if name in groups_found:
+        msg = ("Duplicate definition of group {} in"
+               " openshift_node_groups").format(name)
+        raise errors.AnsibleModuleError(msg)
+    groups_found.add(name)
+    labels = get_or_fail(group, 'labels')
+    if not issubclass(type(labels), list):
+        msg = "labels value of each group in openshift_node_groups must be a list"
+        raise errors.AnsibleModuleError(msg)
+    labels_found.update(labels)
+
+
+class ActionModule(ActionBase):
+    """Action plugin to execute node_group_checks."""
+    def template_var(self, hostvars, host, varname):
+        """Retrieve a variable from hostvars and template it.
+           If undefined, return None type."""
+        # We will set the current host and variable checked for easy debugging
+        # if there are any unhandled exceptions.
+        # pylint: disable=W0201
+        self.last_checked_var = varname
+        # pylint: disable=W0201
+        self.last_checked_host = host
+        res = hostvars[host].get(varname)
+        if res is None:
+            return None
+        return self._templar.template(res)
+
+    def get_node_group_name(self, hostvars, host):
+        """Ensure openshift_node_group_name is defined for nodes"""
+        group_name = self.template_var(hostvars, host, 'openshift_node_group_name')
+        if not group_name:
+            msg = "openshift_node_group_name must be defined for all nodes"
+            raise errors.AnsibleModuleError(msg)
+        return group_name
+
+    def run_check(self, hostvars, host, groups_found):
+        """Run the check for each host"""
+        group_name = self.get_node_group_name(hostvars, host)
+        if group_name not in groups_found:
+            msg = "Group: {} not found in openshift_node_groups".format(group_name)
+            raise errors.AnsibleModuleError(msg)
+
+    def run(self, tmp=None, task_vars=None):
+        """Run node_group_checks action plugin"""
+        result = super(ActionModule, self).run(tmp, task_vars)
+        result["changed"] = False
+        result["failed"] = False
+        result["msg"] = "Node group checks passed"
+        # self.task_vars holds all in-scope variables.
+        # Ignore settting self.task_vars outside of init.
+        # pylint: disable=W0201
+        self.task_vars = task_vars or {}
+
+        # pylint: disable=W0201
+        self.last_checked_host = "none"
+        # pylint: disable=W0201
+        self.last_checked_var = "none"
+
+        # check_hosts is hard-set to oo_nodes_to_config
+        check_hosts = self.task_vars['groups'].get('oo_nodes_to_config')
+        if not check_hosts:
+            result["msg"] = "skipping; oo_nodes_to_config is required for this check"
+            return result
+
+        # We need to access each host's variables
+        hostvars = self.task_vars.get('hostvars')
+        if not hostvars:
+            msg = "hostvars is undefined or empty; cannot run node_group_checks"
+            raise errors.AnsibleModuleError(msg)
+
+        openshift_node_groups = self.task_vars.get('openshift_node_groups')
+        if not openshift_node_groups:
+            msg = "openshift_node_groups undefined"
+            raise errors.AnsibleModuleError(msg)
+
+        openshift_node_groups = self._templar.template(openshift_node_groups)
+        groups_found = set()
+        labels_found = set()
+        # gather the groups and labels we believe should be present.
+        for group in openshift_node_groups:
+            process_group(group, groups_found, labels_found)
+
+        if len(groups_found) == 0:
+            msg = "No groups found in openshift_node_groups"
+            raise errors.AnsibleModuleError(msg)
+
+        validate_labels(labels_found)
+
+        # We loop through each host in the provided list check_hosts
+        for host in check_hosts:
+            try:
+                self.run_check(hostvars, host, groups_found)
+            except Exception as uncaught_e:
+                msg = "last_checked_host: {}, last_checked_var: {};"
+                msg = msg.format(self.last_checked_host, self.last_checked_var)
+                msg += str(uncaught_e)
+                raise errors.AnsibleModuleError(msg)
+
+        return result
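
A standalone sketch of how the validation helpers above behave when fed the default openshift_node_groups from roles/openshift_facts/defaults/main.yml. It assumes Ansible is installed and that the action_plugins directory is on sys.path so the file imports as node_group_checks:

```python
from node_group_checks import process_group, validate_labels

node_groups = [
    {'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true'], 'edits': []},
    {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true'], 'edits': []},
    {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true'], 'edits': []},
]

groups_found, labels_found = set(), set()
for group in node_groups:
    # Raises AnsibleModuleError on a missing name/labels key, a duplicate
    # group name, or a labels value that is not a list.
    process_group(group, groups_found, labels_found)

# Raises unless both the master=true and infra=true labels appear somewhere.
validate_labels(labels_found)

print(sorted(groups_found))
# ['node-config-compute', 'node-config-infra', 'node-config-master']
```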

+ 4 - 2
roles/lib_utils/action_plugins/sanity_checks.py

@@ -69,8 +69,10 @@ REMOVED_VARIABLES = (
     ('openshift_prometheus_image_version', 'openshift_prometheus_image'),
     ('openshift_prometheus_proxy_image_prefix', 'openshift_prometheus_proxy_image'),
     ('openshift_prometheus_proxy_image_version', 'openshift_prometheus_proxy_image'),
-    ('openshift_prometheus_altermanager_image_prefix', 'openshift_prometheus_altermanager_image'),
-    ('openshift_prometheus_altermanager_image_version', 'openshift_prometheus_altermanager_image'),
+    ('openshift_prometheus_alertmanager_image_prefix', 'openshift_prometheus_alertmanager_image'),
+    # A typo was introduced at some point, need to warn for this older version.
+    ('openshift_prometheus_altermanager_image_prefix', 'openshift_prometheus_alertmanager_image'),
+    ('openshift_prometheus_alertmanager_image_version', 'openshift_prometheus_alertmanager_image'),
     ('openshift_prometheus_alertbuffer_image_prefix', 'openshift_prometheus_alertbuffer_image'),
     ('openshift_prometheus_alertbuffer_image_version', 'openshift_prometheus_alertbuffer_image'),
     ('openshift_prometheus_node_exporter_image_prefix', 'openshift_prometheus_node_exporter_image'),

+ 10 - 5
roles/openshift_aws/defaults/main.yml

@@ -121,21 +121,21 @@ openshift_aws_elb_dict:
 openshift_aws_node_group_config_master_volumes:
 - device_name: /dev/sda1
   volume_size: 100
-  device_type: gp2
+  volume_type: gp2
   delete_on_termination: False
 - device_name: /dev/sdb
   volume_size: 100
-  device_type: gp2
+  volume_type: gp2
   delete_on_termination: False
 
 openshift_aws_node_group_config_node_volumes:
 - device_name: /dev/sda1
   volume_size: 100
-  device_type: gp2
+  volume_type: gp2
   delete_on_termination: True
 - device_name: /dev/sdb
   volume_size: 100
-  device_type: gp2
+  volume_type: gp2
   delete_on_termination: True
 
 # build_instance_tags is a custom filter in role lib_utils
@@ -154,7 +154,7 @@ openshift_aws_master_group:
   group: master
   tags:
     host-type: master
-    sub-host-type: default
+    sub-host-type: master
     runtime: docker
 
 openshift_aws_node_groups:
@@ -172,6 +172,11 @@ openshift_aws_node_groups:
     sub-host-type: infra
     runtime: docker
 
+openshift_aws_node_group_mappings:
+  master: 'node-config-master'
+  compute: 'node-config-compute'
+  infra: 'node-config-infra'
+
 openshift_aws_created_asgs: []
 openshift_aws_current_asgs: []
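
Note the block-device key rename from device_type to volume_type: any inventory that overrides these volume lists needs the same rename. A sketch of such an override using the new key (size is illustrative):

```yaml
openshift_aws_node_group_config_node_volumes:
- device_name: /dev/sda1
  volume_size: 200
  volume_type: gp2
  delete_on_termination: True
```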
 

+ 1 - 0
roles/openshift_aws/tasks/setup_master_group.yml

@@ -26,6 +26,7 @@
     groups: "{{ openshift_aws_masters_groups }}"
     name: "{{ item.public_dns_name }}"
     hostname: "{{ openshift_aws_clusterid }}-master-{{ item.instance_id[:-5] }}"
+    openshift_node_group_name: "{{ openshift_aws_node_group_mappings[item.tags['sub-host-type']] }}"
   with_items: "{{ instancesout.instances }}"
 
 - name: wait for ssh to become available

+ 1 - 0
roles/openshift_aws/tasks/setup_scale_group_facts.yml

@@ -46,4 +46,5 @@
     ansible_ssh_host: "{{ item.public_dns_name }}"
     name: "{{ item.public_dns_name }}"
     hostname: "{{ item.public_dns_name }}"
+    openshift_node_group_name: "{{ openshift_aws_node_group_mappings[item.tags['sub-host-type']] }}"
   with_items: "{{ qinstances.instances }}"

+ 0 - 10
roles/openshift_cloud_provider/tasks/vsphere.yml

@@ -9,13 +9,3 @@
   - openshift_cloudprovider_vsphere_host is defined
   - openshift_cloudprovider_vsphere_datacenter is defined
   - openshift_cloudprovider_vsphere_datastore is defined
-
-- name: Configure vsphere svc account
-  include_tasks: vsphere-svc.yml
-  when:
-  - openshift_version | version_compare('3.9', '>=')
-  - inventory_hostname == openshift_master_hosts[0]
-
-- name: Modify controller args
-  include_tasks: update-vsphere.yml
-  notify: restart master

+ 1 - 1
roles/openshift_cluster_monitoring_operator/tasks/install.yaml

@@ -34,7 +34,7 @@
 
 - name: Apply the cluster monitoring operator template
   shell: >
-    {{ openshift_client_binary }} process -f "{{ mktemp.stdout 	}}/{{ item }}"
+    {{ openshift_client_binary }} process -n openshift-monitoring -f "{{ mktemp.stdout 	}}/{{ item }}"
     --param OPERATOR_IMAGE="{{ openshift_cluster_monitoring_operator_image }}"
     --param PROMETHEUS_OPERATOR_IMAGE="{{ openshift_cluster_monitoring_operator_prometheus_operator_repo }}"
     --param ALERTMANAGER_IMAGE="{{ openshift_cluster_monitoring_operator_alertmanager_repo }}"

+ 0 - 1
roles/openshift_control_plane/defaults/main.yml

@@ -134,7 +134,6 @@ openshift_master_auth_token_max_seconds: 500
 # oo_htpasswd_users_from_file is a custom filter in role lib_utils
 l_osm_htpasswd_users_none: {}
 openshift_master_htpasswd_users: "{{ lookup('file', openshift_master_htpasswd_file) | oo_htpasswd_users_from_file if openshift_master_htpasswd_file is defined else l_osm_htpasswd_users_none }}"
-openshift_master_manage_htpasswd: True
 l_osm_request_header_none: {}
 openshift_master_request_header_ca: "{{ lookup('file', openshift_master_request_header_ca_file) if openshift_master_request_header_ca_file is defined else l_osm_request_header_none }}"
 openshift_master_oauth_grant_method: auto

+ 15 - 0
roles/openshift_facts/defaults/main.yml

@@ -116,3 +116,18 @@ openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_typ
 openshift_master_api_port: "8443"
 openshift_ca_host: "{{ groups.oo_first_master.0 }}"
 openshift_use_openshift_sdn: true
+
+openshift_node_groups:
+  - name: node-config-master
+    labels:
+      - 'node-role.kubernetes.io/master=true'
+    edits: []
+  - name: node-config-infra
+    labels:
+      - 'node-role.kubernetes.io/infra=true'
+    edits: []
+  - name: node-config-compute
+    labels:
+      - 'node-role.kubernetes.io/compute=true'
+    edits: []
+openshift_master_manage_htpasswd: True

+ 5 - 0
roles/openshift_gcp/defaults/main.yml

@@ -60,3 +60,8 @@ openshift_gcp_startup_script_file: "{{ role_path }}/files/bootstrap-script.sh"
 openshift_gcp_user_data_file: ''
 
 openshift_gcp_multizone: False
+
+openshift_gcp_node_group_mapping:
+  masters: 'node-config-master'
+  infra: 'node-config-infra'
+  compute: 'node-config-compute'

+ 25 - 25
roles/openshift_gcp/tasks/setup_scale_group_facts.yml

@@ -1,11 +1,26 @@
 ---
-- name: Add masters to requisite groups
+- name: Add node instances to node group
   add_host:
     name: "{{ hostvars[item].gce_name }}"
-    groups: masters, etcd
-    openshift_node_labels:
-      node-role.kubernetes.io/master: "true"
-  with_items: "{{ groups['tag_ocp-master'] }}"
+    groups: nodes, new_nodes
+    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['compute'] }}"
+  with_items: "{{ groups['tag_ocp-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add bootstrap node instances
+  add_host:
+    name: "{{ hostvars[item].gce_name }}"
+    groups: bootstrap_nodes
+    openshift_is_bootstrapped: True
+    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['compute'] }}"
+  with_items: "{{ groups['tag_ocp-node'] | default([]) | intersect(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add bootstrap node instances as nodes
+  add_host:
+    name: "{{ item }}"
+    groups: nodes, new_nodes
+    openshift_is_bootstrapped: True
+  with_items: "{{ groups['tag_ocp-bootstrap'] | default([]) }}"
+  when: all_nodes | default(False)
 
 - name: Add a master to the primary masters group
   add_host:
@@ -23,27 +38,12 @@
   add_host:
     name: "{{ hostvars[item].gce_name }}"
     groups: nodes, new_nodes
-    openshift_node_labels:
-      node-role.kubernetes.io/infra: "true"
+    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['infra'] }}"
   with_items: "{{ groups['tag_ocp-infra-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
 
-- name: Add node instances to node group
-  add_host:
-    name: "{{ hostvars[item].gce_name }}"
-    groups: nodes, new_nodes
-  with_items: "{{ groups['tag_ocp-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
-
-- name: Add bootstrap node instances
+- name: Add masters to requisite groups
   add_host:
     name: "{{ hostvars[item].gce_name }}"
-    groups: bootstrap_nodes
-    openshift_is_bootstrapped: True
-  with_items: "{{ groups['tag_ocp-node'] | default([]) | intersect(groups['tag_ocp-bootstrap'] | default([])) }}"
-
-- name: Add bootstrap node instances as nodes
-  add_host:
-    name: "{{ item }}"
-    groups: nodes, new_nodes
-    openshift_is_bootstrapped: True
-  with_items: "{{ groups['tag_ocp-bootstrap'] | default([]) }}"
-  when: all_nodes | default(False)
+    groups: masters, etcd
+    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['masters'] }}"
+  with_items: "{{ groups['tag_ocp-master'] }}"

+ 0 - 13
roles/openshift_hosted_templates/files/v3.10/enterprise/registry-console.yaml

@@ -78,19 +78,6 @@ objects:
           targetPort: 9090
       selector:
         name: "registry-console"
-  - kind: ImageStream
-    apiVersion: v1
-    metadata:
-      name: registry-console
-      annotations:
-        description: Atomic Registry console
-    spec:
-      tags:
-        - annotations: null
-          from:
-            kind: DockerImage
-            name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
-          name: ${IMAGE_VERSION}
   - kind: OAuthClient
     apiVersion: v1
     metadata:

+ 0 - 13
roles/openshift_hosted_templates/files/v3.10/origin/registry-console.yaml

@@ -78,19 +78,6 @@ objects:
           targetPort: 9090
       selector:
         name: "registry-console"
-  - kind: ImageStream
-    apiVersion: v1
-    metadata:
-      name: registry-console
-      annotations:
-        description: Atomic Registry console
-    spec:
-      tags:
-        - annotations: null
-          from:
-            kind: DockerImage
-            name: ${IMAGE_PREFIX}${IMAGE_BASENAME}:${IMAGE_VERSION}
-          name: ${IMAGE_VERSION}
   - kind: OAuthClient
     apiVersion: v1
     metadata:

+ 0 - 22
roles/openshift_logging_fluentd/tasks/label_and_wait.yaml

@@ -1,22 +0,0 @@
----
-# This script is a special case because we need to pause between nodes while
-# labeling to avoid overloading the scheduler.
-# Also, looping over this task file (label_and_wait) causes memory to balloon
-# in some instances due to dynamic include bug in ansible, so we can't add the
-# wait in ansible directly.
-- name: Create temporary fluentd labeling script
-  template:
-    src: fluentd_label.sh.j2
-    dest: /tmp/fluentd_label.temp.sh
-    mode: "0744"
-
-- name: Execute the fluentd temporary labeling script
-  command: "/tmp/fluentd_label.temp.sh {{ fluentd_host }}"
-  with_items: "{{ openshift_logging_fluentd_hosts }}"
-  loop_control:
-    loop_var: fluentd_host
-
-- name: Remove temporary fluentd labeling script
-  file:
-    path: /tmp/fluentd_label.temp.sh
-    state: absent

+ 11 - 1
roles/openshift_logging_fluentd/tasks/main.yaml

@@ -199,7 +199,17 @@
     openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
   when: "'--all' in openshift_logging_fluentd_hosts"
 
-- import_tasks: label_and_wait.yaml
+# We need to pause between nodes while labeling to avoid overloading the scheduler.
+- name: Label OCP nodes for Fluentd
+  shell: >
+    {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig \
+    label --overwrite node {{ fluentd_host }} \
+    {% for k, v in openshift_logging_fluentd_nodeselector.items() %} {{ k }}={{ v }} {% endfor %} \
+    && \
+    sleep {{ openshift_logging_fluentd_label_delay }}
+  with_items: "{{ openshift_logging_fluentd_hosts }}"
+  loop_control:
+    loop_var: fluentd_host
 
 - name: Delete temp directory
   file:
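
Per node, the new inline task runs roughly the following (node name is hypothetical; the label set comes from openshift_logging_fluentd_nodeselector, assumed here to be the usual logging-infra-fluentd=true, and the pause length from openshift_logging_fluentd_label_delay):

```sh
oc --config=/etc/origin/master/admin.kubeconfig \
  label --overwrite node app-node-1.example.com logging-infra-fluentd=true \
  && sleep "${LABEL_DELAY}"   # placeholder for openshift_logging_fluentd_label_delay
```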

+ 0 - 8
roles/openshift_logging_fluentd/templates/fluentd_label.sh.j2

@@ -1,8 +0,0 @@
-#!/bin/bash
-
-{{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig \
-  label --overwrite node $1 \
-  {% for k, v in openshift_logging_fluentd_nodeselector.items() %} {{ k }}={{ v }} {% endfor %}
-
-# We sleep here because scheduler will get overwhelmed if we label all nodes at once.
-sleep {{ openshift_logging_fluentd_label_delay }}

+ 0 - 16
roles/openshift_manage_node/tasks/config.yml

@@ -9,19 +9,3 @@
   until: node_schedulable is succeeded
   when: "'nodename' in openshift.node"
   delegate_to: "{{ openshift_master_host }}"
-
-- name: Label nodes
-  oc_label:
-    name: "{{ openshift.node.nodename }}"
-    kind: node
-    state: add
-    labels: "{{ l_all_labels | lib_utils_oo_dict_to_list_of_dict }}"
-    namespace: default
-  when:
-    - "'nodename' in openshift.node"
-    - l_all_labels != {}
-  delegate_to: "{{ openshift_master_host }}"
-  vars:
-    l_node_labels: "{{ openshift_node_labels | default({}) }}"
-    l_master_labels: "{{ openshift_manage_node_is_master | ternary(openshift_master_node_labels, {}) }}"
-    l_all_labels: "{{ l_node_labels | combine(l_master_labels) }}"

+ 1 - 1
roles/openshift_monitor_availability/tasks/install_monitor_app_create.yaml

@@ -1,7 +1,7 @@
 ---
 - name: Apply the app template
   shell: >
-    {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/monitor-app-create.yaml"
+    {{ openshift_client_binary }} process -n openshift-monitor-availability -f "{{ mktemp.stdout }}/monitor-app-create.yaml"
     --param IMAGE="{{ openshift_monitor_app_create_image }}"
     --param RUN_INTERVAL="{{ openshift_monitor_app_create_run_interval }}"
     --param TIMEOUT="{{ openshift_monitor_app_create_timeout }}"

+ 6 - 1
roles/openshift_node/tasks/upgrade.yml

@@ -66,7 +66,12 @@
     name: "{{ openshift.node.nodename | lower }}"
   register: node_output
   delegate_to: "{{ groups.oo_first_master.0 }}"
-  until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
+  until:
+  - node_output.results is defined
+  - node_output.results.returncode is defined
+  - node_output.results.results is defined
+  - node_output.results.returncode == 0
+  - node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True
   # Give the node three minutes to come back online.
   retries: 36
   delay: 5

+ 2 - 19
roles/openshift_node/tasks/upgrade/bootstrap_changes.yml

@@ -94,25 +94,8 @@
     path: "{{ openshift.common.config_base }}/node/certificates"
     state: absent
 
-- name: Determine if node already has a dynamic config group
-  command: grep -E '^BOOTSTRAP_CONFIG_NAME=.+' "/etc/sysconfig/{{ openshift_service_type }}-node"
-  ignore_errors: true
-  register: existing
-
-- name: Update the sysconfig to group "{{ r_node_dynamic_config_name }}"
+- name: Update the sysconfig to group "{{ openshift_node_group_name }}"
   lineinfile:
     dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
-    line: "BOOTSTRAP_CONFIG_NAME={{ r_node_dynamic_config_name }}"
+    line: "BOOTSTRAP_CONFIG_NAME={{ openshift_node_group_name }}"
     regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
-  when: r_node_dynamic_config_force|default(False) or existing is failed
-
-- name: Set up node-config.yml if dynamic configuration is off
-  copy:
-    remote_src: true
-    src: "{{ openshift.common.config_base }}/node/bootstrap-node-config.yaml"
-    dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
-    force: no
-    owner: root
-    group: root
-    mode: 0600
-  when: r_node_dynamic_config_name|length == 0

+ 1 - 1
roles/openshift_node/tasks/upgrade/restart.yml

@@ -34,7 +34,7 @@
   service:
     name: "{{ openshift_service_type }}-node"
     state: started
-  async: 1
+  async: 100
   poll: 0
   register: node_service
   failed_when: false

+ 3 - 16
roles/openshift_node_group/defaults/main.yml

@@ -1,20 +1,4 @@
 ---
-openshift_node_groups:
-- name: node-config-master
-  labels:
-  - 'node-role.kubernetes.io/master=true'
-  edits: []
-- name: node-config-infra
-  labels:
-  - 'node-role.kubernetes.io/infra=true'
-  edits: []
-- name: node-config-compute
-  labels:
-  - 'node-role.kubernetes.io/compute=true'
-  edits: []
-
-openshift_node_group_namespace: openshift-node
-
 openshift_use_crio: False
 l_crio_var_sock: "/var/run/crio/crio.sock"
 
@@ -27,3 +11,6 @@ openshift_node_group_node_data_dir: "{{ openshift_node_group_node_data_dir_defau
 openshift_imageconfig_format: "{{ oreg_url | default(l_osm_registry_url_default) }}"
 
 openshift_node_group_use_persistentlocalvolumes: "{{ openshift_persistentlocalstorage_enabled | default(False) | bool }}"
+openshift_node_group_name: 'node-config-compute'
+
+l_openshift_node_group_labels: []

+ 2 - 3
roles/openshift_node_group/files/sync.yaml

@@ -117,9 +117,8 @@ spec:
               # TODO: kubelet doesn't relabel nodes, best effort for now
               # https://github.com/kubernetes/kubernetes/issues/59314
               if args="$(openshift start node --write-flags --config /etc/origin/node/node-config.yaml)"; then
-                labels=' --node-labels=([^ ]+) '
-                if [[ ${args} =~ ${labels} ]]; then
-                  labels="${BASH_REMATCH[1]//,/ }"
+                labels=$(tr ' ' '\n' <<<$args | sed -ne '/^--node-labels=/ { s/^--node-labels=//; p; }' | tr ',\n' ' ')
+                if [[ -n "${labels}" ]]; then
                   echo "info: Applying node labels $labels" 2>&1
                   if ! oc label --config=/etc/origin/node/node.kubeconfig "node/${NODE_NAME}" ${labels} --overwrite; then
                     echo "error: Unable to apply labels, will retry in 10" 2>&1

+ 0 - 1
roles/openshift_node_group/tasks/bootstrap.yml

@@ -4,7 +4,6 @@
     src: node-config.yaml.j2
     dest: "/etc/origin/node/bootstrap-node-config.yaml"
     mode: 0600
-
 # Make sure a single master has node-config, since the SDN and sync daemonsets require it
 - name: remove existing node config
   file:

+ 2 - 7
roles/openshift_node_group/tasks/bootstrap_config.yml

@@ -9,13 +9,8 @@
   with_items:
   - /etc/origin/node/pods
   - /etc/origin/node/certificates
-- name: Determine if node already has a dynamic config group
-  command: grep -E '^BOOTSTRAP_CONFIG_NAME=.+' "/etc/sysconfig/{{ openshift_service_type }}-node"
-  ignore_errors: true
-  register: existing
-- name: Update the sysconfig to group "{{ r_node_dynamic_config_name }}"
+- name: Update the sysconfig to group "{{ openshift_node_group_name }}"
   lineinfile:
     dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
-    line: "BOOTSTRAP_CONFIG_NAME={{ r_node_dynamic_config_name }}"
+    line: "BOOTSTRAP_CONFIG_NAME={{ openshift_node_group_name }}"
     regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
-  when: r_node_dynamic_config_force|default(False) or existing is failed

+ 7 - 0
roles/openshift_node_group/tasks/check_for_config.yml

@@ -0,0 +1,7 @@
+---
+# This file is looped over, must use include_tasks, not import_tasks
+- include_tasks: fetch_config.yml
+
+- fail:
+    msg: "Configmap for {{ l_openshift_node_group_name }} must be present"
+  when: configout.results.results.0 == {}
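
The new per-group check amounts to verifying that a configmap named after the node group already exists in openshift_node_group_namespace (openshift-node by default). A rough oc equivalent, with an assumed group name:

  # Sketch only: fail early if the node group's configmap is missing.
  group=node-config-compute
  oc --config=/etc/origin/master/admin.kubeconfig get configmap "$group" -n openshift-node >/dev/null 2>&1 \
    || { echo "Configmap for $group must be present"; exit 1; }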

+ 7 - 0
roles/openshift_node_group/tasks/check_for_configs.yml

@@ -0,0 +1,7 @@
+---
+- include_tasks: check_for_config.yml
+  vars:
+    l_openshift_node_group_name: "{{ node_group.name }}"
+  with_items: "{{ openshift_node_groups }}"
+  loop_control:
+    loop_var: node_group

+ 94 - 103
roles/openshift_node_group/tasks/create_config.yml

@@ -1,117 +1,108 @@
 ---
-- name: fetch node configmap
-  oc_configmap:
-    name: "{{ openshift_node_group_name }}"
-    namespace: "{{ openshift_node_group_namespace }}"
-    state: list
-  register: configout
-  run_once: true
+# This file is looped over, must use include_tasks, not import_tasks
+- include_tasks: fetch_config.yml
 
-- name: debug node config
-  debug:
-    var: configout
-  run_once: true
 
-- when:
-  - configout.results.results.0 == {} or (configout.results.results.0 != {} and (openshift_node_group_edits|length > 0 or openshift_node_group_labels|length > 0))
-  block:
-  - name: create a temp dir for this work
-    command: mktemp -d /tmp/openshift_node_config-XXXXXX
-    register: mktempout
-    run_once: true
+- name: create a temp dir for this work
+  command: mktemp -d /tmp/openshift_node_config-XXXXXX
+  register: mktempout
+  run_once: true
 
-  - name: create node config template
-    template:
-      src: node-config.yaml.j2
-      dest: "{{ mktempout.stdout }}/node-config.yaml"
-    when:
-    - configout.results.results.0 == {}
-    run_once: true
+- name: create node config template
+  template:
+    src: node-config.yaml.j2
+    dest: "{{ mktempout.stdout }}/node-config.yaml"
+  when:
+  - configout.results.results.0 == {}
+  run_once: true
 
-  - name: lay down the config from the existing configmap
-    copy:
-      content: "{{ configout.results.results.0.data['node-config.yaml'] }}"
-      dest: "{{ mktempout.stdout }}/node-config.yaml"
-    when:
-    - configout.results.results.0 != {}
-    run_once: true
+- name: lay down the config from the existing configmap
+  copy:
+    content: "{{ configout.results.results.0.data['node-config.yaml'] }}"
+    dest: "{{ mktempout.stdout }}/node-config.yaml"
+  when:
+  - configout.results.results.0 != {}
+  run_once: true
 
-  - name: "specialize the generated configs for {{ openshift_node_group_name }}"
-    yedit:
-      content:
-      src: "{{ mktempout.stdout }}/node-config.yaml"
-      edits: "{{ openshift_node_group_edits | union(openshift_node_labels_edit) }}"
-    register: yeditout
-    run_once: true
+- name: "specialize the generated configs for {{ l_openshift_node_group_name }}"
+  yedit:
+    content:
+    src: "{{ mktempout.stdout }}/node-config.yaml"
+    edits: "{{ l_openshift_node_group_all_edits }}"
+  register: yeditout
+  run_once: true
+  vars:
+    l_openshift_node_group_all_edits: "{{ l_openshift_node_group_edits | union(openshift_node_labels_edit) }}"
+  when: l_openshift_node_group_all_edits != []
 
-  - name: show the yeditout debug var
-    debug:
-      var: yeditout
-    run_once: true
+- name: show the yeditout debug var
+  debug:
+    var: yeditout
+  run_once: true
 
-  - name: create volume config template
-    template:
-      src: volume-config.yaml.j2
-      dest: "{{ mktempout.stdout }}/volume-config.yaml"
-    when:
-    - "'data' not in configout['results']['results'][0] or 'volume-config.yaml' not in configout['results']['results'][0]['data']"
-    - openshift_node_group_name != ""
-    - openshift_node_local_quota_per_fsgroup is defined
-    - openshift_node_local_quota_per_fsgroup != ""
-    run_once: true
+- name: create volume config template
+  template:
+    src: volume-config.yaml.j2
+    dest: "{{ mktempout.stdout }}/volume-config.yaml"
+  when:
+  - "'data' not in configout['results']['results'][0] or 'volume-config.yaml' not in configout['results']['results'][0]['data']"
+  - l_openshift_node_group_name != ""
+  - openshift_node_local_quota_per_fsgroup is defined
+  - openshift_node_local_quota_per_fsgroup != ""
+  run_once: true
 
-  - name: lay down the volume config from the existing configmap
-    copy:
-      content: "{{ configout.results.results.0.data['volume-config.yaml'] }}"
-      dest: "{{ mktempout.stdout }}/volume-config.yaml"
-    when:
-    - "'data' in configout['results']['results'][0]"
-    - "'volume-config.yaml' in configout['results']['results'][0]['data']"
-    - openshift_node_group_name != ""
-    - openshift_node_local_quota_per_fsgroup is defined
-    - openshift_node_local_quota_per_fsgroup != ""
-    run_once: true
+- name: lay down the volume config from the existing configmap
+  copy:
+    content: "{{ configout.results.results.0.data['volume-config.yaml'] }}"
+    dest: "{{ mktempout.stdout }}/volume-config.yaml"
+  when:
+  - "'data' in configout['results']['results'][0]"
+  - "'volume-config.yaml' in configout['results']['results'][0]['data']"
+  - l_openshift_node_group_name != ""
+  - openshift_node_local_quota_per_fsgroup is defined
+  - openshift_node_local_quota_per_fsgroup != ""
+  run_once: true
 
-  - name: "specialize the volume config for {{ openshift_node_group_name }}"
-    yedit:
-      content:
-      src: "{{ mktempout.stdout }}/volume-config.yaml"
-      key: localQuota.perFSGroup
-      value: "{{ openshift_node_local_quota_per_fsgroup }}"
-    register: volume_yeditout
-    when:
-    - openshift_node_local_quota_per_fsgroup is defined
-    - openshift_node_local_quota_per_fsgroup != ""
-    run_once: true
+- name: "specialize the volume config for {{ l_openshift_node_group_name }}"
+  yedit:
+    content:
+    src: "{{ mktempout.stdout }}/volume-config.yaml"
+    key: localQuota.perFSGroup
+    value: "{{ openshift_node_local_quota_per_fsgroup }}"
+  register: volume_yeditout
+  when:
+  - openshift_node_local_quota_per_fsgroup is defined
+  - openshift_node_local_quota_per_fsgroup != ""
+  run_once: true
 
-  - name: show the volume_yeditout debug var
-    debug:
-      var: volume_yeditout
-    run_once: true
+- name: show the volume_yeditout debug var
+  debug:
+    var: volume_yeditout
+  run_once: true
 
-  - name: create node-config.yaml configmap
-    oc_configmap:
-      name: "{{ openshift_node_group_name }}"
-      namespace: "{{ openshift_node_group_namespace }}"
-      from_file:
-        node-config.yaml: "{{ mktempout.stdout }}/node-config.yaml"
-    when:
-    - openshift_node_local_quota_per_fsgroup is undefined or openshift_node_local_quota_per_fsgroup == ""
-    run_once: true
+- name: create node-config.yaml configmap
+  oc_configmap:
+    name: "{{ l_openshift_node_group_name }}"
+    namespace: "{{ openshift_node_group_namespace }}"
+    from_file:
+      node-config.yaml: "{{ mktempout.stdout }}/node-config.yaml"
+  when:
+  - openshift_node_local_quota_per_fsgroup is undefined or openshift_node_local_quota_per_fsgroup == ""
+  run_once: true
 
-  - name: create node-config.yaml and volume-config.yaml configmap
-    oc_configmap:
-      name: "{{ openshift_node_group_name }}"
-      namespace: "{{ openshift_node_group_namespace }}"
-      from_file:
-        node-config.yaml: "{{ mktempout.stdout }}/node-config.yaml"
-        volume-config.yaml: "{{ mktempout.stdout }}/volume-config.yaml"
-    when:
-    - openshift_node_local_quota_per_fsgroup is defined and openshift_node_local_quota_per_fsgroup != ""
-    run_once: true
+- name: create node-config.yaml and volume-config.yaml configmap
+  oc_configmap:
+    name: "{{ l_openshift_node_group_name }}"
+    namespace: "{{ openshift_node_group_namespace }}"
+    from_file:
+      node-config.yaml: "{{ mktempout.stdout }}/node-config.yaml"
+      volume-config.yaml: "{{ mktempout.stdout }}/volume-config.yaml"
+  when:
+  - openshift_node_local_quota_per_fsgroup is defined and openshift_node_local_quota_per_fsgroup != ""
+  run_once: true
 
-  - name: remove templated files
-    file:
-      dest: "{{ mktempout.stdout }}/"
-      state: absent
-    run_once: true
+- name: remove templated files
+  file:
+    dest: "{{ mktempout.stdout }}/"
+    state: absent
+  run_once: true

+ 14 - 0
roles/openshift_node_group/tasks/fetch_config.yml

@@ -0,0 +1,14 @@
+---
+# This file is looped over, must use include_tasks, not import_tasks
+- name: fetch node configmap
+  oc_configmap:
+    name: "{{ l_openshift_node_group_name }}"
+    namespace: "{{ openshift_node_group_namespace }}"
+    state: list
+  register: configout
+  run_once: true
+
+- name: debug node config
+  debug:
+    var: configout
+  run_once: true

+ 3 - 3
roles/openshift_node_group/tasks/main.yml

@@ -2,9 +2,9 @@
 - name: Build node config maps
   include_tasks: create_config.yml
   vars:
-    openshift_node_group_name: "{{ node_group.name }}"
-    openshift_node_group_edits: "{{ node_group.edits | default([]) }}"
-    openshift_node_group_labels: "{{ node_group.labels | default([]) }}"
+    l_openshift_node_group_name: "{{ node_group.name }}"
+    l_openshift_node_group_edits: "{{ node_group.edits | default([]) }}"
+    l_openshift_node_group_labels: "{{ node_group.labels }}"
   with_items: "{{ openshift_node_groups }}"
   loop_control:
     loop_var: node_group

+ 3 - 3
roles/openshift_node_group/tasks/upgrade.yml

@@ -2,8 +2,8 @@
 - name: Ensure all node groups have bootstrap settings
   include_tasks: create_config.yml
   vars:
-    openshift_node_group_name: "{{ node_group.name }}"
-    openshift_node_group_edits:
+    l_openshift_node_group_name: "{{ node_group.name }}"
+    l_openshift_node_group_edits:
     - key: servingInfo.certFile
       value: ""
     - key: servingInfo.keyFile
@@ -27,7 +27,7 @@
       - /etc/origin/node/certificates
     - key: masterKubeConfig
       value: node.kubeconfig
-    openshift_node_group_labels: "{{ node_group.labels | default([]) }}"
+    l_openshift_node_group_labels: "{{ node_group.labels }}"
   with_items: "{{ openshift_node_groups }}"
   loop_control:
     loop_var: node_group

+ 4 - 1
roles/openshift_node_group/templates/node-config.yaml.j2

@@ -50,7 +50,10 @@ kubeletArguments:
   cloud-provider:
   - {{ openshift_node_group_cloud_provider }}
 {% endif %}
-  node-labels: []
+{% if l_openshift_node_group_labels != [] %}
+  node-labels:
+  - < this is always modified via yedit: openshift_node_labels_edit >
+{% endif %}
   enable-controller-attach-detach:
   - 'true'
 masterClientConnectionOverrides:

+ 3 - 3
roles/openshift_node_group/vars/main.yml

@@ -1,8 +1,8 @@
 ---
 # These values should never be passed in, they are needed
-openshift_node_group_edits: []
-openshift_node_group_labels: []
 openshift_node_labels_edit:
 - key: kubeletArguments.node-labels
   value:
-  - "{{ openshift_node_group_labels | join(',') }}"
+  - "{{ l_openshift_node_group_labels | join(',') }}"
+
+openshift_node_group_namespace: openshift-node

+ 0 - 6
roles/openshift_openstack/defaults/main.yml

@@ -16,12 +16,6 @@ openshift_openstack_use_lbaas_load_balancer: false
 openshift_openstack_lbaasv2_provider: Octavia
 openshift_openstack_use_vm_load_balancer: false
 
-openshift_openstack_cluster_node_labels:
-  app:
-    region: primary
-  infra:
-    region: infra
-
 openshift_openstack_install_debug_packages: false
 openshift_openstack_required_packages:
   - NetworkManager

+ 4 - 10
roles/openshift_openstack/tasks/generate-dns.yml

@@ -10,19 +10,12 @@
   with_items: "{{ groups['infra_hosts'] }}"
   when: openshift_openstack_public_router_ip is defined
 
-- name: "Add public master cluster hostname records to the private A records (single master)"
+- name: "Add public master cluster hostname records to the private A records"
   set_fact:
-    private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'fqdn': hostvars[groups.masters[0]].openshift_master_cluster_public_hostname, 'ip': hostvars[groups.masters[0]].private_v4 } ] }}"
+    private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'fqdn': hostvars[groups.masters[0]].openshift_master_cluster_public_hostname, 'ip': openshift_openstack_private_api_ip } ] }}"
   when:
     - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
-    - openshift_openstack_num_masters == 1
-
-- name: "Add public master cluster hostname records to the private A records (multi-master)"
-  set_fact:
-    private_records: "{{ private_records | default([]) + [ { 'type': 'A', 'fqdn': hostvars[groups.masters[0]].openshift_master_cluster_public_hostname, 'ip': hostvars[groups.lb[0]].private_v4 } ] }}"
-  when:
-    - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
-    - openshift_openstack_num_masters > 1
+    - openshift_openstack_private_api_ip is defined
 
 - name: "Set the private DNS server to use the external value (if provided)"
   set_fact:
@@ -63,6 +56,7 @@
     public_records: "{{ public_records | default([]) + [ { 'type': 'A', 'fqdn': hostvars[groups.masters[0]].openshift_master_cluster_public_hostname, 'ip': openshift_openstack_public_api_ip } ] }}"
   when:
     - hostvars[groups.masters[0]].openshift_master_cluster_public_hostname is defined
+    - openshift_openstack_public_api_ip is defined
 
 - name: "Set the public DNS server details to use the external value (if provided)"
   set_fact:

+ 3 - 8
roles/openshift_openstack/templates/heat_stack.yaml.j2

@@ -650,6 +650,7 @@ resources:
                 k8s_type: masters
                 cluster_id: {{ openshift_openstack_full_dns_domain }}
           type:        master
+          openshift_node_group_name: node-config-master
           image:       {{ openshift_openstack_master_image }}
           flavor:      {{ openshift_openstack_master_flavor }}
           key_name:    {{ openshift_openstack_keypair_name }}
@@ -738,10 +739,7 @@ resources:
                 cluster_id: {{ openshift_openstack_full_dns_domain }}
           type:        node
           subtype:     app
-          node_labels:
-{% for k, v in openshift_openstack_cluster_node_labels.app.items() %}
-            {{ k|e }}: {{ v|e }}
-{% endfor %}
+          openshift_node_group_name: node-config-compute
           image:       {{ openshift_openstack_node_image }}
           flavor:      {{ openshift_openstack_node_flavor }}
           key_name:    {{ openshift_openstack_keypair_name }}
@@ -810,10 +808,7 @@ resources:
                 cluster_id: {{ openshift_openstack_full_dns_domain }}
           type:        node
           subtype:     infra
-          node_labels:
-{% for k, v in openshift_openstack_cluster_node_labels.infra.items() %}
-            {{ k|e }}: {{ v|e }}
-{% endfor %}
+          openshift_node_group_name: node-config-infra
           image:       {{ openshift_openstack_infra_image }}
           flavor:      {{ openshift_openstack_infra_flavor }}
           key_name:    {{ openshift_openstack_keypair_name }}

+ 5 - 5
roles/openshift_openstack/templates/heat_stack_server.yaml.j2

@@ -159,10 +159,10 @@ parameters:
       - range: { min: 1, max: 1024 }
         description: must be between 1 and 1024 Gb.
 
-  node_labels:
-    type: json
-    description: OpenShift Node Labels
-    default: {"region": "default" }
+  openshift_node_group_name:
+    type: string
+    default: ''
+    description: The openshift node group name for this server.
 
   scheduler_hints:
     type: json
@@ -240,7 +240,7 @@ resources:
         clusterid: { get_param: cluster_id }
         host-type: { get_param: type }
         sub-host-type:    { get_param: subtype }
-        node_labels: { get_param: node_labels }
+        openshift_node_group_name: { get_param: openshift_node_group_name }
 {% if openshift_openstack_dns_nameservers %}
         openshift_hostname: { get_param: name }
 {% endif %}

+ 3 - 0
roles/openshift_service_catalog/defaults/main.yml

@@ -7,3 +7,6 @@ openshift_use_openshift_sdn: True
 os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
 
 openshift_service_catalog_image: "{{ l_os_registry_url | regex_replace('${component}' | regex_escape, 'service-catalog') }}"
+
+# Number of retries when waiting for the service catalog healthz to return 'ok' (retried every 10 seconds)
+openshift_service_catalog_retries: 60

+ 1 - 1
roles/openshift_service_catalog/tasks/start.yml

@@ -9,7 +9,7 @@
     warn: no
   register: endpoint_health
   until: endpoint_health.stdout == 'ok'
-  retries: 60
+  retries: "{{ openshift_service_catalog_retries }}"
   delay: 10
   changed_when: false
   # Ignore errors so we can log troubleshooting info on failures.

+ 4 - 1
roles/openshift_web_console/files/console-template.yaml

@@ -37,7 +37,10 @@ objects:
   spec:
     replicas: "${{REPLICA_COUNT}}"
     strategy:
-      type: Recreate
+      type: RollingUpdate
+      rollingUpdate:
+        # behave like a recreate deployment, but don't wait for pods to terminate
+        maxUnavailable: 100%
     template:
       metadata:
         name: webconsole