فهرست منبع

Force openshift_node_group_name for all nodes

This commit enforces openshift_node_group_name is defined
for all nodes.
Michael Gugino 6 سال پیش
والد
کامیت
33db7a5b15
28 فایل تغییر یافته به همراه 264 افزوده شده و 142 حذف شده
  1. 8 1
      .papr-master-ha.inventory
  2. 6 1
      .papr.all-in-one.inventory
  3. 11 3
      .papr.inventory
  4. 6 0
      inventory/dynamic/gcp/group_vars/all/00_defaults.yml
  5. 6 8
      inventory/hosts.example
  6. 4 4
      inventory/hosts.glusterfs.registry-only.example
  7. 7 7
      inventory/hosts.glusterfs.storage-and-registry.example
  8. 5 1
      inventory/hosts.localhost
  9. 2 2
      inventory/hosts.openstack
  10. 0 2
      playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
  11. 0 2
      playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml
  12. 4 1
      playbooks/init/sanity_checks.yml
  13. 0 3
      playbooks/openshift-master/private/config.yml
  14. 0 2
      playbooks/openshift-node/private/configure_bootstrap.yml
  15. 136 0
      roles/lib_utils/action_plugins/node_group_checks.py
  16. 14 0
      roles/openshift_facts/defaults/main.yml
  17. 5 0
      roles/openshift_gcp/defaults/main.yml
  18. 25 25
      roles/openshift_gcp/tasks/setup_scale_group_facts.yml
  19. 0 16
      roles/openshift_manage_node/tasks/config.yml
  20. 2 19
      roles/openshift_node/tasks/upgrade/bootstrap_changes.yml
  21. 1 14
      roles/openshift_node_group/defaults/main.yml
  22. 0 1
      roles/openshift_node_group/tasks/bootstrap.yml
  23. 2 7
      roles/openshift_node_group/tasks/bootstrap_config.yml
  24. 10 9
      roles/openshift_node_group/tasks/create_config.yml
  25. 3 3
      roles/openshift_node_group/tasks/main.yml
  26. 3 3
      roles/openshift_node_group/tasks/upgrade.yml
  27. 4 1
      roles/openshift_node_group/templates/node-config.yaml.j2
  28. 0 7
      roles/openshift_node_group/vars/main.yml

+ 8 - 1
.papr-master-ha.inventory

@@ -15,6 +15,13 @@ openshift_portal_net=172.30.0.0/16
 openshift_enable_service_catalog=false
 debug_level=4
 
+my_node_group1_labels=['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true']
+my_node_group1={'name': 'node-config-all-in-one', 'labels': {{ my_node_group1_labels }} }
+
+openshift_node_groups=[{{ my_node_group1 }}]
+
+openshift_node_group_name="node-config-all-in-one"
+
 [all:vars]
 # bootstrap configs
 openshift_master_bootstrap_auto_approve=true
@@ -33,6 +40,6 @@ ocp-master2
 ocp-master3
 
 [nodes]
-ocp-master1 openshift_schedulable=true openshift_node_labels="{'node-role.kubernetes.io/infra':'true'}"
+ocp-master1 openshift_schedulable=true
 ocp-master2
 ocp-master3

+ 6 - 1
.papr.all-in-one.inventory

@@ -15,6 +15,11 @@ openshift_portal_net=172.30.0.0/16
 openshift_enable_service_catalog=false
 debug_level=4
 
+my_node_group1_labels=['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true']
+my_node_group1={'name': 'node-config-all-in-one', 'labels': {{ my_node_group1_labels }} }
+
+openshift_node_groups=[{{ my_node_group1 }}]
+
 [all:vars]
 # bootstrap configs
 openshift_master_bootstrap_auto_approve=true
@@ -29,4 +34,4 @@ ocp-master
 ocp-master
 
 [nodes]
-ocp-master openshift_schedulable=true openshift_node_labels="{'node-role.kubernetes.io/infra':'true'}" ansible_host="{{ lookup('env', 'RHCI_ocp_master_IP') }}"
+ocp-master openshift_schedulable=true ansible_host="{{ lookup('env', 'RHCI_ocp_master_IP') }}" openshift_node_group_name="node-config-all-in-one"

+ 11 - 3
.papr.inventory

@@ -14,6 +14,14 @@ openshift_check_min_host_memory_gb=1.9
 openshift_portal_net=172.30.0.0/16
 debug_level=4
 
+my_node_group1_labels=['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true']
+my_node_group1={'name': 'node-config-infra-master', 'labels': {{ my_node_group1_labels }} }
+
+my_node_group2_labels=['node-role.kubernetes.io/compute=true']
+my_node_group2={'name': 'node-config-compute', 'labels': {{ my_node_group2_labels }} }
+
+openshift_node_groups=[{{ my_node_group1 }}, {{ my_node_group2 }}]
+
 [all:vars]
 # bootstrap configs
 openshift_master_bootstrap_auto_approve=true
@@ -28,6 +36,6 @@ ocp-master
 ocp-master
 
 [nodes]
-ocp-master openshift_schedulable=true openshift_node_labels="{'node-role.kubernetes.io/infra':'true'}"
-ocp-node1
-ocp-node2
+ocp-master openshift_schedulable=true openshift_node_group_name="node-config-infra-master"
+ocp-node1 openshift_node_group_name="node-config-compute"
+ocp-node2 openshift_node_group_name="node-config-compute"

+ 6 - 0
inventory/dynamic/gcp/group_vars/all/00_defaults.yml

@@ -35,3 +35,9 @@ openshift_node_sdn_mtu: 1410
 osm_cluster_network_cidr: 172.16.0.0/16
 osm_host_subnet_length: 9
 openshift_portal_net: 172.30.0.0/16
+
+# masters and infra are the same in CI
+openshift_gcp_node_group_mapping:
+  masters: 'node-config-master'
+  infra: 'node-config-master'
+  compute: 'node-config-compute'

+ 6 - 8
inventory/hosts.example

@@ -12,8 +12,8 @@ ose3-master[1:3].test.example.com
 
 [nodes]
 ose3-master[1:3].test.example.com
-ose3-infra[1:2].test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
-ose3-node[1:2].test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
+ose3-infra[1:2].test.example.com
+ose3-node[1:2].test.example.com
 
 [nfs]
 ose3-master1.test.example.com
@@ -367,12 +367,11 @@ debug_level=2
 #
 # An OpenShift router will be created during install if there are
 # nodes present with labels matching the default router selector,
-# "node-role.kubernetes.io/infra=true". Set openshift_node_labels per node as needed in
-# order to label nodes.
+# "node-role.kubernetes.io/infra=true".
 #
 # Example:
 # [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
+# node.example.com openshift_node_group_name="node-config-infra"
 #
 # Router selector (optional)
 # Router will only be created if nodes matching this label are present.
@@ -418,12 +417,11 @@ debug_level=2
 #
 # An OpenShift registry will be created during install if there are
 # nodes present with labels matching the default registry selector,
-# "node-role.kubernetes.io/infra=true". Set openshift_node_labels per node as needed in
-# order to label nodes.
+# "node-role.kubernetes.io/infra=true".
 #
 # Example:
 # [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
+# node.example.com openshift_node_group_name="node-config-infra"
 #
 # Registry selector (optional)
 # Registry will only be created if nodes matching this label are present.

+ 4 - 4
inventory/hosts.glusterfs.registry-only.example

@@ -31,16 +31,16 @@ openshift_deployment_type=origin
 openshift_hosted_registry_storage_kind=glusterfs
 
 [masters]
-master
+master openshift_node_group_name="node-config-master"
 
 [nodes]
 # masters should be schedulable to run web console pods
 master  openshift_schedulable=True
 # A hosted registry, by default, will only be deployed on nodes labeled
 # "node-role.kubernetes.io/infra=true".
-node0   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node1   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node2   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node0   openshift_node_group_name="node-config-infra"
+node1   openshift_node_group_name="node-config-infra"
+node2   openshift_node_group_name="node-config-infra"
 
 [etcd]
 master

+ 7 - 7
inventory/hosts.glusterfs.storage-and-registry.example

@@ -36,17 +36,17 @@ master
 
 [nodes]
 # masters should be schedulable to run web console pods
-master  openshift_schedulable=True
+master  openshift_node_group_name="node-config-master" openshift_schedulable=True
 # It is recommended to not use a single cluster for both general and registry
 # storage, so two three-node clusters will be required.
-node0   openshift_schedulable=True
-node1   openshift_schedulable=True
-node2   openshift_schedulable=True
+node0   openshift_node_group_name="node-config-compute"
+node1   openshift_node_group_name="node-config-compute"
+node2   openshift_node_group_name="node-config-compute"
 # A hosted registry, by default, will only be deployed on nodes labeled
 # "node-role.kubernetes.io/infra=true".
-node3   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node4   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
-node5   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node3   openshift_node_group_name="node-config-infra"
+node4   openshift_node_group_name="node-config-infra"
+node5   openshift_node_group_name="node-config-infra"
 
 [etcd]
 master

+ 5 - 1
inventory/hosts.localhost

@@ -13,6 +13,9 @@ openshift_portal_net=172.30.0.0/16
 # localhost likely doesn't meet the minimum requirements
 openshift_disable_check=disk_availability,memory_availability
 
+openshift_node_groups=[{'name': 'node-config-all-in-one', 'labels': ['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true']}]
+
+
 [masters]
 localhost ansible_connection=local
 
@@ -20,4 +23,5 @@ localhost ansible_connection=local
 localhost ansible_connection=local
 
 [nodes]
-localhost ansible_connection=local openshift_node_labels="{'node-role.kubernetes.io/infra': 'true'}"
+# openshift_node_group_name should refer to a dictionary with matching key of name in list openshift_node_groups.
+localhost ansible_connection=local openshift_node_group_name="node-config-all-in-one"

+ 2 - 2
inventory/hosts.openstack

@@ -33,5 +33,5 @@ jdetiber-etcd.usersys.redhat.com
 #ose3-lb-ansible.test.example.com
 
 [nodes]
-jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
-jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_group_name="node-config-master"
+jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_group_name="node-config-compute"

+ 0 - 2
playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

@@ -57,8 +57,6 @@
   - import_role:
       name: openshift_node
       tasks_from: upgrade.yml
-    vars:
-      r_node_dynamic_config_name: "{{ openshift_node_group_name | default('node-config-compute') }}"
 
   # Run the upgrade hook prior to make the node schedulable again.
   - debug: msg="Running node upgrade hook {{ openshift_node_upgrade_hook }}"

+ 0 - 2
playbooks/common/openshift-cluster/upgrades/v3_10/upgrade_control_plane.yml

@@ -93,8 +93,6 @@
   - import_role:
       name: openshift_node
       tasks_from: upgrade
-    vars:
-      r_node_dynamic_config_name: "{{ openshift_node_group_name | default('node-config-master') }}"
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:

+ 4 - 1
playbooks/init/sanity_checks.yml

@@ -3,6 +3,7 @@
 - name: Verify Requirements
   hosts: oo_first_master
   roles:
+  - role: openshift_facts
   - role: lib_utils
   tasks:
   # sanity_checks is a custom action plugin defined in lib_utils.
@@ -13,4 +14,6 @@
   - name: Run variable sanity checks
     sanity_checks:
       check_hosts: "{{ l_sanity_check_hosts | default(groups['oo_all_hosts']) }}"
-    run_once: True
+  # node_group_checks is a custom action plugin defined in lib_utils.
+  - name: Validate openshift_node_groups and openshift_node_group_name
+    node_group_checks: {}

+ 0 - 3
playbooks/openshift-master/private/config.yml

@@ -84,9 +84,6 @@
     import_role:
       name: openshift_node_group
       tasks_from: bootstrap_config
-    vars:
-      r_node_dynamic_config_name: "{{ openshift_node_group_name | default('node-config-master') }}"
-      r_node_dynamic_config_force: True
 
   roles:
   - role: openshift_master_facts

+ 0 - 2
playbooks/openshift-node/private/configure_bootstrap.yml

@@ -12,7 +12,5 @@
     import_role:
       name: openshift_node_group
       tasks_from: bootstrap_config
-    vars:
-      r_node_dynamic_config_name: "{{ openshift_node_group_name | default('node-config-compute') }}"
   - set_fact:
       openshift_is_bootstrapped: True

+ 136 - 0
roles/lib_utils/action_plugins/node_group_checks.py

@@ -0,0 +1,136 @@
+"""
+Ansible action plugin to ensure inventory variables are set
+appropriately related to openshift_node_group_name
+"""
+from ansible.plugins.action import ActionBase
+from ansible import errors
+
+# Runs on first master
+# Checks each openshift_node_group_name is found in openshift_node_groups
+# Checks that master label is present in one of those groups
+# Checks that infra label is present in one of those groups
+
+
+def get_or_fail(group, key):
+    """Find a key in a group dictionary or fail"""
+    res = group.get(key)
+    if res is None:
+        msg = "Each group in openshift_node_groups must have {} key".format(key)
+        raise errors.AnsibleModuleError(msg)
+    return res
+
+
+def validate_labels(labels_found):
+    """Ensure mandatory_labels are found in the labels we found, labels_found"""
+    mandatory_labels = ('node-role.kubernetes.io/master=true',
+                        'node-role.kubernetes.io/infra=true')
+    for item in mandatory_labels:
+        if item not in labels_found:
+            msg = ("At least one group in openshift_node_groups requires the"
+                   " {} label").format(item)
+            raise errors.AnsibleModuleError(msg)
+
+
+def process_group(group, groups_found, labels_found):
+    """Validate format of each group in openshift_node_groups"""
+    name = get_or_fail(group, 'name')
+    if name in groups_found:
+        msg = ("Duplicate definition of group {} in"
+               " openshift_node_groups").format(name)
+        raise errors.AnsibleModuleError(msg)
+    groups_found.add(name)
+    labels = get_or_fail(group, 'labels')
+    if not issubclass(type(labels), list):
+        msg = "labels value of each group in openshift_node_groups must be a list"
+        raise errors.AnsibleModuleError(msg)
+    labels_found.update(labels)
+
+
+class ActionModule(ActionBase):
+    """Action plugin to execute node_group_checks."""
+    def template_var(self, hostvars, host, varname):
+        """Retrieve a variable from hostvars and template it.
+           If undefined, return None type."""
+        # We will set the current host and variable checked for easy debugging
+        # if there are any unhandled exceptions.
+        # pylint: disable=W0201
+        self.last_checked_var = varname
+        # pylint: disable=W0201
+        self.last_checked_host = host
+        res = hostvars[host].get(varname)
+        if res is None:
+            return None
+        return self._templar.template(res)
+
+    def get_node_group_name(self, hostvars, host):
+        """Ensure openshift_node_group_name is defined for nodes"""
+        group_name = self.template_var(hostvars, host, 'openshift_node_group_name')
+        if not group_name:
+            msg = "openshift_node_group_name must be defined for all nodes"
+            raise errors.AnsibleModuleError(msg)
+        return group_name
+
+    def run_check(self, hostvars, host, groups_found):
+        """Run the check for each host"""
+        group_name = self.get_node_group_name(hostvars, host)
+        if group_name not in groups_found:
+            msg = "Group: {} not found in openshift_node_groups".format(group_name)
+            raise errors.AnsibleModuleError(msg)
+
+    def run(self, tmp=None, task_vars=None):
+        """Run node_group_checks action plugin"""
+        result = super(ActionModule, self).run(tmp, task_vars)
+        result["changed"] = False
+        result["failed"] = False
+        result["msg"] = "Node group checks passed"
+        # self.task_vars holds all in-scope variables.
+        # Ignore setting self.task_vars outside of init.
+        # pylint: disable=W0201
+        self.task_vars = task_vars or {}
+
+        # pylint: disable=W0201
+        self.last_checked_host = "none"
+        # pylint: disable=W0201
+        self.last_checked_var = "none"
+
+        # check_hosts is hard-set to oo_nodes_to_config
+        check_hosts = self.task_vars['groups'].get('oo_nodes_to_config')
+        if not check_hosts:
+            result["msg"] = "skipping; oo_nodes_to_config is required for this check"
+            return result
+
+        # We need to access each host's variables
+        hostvars = self.task_vars.get('hostvars')
+        if not hostvars:
+            msg = "hostvars undefined; cannot inspect node variables"
+            raise errors.AnsibleModuleError(msg)
+
+        openshift_node_groups = self.task_vars.get('openshift_node_groups')
+        if not openshift_node_groups:
+            msg = "openshift_node_groups undefined"
+            raise errors.AnsibleModuleError(msg)
+
+        openshift_node_groups = self._templar.template(openshift_node_groups)
+        groups_found = set()
+        labels_found = set()
+        # gather the groups and labels we believe should be present.
+        for group in openshift_node_groups:
+            process_group(group, groups_found, labels_found)
+
+        if len(groups_found) == 0:
+            msg = "No groups found in openshift_node_groups"
+            raise errors.AnsibleModuleError(msg)
+
+        validate_labels(labels_found)
+
+        # We loop through each host in the provided list check_hosts
+        for host in check_hosts:
+            try:
+                self.run_check(hostvars, host, groups_found)
+            except Exception as uncaught_e:
+                msg = "last_checked_host: {}, last_checked_var: {};"
+                msg = msg.format(self.last_checked_host, self.last_checked_var)
+                msg += str(uncaught_e)
+                raise errors.AnsibleModuleError(msg)
+
+        return result

+ 14 - 0
roles/openshift_facts/defaults/main.yml

@@ -116,3 +116,17 @@ openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_typ
 openshift_master_api_port: "8443"
 openshift_ca_host: "{{ groups.oo_first_master.0 }}"
 openshift_use_openshift_sdn: true
+
+openshift_node_groups:
+  - name: node-config-master
+    labels:
+      - 'node-role.kubernetes.io/master=true'
+    edits: []
+  - name: node-config-infra
+    labels:
+      - 'node-role.kubernetes.io/infra=true'
+    edits: []
+  - name: node-config-compute
+    labels:
+      - 'node-role.kubernetes.io/compute=true'
+    edits: []

+ 5 - 0
roles/openshift_gcp/defaults/main.yml

@@ -60,3 +60,8 @@ openshift_gcp_startup_script_file: "{{ role_path }}/files/bootstrap-script.sh"
 openshift_gcp_user_data_file: ''
 
 openshift_gcp_multizone: False
+
+openshift_gcp_node_group_mapping:
+  masters: 'node-config-master'
+  infra: 'node-config-infra'
+  compute: 'node-config-compute'

+ 25 - 25
roles/openshift_gcp/tasks/setup_scale_group_facts.yml

@@ -1,11 +1,26 @@
 ---
-- name: Add masters to requisite groups
+- name: Add node instances to node group
   add_host:
     name: "{{ hostvars[item].gce_name }}"
-    groups: masters, etcd
-    openshift_node_labels:
-      node-role.kubernetes.io/master: "true"
-  with_items: "{{ groups['tag_ocp-master'] }}"
+    groups: nodes, new_nodes
+    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['compute'] }}"
+  with_items: "{{ groups['tag_ocp-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add bootstrap node instances
+  add_host:
+    name: "{{ hostvars[item].gce_name }}"
+    groups: bootstrap_nodes
+    openshift_is_bootstrapped: True
+    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['compute'] }}"
+  with_items: "{{ groups['tag_ocp-node'] | default([]) | intersect(groups['tag_ocp-bootstrap'] | default([])) }}"
+
+- name: Add bootstrap node instances as nodes
+  add_host:
+    name: "{{ item }}"
+    groups: nodes, new_nodes
+    openshift_is_bootstrapped: True
+  with_items: "{{ groups['tag_ocp-bootstrap'] | default([]) }}"
+  when: all_nodes | default(False)
 
 - name: Add a master to the primary masters group
   add_host:
@@ -23,27 +38,12 @@
   add_host:
     name: "{{ hostvars[item].gce_name }}"
     groups: nodes, new_nodes
-    openshift_node_labels:
-      node-role.kubernetes.io/infra: "true"
+    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['infra'] }}"
   with_items: "{{ groups['tag_ocp-infra-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
 
-- name: Add node instances to node group
-  add_host:
-    name: "{{ hostvars[item].gce_name }}"
-    groups: nodes, new_nodes
-  with_items: "{{ groups['tag_ocp-node'] | default([]) | difference(groups['tag_ocp-bootstrap'] | default([])) }}"
-
-- name: Add bootstrap node instances
+- name: Add masters to requisite groups
   add_host:
     name: "{{ hostvars[item].gce_name }}"
-    groups: bootstrap_nodes
-    openshift_is_bootstrapped: True
-  with_items: "{{ groups['tag_ocp-node'] | default([]) | intersect(groups['tag_ocp-bootstrap'] | default([])) }}"
-
-- name: Add bootstrap node instances as nodes
-  add_host:
-    name: "{{ item }}"
-    groups: nodes, new_nodes
-    openshift_is_bootstrapped: True
-  with_items: "{{ groups['tag_ocp-bootstrap'] | default([]) }}"
-  when: all_nodes | default(False)
+    groups: masters, etcd
+    openshift_node_group_name: "{{ openshift_gcp_node_group_mapping['masters'] }}"
+  with_items: "{{ groups['tag_ocp-master'] }}"

+ 0 - 16
roles/openshift_manage_node/tasks/config.yml

@@ -9,19 +9,3 @@
   until: node_schedulable is succeeded
   when: "'nodename' in openshift.node"
   delegate_to: "{{ openshift_master_host }}"
-
-- name: Label nodes
-  oc_label:
-    name: "{{ openshift.node.nodename }}"
-    kind: node
-    state: add
-    labels: "{{ l_all_labels | lib_utils_oo_dict_to_list_of_dict }}"
-    namespace: default
-  when:
-    - "'nodename' in openshift.node"
-    - l_all_labels != {}
-  delegate_to: "{{ openshift_master_host }}"
-  vars:
-    l_node_labels: "{{ openshift_node_labels | default({}) }}"
-    l_master_labels: "{{ openshift_manage_node_is_master | ternary(openshift_master_node_labels, {}) }}"
-    l_all_labels: "{{ l_node_labels | combine(l_master_labels) }}"

+ 2 - 19
roles/openshift_node/tasks/upgrade/bootstrap_changes.yml

@@ -94,25 +94,8 @@
     path: "{{ openshift.common.config_base }}/node/certificates"
     state: absent
 
-- name: Determine if node already has a dynamic config group
-  command: grep -E '^BOOTSTRAP_CONFIG_NAME=.+' "/etc/sysconfig/{{ openshift_service_type }}-node"
-  ignore_errors: true
-  register: existing
-
-- name: Update the sysconfig to group "{{ r_node_dynamic_config_name }}"
+- name: Update the sysconfig to group "{{ openshift_node_group_name }}"
   lineinfile:
     dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
-    line: "BOOTSTRAP_CONFIG_NAME={{ r_node_dynamic_config_name }}"
+    line: "BOOTSTRAP_CONFIG_NAME={{ openshift_node_group_name }}"
     regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
-  when: r_node_dynamic_config_force|default(False) or existing is failed
-
-- name: Set up node-config.yml if dynamic configuration is off
-  copy:
-    remote_src: true
-    src: "{{ openshift.common.config_base }}/node/bootstrap-node-config.yaml"
-    dest: "{{ openshift.common.config_base }}/node/node-config.yaml"
-    force: no
-    owner: root
-    group: root
-    mode: 0600
-  when: r_node_dynamic_config_name|length == 0

+ 1 - 14
roles/openshift_node_group/defaults/main.yml

@@ -1,18 +1,4 @@
 ---
-openshift_node_groups:
-- name: node-config-master
-  labels:
-  - 'node-role.kubernetes.io/master=true'
-  edits: []
-- name: node-config-infra
-  labels:
-  - 'node-role.kubernetes.io/infra=true'
-  edits: []
-- name: node-config-compute
-  labels:
-  - 'node-role.kubernetes.io/compute=true'
-  edits: []
-
 openshift_node_group_namespace: openshift-node
 
 openshift_use_crio: False
@@ -27,3 +13,4 @@ openshift_node_group_node_data_dir: "{{ openshift_node_group_node_data_dir_defau
 openshift_imageconfig_format: "{{ oreg_url | default(l_osm_registry_url_default) }}"
 
 openshift_node_group_use_persistentlocalvolumes: "{{ openshift_persistentlocalstorage_enabled | default(False) | bool }}"
+openshift_node_group_name: 'node-config-compute'

+ 0 - 1
roles/openshift_node_group/tasks/bootstrap.yml

@@ -4,7 +4,6 @@
     src: node-config.yaml.j2
     dest: "/etc/origin/node/bootstrap-node-config.yaml"
     mode: 0600
-
 # Make sure a single master has node-config so that SDN and sync daemonsets requires it
 - name: remove existing node config
   file:

+ 2 - 7
roles/openshift_node_group/tasks/bootstrap_config.yml

@@ -9,13 +9,8 @@
   with_items:
   - /etc/origin/node/pods
   - /etc/origin/node/certificates
-- name: Determine if node already has a dynamic config group
-  command: grep -E '^BOOTSTRAP_CONFIG_NAME=.+' "/etc/sysconfig/{{ openshift_service_type }}-node"
-  ignore_errors: true
-  register: existing
-- name: Update the sysconfig to group "{{ r_node_dynamic_config_name }}"
+- name: Update the sysconfig to group "{{ openshift_node_group_name }}"
   lineinfile:
     dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
-    line: "BOOTSTRAP_CONFIG_NAME={{ r_node_dynamic_config_name }}"
+    line: "BOOTSTRAP_CONFIG_NAME={{ openshift_node_group_name }}"
     regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
-  when: r_node_dynamic_config_force|default(False) or existing is failed

+ 10 - 9
roles/openshift_node_group/tasks/create_config.yml

@@ -1,7 +1,7 @@
 ---
 - name: fetch node configmap
   oc_configmap:
-    name: "{{ openshift_node_group_name }}"
+    name: "{{ l_openshift_node_group_name }}"
     namespace: "{{ openshift_node_group_namespace }}"
     state: list
   register: configout
@@ -13,7 +13,7 @@
   run_once: true
 
 - when:
-  - configout.results.results.0 == {} or (configout.results.results.0 != {} and (openshift_node_group_edits|length > 0 or openshift_node_group_labels|length > 0))
+  - configout.results.results.0 == {} or (configout.results.results.0 != {} and (l_openshift_node_group_edits|length > 0))
   block:
   - name: create a temp dir for this work
     command: mktemp -d /tmp/openshift_node_config-XXXXXX
@@ -36,13 +36,14 @@
     - configout.results.results.0 != {}
     run_once: true
 
-  - name: "specialize the generated configs for {{ openshift_node_group_name }}"
+  - name: "specialize the generated configs for {{ l_openshift_node_group_name }}"
     yedit:
       content:
       src: "{{ mktempout.stdout }}/node-config.yaml"
-      edits: "{{ openshift_node_group_edits | union(openshift_node_labels_edit) }}"
+      edits: "{{ l_openshift_node_group_edits }}"
     register: yeditout
     run_once: true
+    when: l_openshift_node_group_edits != []
 
   - name: show the yeditout debug var
     debug:
@@ -55,7 +56,7 @@
       dest: "{{ mktempout.stdout }}/volume-config.yaml"
     when:
     - "'data' not in configout['results']['results'][0] or 'volume-config.yaml' not in configout['results']['results'][0]['data']"
-    - openshift_node_group_name != ""
+    - l_openshift_node_group_name != ""
     - openshift_node_local_quota_per_fsgroup is defined
     - openshift_node_local_quota_per_fsgroup != ""
     run_once: true
@@ -67,12 +68,12 @@
     when:
     - "'data' in configout['results']['results'][0]"
     - "'volume-config.yaml' in configout['results']['results'][0]['data']"
-    - openshift_node_group_name != ""
+    - l_openshift_node_group_name != ""
     - openshift_node_local_quota_per_fsgroup is defined
     - openshift_node_local_quota_per_fsgroup != ""
     run_once: true
 
-  - name: "specialize the volume config for {{ openshift_node_group_name }}"
+  - name: "specialize the volume config for {{ l_openshift_node_group_name }}"
     yedit:
       content:
       src: "{{ mktempout.stdout }}/volume-config.yaml"
@@ -91,7 +92,7 @@
 
   - name: create node-config.yaml configmap
     oc_configmap:
-      name: "{{ openshift_node_group_name }}"
+      name: "{{ l_openshift_node_group_name }}"
       namespace: "{{ openshift_node_group_namespace }}"
       from_file:
         node-config.yaml: "{{ mktempout.stdout }}/node-config.yaml"
@@ -101,7 +102,7 @@
 
   - name: create node-config.yaml and volume-config.yaml configmap
     oc_configmap:
-      name: "{{ openshift_node_group_name }}"
+      name: "{{ l_openshift_node_group_name }}"
       namespace: "{{ openshift_node_group_namespace }}"
       from_file:
         node-config.yaml: "{{ mktempout.stdout }}/node-config.yaml"

+ 3 - 3
roles/openshift_node_group/tasks/main.yml

@@ -2,9 +2,9 @@
 - name: Build node config maps
   include_tasks: create_config.yml
   vars:
-    openshift_node_group_name: "{{ node_group.name }}"
-    openshift_node_group_edits: "{{ node_group.edits | default([]) }}"
-    openshift_node_group_labels: "{{ node_group.labels | default([]) }}"
+    l_openshift_node_group_name: "{{ node_group.name }}"
+    l_openshift_node_group_edits: "{{ node_group.edits | default([]) }}"
+    l_openshift_node_group_labels: "{{ node_group.labels }}"
   with_items: "{{ openshift_node_groups }}"
   loop_control:
     loop_var: node_group

+ 3 - 3
roles/openshift_node_group/tasks/upgrade.yml

@@ -2,8 +2,8 @@
 - name: Ensure all node groups have bootstrap settings
   include_tasks: create_config.yml
   vars:
-    openshift_node_group_name: "{{ node_group.name }}"
-    openshift_node_group_edits:
+    l_openshift_node_group_name: "{{ node_group.name }}"
+    l_openshift_node_group_edits:
     - key: servingInfo.certFile
       value: ""
     - key: servingInfo.keyFile
@@ -27,7 +27,7 @@
       - /etc/origin/node/certificates
     - key: masterKubeConfig
       value: node.kubeconfig
-    openshift_node_group_labels: "{{ node_group.labels | default([]) }}"
+    l_openshift_node_group_labels: "{{ node_group.labels }}"
   with_items: "{{ openshift_node_groups }}"
   loop_control:
     loop_var: node_group

+ 4 - 1
roles/openshift_node_group/templates/node-config.yaml.j2

@@ -50,7 +50,10 @@ kubeletArguments:
   cloud-provider:
   - {{ openshift_node_group_cloud_provider }}
 {% endif %}
-  node-labels: []
+{% if l_openshift_node_group_labels is defined %}
+  node-labels:
+  - {{ l_openshift_node_group_labels | join(',') }}
+{% endif %}
   enable-controller-attach-detach:
   - 'true'
 masterClientConnectionOverrides:

+ 0 - 7
roles/openshift_node_group/vars/main.yml

@@ -1,8 +1 @@
 ---
-# These values should never be passed in, they are needed
-openshift_node_group_edits: []
-openshift_node_group_labels: []
-openshift_node_labels_edit:
-- key: kubeletArguments.node-labels
-  value:
-  - "{{ openshift_node_group_labels | join(',') }}"