
Replace openshift.node.nodename with l_kubelet_node_name

This allows us to optionally use the override as we see fit.
Michael Gugino 6 years ago
parent
commit
0411517980
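
For context: the new l_kubelet_node_name fact, set in playbooks/init/cluster_facts.yml below, only honors openshift_kubelet_name_override when that variable is defined and l_openshift_upgrade_in_progress is true; the latter defaults to False in roles/openshift_facts/defaults/main.yml, so outside of upgrades the fact still resolves to openshift.node.nodename. The standalone playbook below is a minimal sketch of that resolution rule, not part of this commit; the host facts and override value are invented for illustration.

---
# sketch.yml -- hypothetical example, not shipped by openshift-ansible.
# Run with: ansible-playbook sketch.yml
- hosts: localhost
  gather_facts: false
  vars:
    # Invented example values; in a real run these come from the inventory,
    # openshift_facts, and the upgrade playbooks.
    l_openshift_upgrade_in_progress: true
    openshift_kubelet_name_override: node01.override.example.com
    openshift:
      node:
        nodename: node01.example.com
  tasks:
  - name: Resolve the kubelet node name the same way cluster_facts.yml does
    set_fact:
      l_kubelet_node_name: "{{ openshift_kubelet_name_override if (openshift_kubelet_name_override is defined and l_openshift_upgrade_in_progress) else openshift.node.nodename }}"

  - name: Show the name that the drain/label/oc_obj tasks would use
    debug:
      msg: "kubelet node name: {{ l_kubelet_node_name | lower }}"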

+ 1 - 1
playbooks/byo/calico/legacy_upgrade.yml

@@ -100,7 +100,7 @@
   - name: Apply node label
     delegate_to: "{{ groups.oo_first_master.0 }}"
     command: >
-      {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig label node {{ openshift.node.nodename | lower }} --overwrite projectcalico.org/ds-ready=true
+      {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig label node {{ l_kubelet_node_name | lower }} --overwrite projectcalico.org/ds-ready=true
   - name: Wait for node running
     uri:
       url: http://localhost:9099/readiness

+ 3 - 3
playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -56,7 +56,7 @@
   tasks:
   - name: Mark node unschedulable
     oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
+      node: "{{ l_kubelet_node_name | lower }}"
       schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
     retries: 10
@@ -70,7 +70,7 @@

   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ l_kubelet_node_name | lower }}
       --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
@@ -94,7 +94,7 @@

   - name: Set node schedulability
     oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
+      node: "{{ l_kubelet_node_name | lower }}"
       schedulable: True
     delegate_to: "{{ groups.oo_first_master.0 }}"
     retries: 10

+ 2 - 2
playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

@@ -23,7 +23,7 @@
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
   - name: Mark node unschedulable
     oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
+      node: "{{ l_kubelet_node_name | lower }}"
       schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
     retries: 10
@@ -33,7 +33,7 @@

   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ l_kubelet_node_name | lower }}
       --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s

+ 2 - 2
playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml

@@ -28,7 +28,7 @@

   - name: Mark node unschedulable
     oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
+      node: "{{ l_kubelet_node_name | lower }}"
       schedulable: False
     delegate_to: "{{ groups.oo_first_master.0 }}"
     retries: 10
@@ -45,7 +45,7 @@
   tasks:
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
+      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ l_kubelet_node_name | lower }}
       --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       --force --delete-local-data --ignore-daemonsets
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s

+ 4 - 1
playbooks/init/cluster_facts.yml

@@ -28,7 +28,7 @@
     openshift_facts:
       role: common
       local_facts:
-        hostname: "{{ openshift_kubelet_name_override | default(None) }}"
+        hostname: "{{ (openshift_kubelet_name_override | default(None)) if l_openshift_upgrade_in_progress else None }}"
         ip: "{{ openshift_ip | default(None) }}"
         ip: "{{ openshift_ip | default(None) }}"
         public_hostname: "{{ openshift_public_hostname | default(None) }}"
         public_hostname: "{{ openshift_public_hostname | default(None) }}"
         public_ip: "{{ openshift_public_ip | default(None) }}"
         public_ip: "{{ openshift_public_ip | default(None) }}"
@@ -62,6 +62,9 @@
       role: node
       local_facts:
         sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
+  - name: set_fact l_kubelet_node_name
+    set_fact:
+      l_kubelet_node_name: "{{ openshift_kubelet_name_override if (openshift_kubelet_name_override is defined and l_openshift_upgrade_in_progress) else openshift.node.nodename }}"

 - name: Initialize etcd host variables
   hosts: oo_masters_to_config

+ 1 - 1
playbooks/openshift-glusterfs/private/add_hosts.yml

@@ -2,7 +2,7 @@
 # This play runs when new gluster hosts are part of new_nodes group during
 # master or node scaleup.

-# Need to gather facts on glusterfs hosts to ensure we collect openshift.node.nodename
+# Need to gather facts on glusterfs hosts to ensure we collect l_kubelet_node_name
 # for topology file.
 - import_playbook: ../../init/basic_facts.yml
   vars:

+ 1 - 1
playbooks/openshift-node/private/join.yml

@@ -31,7 +31,7 @@

   - name: Find all hostnames for bootstrapping
     set_fact:
-      l_nodes_to_join: "{{ groups['oo_nodes_to_config'] | default([]) | map('extract', hostvars) | map(attribute='openshift.node.nodename') | list }}"
+      l_nodes_to_join: "{{ groups['oo_nodes_to_config'] | default([]) | map('extract', hostvars) | map(attribute='l_kubelet_node_name') | list }}"

   - name: Dump the bootstrap hostnames
     debug:

+ 1 - 1
playbooks/openshift-node/private/registry_auth.yml

@@ -28,7 +28,7 @@
     oc_obj:
       state: list
       kind: node
-      name: "{{ openshift.node.nodename | lower }}"
+      name: "{{ l_kubelet_node_name | lower }}"
     register: node_output
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_config

+ 1 - 1
playbooks/openshift-node/private/restart.yml

@@ -36,7 +36,7 @@
     oc_obj:
       state: list
       kind: node
-      name: "{{ openshift.node.nodename | lower }}"
+      name: "{{ l_kubelet_node_name | lower }}"
     register: node_output
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_config

+ 2 - 2
roles/openshift_control_plane/tasks/main.yml

@@ -176,7 +176,7 @@
   oc_obj:
     state: list
     kind: pod
-    name: "master-{{ item }}-{{ openshift.node.nodename | lower }}"
+    name: "master-{{ item }}-{{ l_kubelet_node_name | lower }}"
     namespace: kube-system
   register: control_plane_pods
   until:
@@ -228,7 +228,7 @@
   oc_obj:
     state: list
     kind: pod
-    name: "master-{{ item }}-{{ openshift.node.nodename | lower }}"
+    name: "master-{{ item }}-{{ l_kubelet_node_name | lower }}"
     namespace: kube-system
   register: control_plane_health
   until:

+ 1 - 0
roles/openshift_facts/defaults/main.yml

@@ -230,3 +230,4 @@ openshift_node_group_edits_crio:
       - "10m"
       - "10m"
 
 
 openshift_master_manage_htpasswd: True
 openshift_master_manage_htpasswd: True
+l_openshift_upgrade_in_progress: False

The file diff has been suppressed because it is too large
+ 1 - 1
roles/openshift_hosted/tasks/storage/glusterfs.yml


+ 1 - 1
roles/openshift_manage_node/tasks/config.yml

@@ -1,7 +1,7 @@
 ---
 - name: Set node schedulability
   oc_adm_manage_node:
-    node: "{{ openshift.node.nodename | lower }}"
+    node: "{{ l_kubelet_node_name | lower }}"
     schedulable: "{{ 'true' if openshift_schedulable | default(true) | bool else 'false' }}"
     schedulable: "{{ 'true' if openshift_schedulable | default(true) | bool else 'false' }}"
   retries: 10
   retries: 10
   delay: 5
   delay: 5

+ 1 - 1
roles/openshift_manage_node/tasks/main.yml

@@ -24,7 +24,7 @@

 - name: Wait for Node Registration
   oc_obj:
-    name: "{{ openshift.node.nodename }}"
+    name: "{{ l_kubelet_node_name | lower }}"
     kind: node
     state: list
   register: get_node

+ 1 - 1
roles/openshift_node/tasks/upgrade.yml

@@ -84,7 +84,7 @@
     oc_bin: "{{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }}"
     oc_conf: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
     node_list:
-    - "{{ openshift.node.nodename | lower }}"
+    - "{{ l_kubelet_node_name | lower }}"
   delegate_to: "{{ groups.oo_first_master.0 }}"
   register: node_upgrade_oc_csr_approve
   retries: 30

+ 1 - 1
roles/openshift_storage_glusterfs/README.md

@@ -63,7 +63,7 @@ their configuration as GlusterFS nodes:
 | Name               | Default value             | Description                             |
 |--------------------|---------------------------|-----------------------------------------|
 | glusterfs_cluster  | 1                         | The ID of the cluster this node should belong to. This is useful when a single heketi service is expected to manage multiple distinct clusters. **NOTE:** For natively-hosted clusters, all pods will be in the same OpenShift namespace
-| glusterfs_hostname | openshift.node.nodename   | A hostname (or IP address) that will be used for internal GlusterFS communication
+| glusterfs_hostname | l_kubelet_node_name  | A hostname (or IP address) that will be used for internal GlusterFS communication
 | glusterfs_ip       | openshift.common.ip       | An IP address that will be used by pods to communicate with the GlusterFS node. **NOTE:** Required for external GlusterFS nodes
 | glusterfs_zone     | 1                         | A zone number for the node. Zones are used within the cluster for determining how to distribute the bricks of GlusterFS volumes. heketi will try to spread each volumes' bricks as evenly as possible across all zones


+ 1 - 1
roles/openshift_storage_glusterfs/tasks/glusterfs_uninstall.yml

@@ -44,7 +44,7 @@

 - name: Unlabel any existing GlusterFS nodes
   oc_label:
-    name: "{{ hostvars[item].openshift.node.nodename }}"
+    name: "{{ hostvars[item].l_kubelet_node_name }}"
     kind: node
     state: absent
     labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"

+ 1 - 1
roles/openshift_storage_glusterfs/tasks/label_nodes.yml

@@ -1,7 +1,7 @@
 ---
 - name: Label GlusterFS nodes
   oc_label:
-    name: "{{ hostvars[item].openshift.node.nodename }}"
+    name: "{{ hostvars[item].l_kubelet_node_name }}"
     kind: node
     state: add
     labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"

+ 1 - 1
roles/openshift_storage_glusterfs/templates/topology.json.j2

@@ -20,7 +20,7 @@
 {%- if 'glusterfs_hostname' in hostvars[node] -%}
                 "{{ hostvars[node].glusterfs_hostname }}"
 {%- elif 'openshift' in hostvars[node] -%}
-                "{{ hostvars[node].openshift.node.nodename }}"
+                "{{ hostvars[node].l_kubelet_node_name }}"
 {%- else -%}
                 "{{ node }}"
 {%- endif -%}