Browse Source

Deprecate node 'evacuation' in favor of 'drain'

* https://trello.com/c/TeaEB9fX/307-3-deprecate-node-evacuation
Tim Bielawa 8 years ago
parent
commit
96df370b82

+ 4 - 4
playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -18,20 +18,20 @@
 # If a node fails, halt everything, the admin will need to clean up and we
 # don't want to carry on, potentially taking out every node. The playbook can safely be re-run
 # and will not take any action on a node already running the requested docker version.
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
   hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
   serial: 1
   any_errors_fatal: true
   tasks:
-  - name: Prepare for Node evacuation
+  - name: Prepare for Node draining
     command: >
       {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
 
-  - name: Evacuate Node for Kubelet upgrade
+  - name: Drain Node for Kubelet upgrade
     command: >
-      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --drain --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
 

+ 4 - 4
playbooks/common/openshift-cluster/redeploy-certificates.yml

@@ -204,7 +204,7 @@
       cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
     changed_when: False
 
-- name: Serially evacuate all nodes to trigger redeployments
+- name: Serially drain all nodes to trigger redeployments
   hosts: oo_nodes_to_config
   serial: 1
   any_errors_fatal: true
@@ -222,7 +222,7 @@
       was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
     when: openshift_certificates_redeploy_ca | default(false) | bool
 
-  - name: Prepare for node evacuation
+  - name: Prepare for node draining
     command: >
       {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
       manage-node {{ openshift.node.nodename }}
@@ -230,11 +230,11 @@
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
 
-  - name: Evacuate node
+  - name: Drain node
     command: >
       {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
       manage-node {{ openshift.node.nodename }}
-      --evacuate --force
+      --drain --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
 

+ 3 - 3
playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

@@ -1,5 +1,5 @@
 ---
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
   hosts: oo_nodes_to_upgrade
   # This var must be set with -e on invocation, as it is not a per-host inventory var
   # and is evaluated early. Values such as "20%" can also be used.
@@ -39,9 +39,9 @@
     retries: 3
     delay: 1
 
-  - name: Evacuate Node for Kubelet upgrade
+  - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --drain --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
 

+ 1 - 1
roles/openshift_node/README.md

@@ -43,7 +43,7 @@ Currently we support re-labeling nodes but we don't re-schedule running pods nor
 
 ```
 oadm manage-node --schedulable=false ${NODE}
-oadm manage-node --evacuate ${NODE}
+oadm manage-node --drain ${NODE}
 oadm manage-node --schedulable=true ${NODE}
```