---
- import_playbook: ../../../../init/evaluate_groups.yml
  vars:
    # Do not allow adding hosts during upgrade.
    g_new_master_hosts: []
    g_new_node_hosts: []
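
# Populate the oo_nodes_to_upgrade group consumed by the plays below.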
- import_playbook: ../initialize_nodes_to_upgrade.yml
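
# Collect facts only from the hosts involved in this upgrade (l_init_fact_hosts).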
- import_playbook: ../../../../init/basic_facts.yml
  vars:
    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_upgrade"

- import_playbook: ../../../../init/cluster_facts.yml
  vars:
    l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_upgrade"

# We need the version for sanity_checks, but we don't need to check whether
# packages/images are available because we're not installing any origin components.
- import_playbook: ../../../../init/version.yml
  vars:
    l_openshift_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_upgrade:oo_masters_to_config:!oo_first_master"

# Ensure inventory sanity_checks are run.
- import_playbook: ../../../../init/sanity_checks.yml
  vars:
    l_sanity_check_hosts: "{{ groups['oo_nodes_to_upgrade'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}"

- name: Check for appropriate Docker versions
  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
  roles:
  - openshift_facts
  tasks:
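  # Docker ships as part of the OS image on Atomic hosts, so it cannot be
  # upgraded independently there.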
  - fail:
      msg: Cannot upgrade Docker on Atomic operating systems.
    when: openshift_is_atomic | bool
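
  # Verify up front that a suitable Docker version is available on every host,
  # before any node is drained.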
  - import_role:
      name: container_runtime
      tasks_from: docker_upgrade_check.yml
    when: docker_upgrade | default(True) | bool


# If a node fails, halt everything: the admin will need to clean up, and we
# don't want to carry on and potentially take out every node. The playbook can
# safely be re-run; it takes no action on a node already running the requested
# Docker version.
- name: Drain and upgrade nodes
  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
  serial: 1
  any_errors_fatal: true

  roles:
  - openshift_facts
  - lib_openshift

  tasks:
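  # Cordon the node so no new pods are scheduled onto it while Docker is
  # upgraded; the API call runs on the first master and is retried to ride
  # out transient failures.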
  - name: Mark node unschedulable
    oc_adm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: False
    delegate_to: "{{ groups.oo_first_master.0 }}"
    retries: 10
    delay: 5
    register: node_unschedulable
    until: node_unschedulable is succeeded
    when:
    - l_docker_upgrade is defined
    - l_docker_upgrade | bool
    - inventory_hostname in groups.oo_nodes_to_upgrade
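
  # With openshift_upgrade_nodes_drain_timeout unset (the default of 0), drain
  # blocks until it succeeds and a failed attempt is retried once. With a
  # timeout set (e.g. openshift_upgrade_nodes_drain_timeout=600 in the
  # inventory), a drain that exceeds it is tolerated rather than aborting the
  # upgrade (see failed_when below).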
  - name: Drain Node for Kubelet upgrade
    command: >
      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      --force --delete-local-data --ignore-daemonsets
      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
    delegate_to: "{{ groups.oo_first_master.0 }}"
    when:
    - l_docker_upgrade is defined
    - l_docker_upgrade | bool
    - inventory_hostname in groups.oo_nodes_to_upgrade
    register: l_docker_upgrade_drain_result
    until: not (l_docker_upgrade_drain_result is failed)
    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
    delay: 5
    failed_when:
    - l_docker_upgrade_drain_result is failed
    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
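
  # The Docker upgrade itself (package update and service restart) happens in
  # tasks/upgrade.yml, once the node is drained.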
  - import_tasks: tasks/upgrade.yml
    when:
    - l_docker_upgrade is defined
    - l_docker_upgrade | bool
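
  # Only uncordon nodes this play actually cordoned above; the retry pattern
  # mirrors the cordon task.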
  - name: Set node schedulability
    oc_adm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: True
    delegate_to: "{{ groups.oo_first_master.0 }}"
    retries: 10
    delay: 5
    register: node_schedulable
    until: node_schedulable is succeeded
    when: node_unschedulable is changed