# docker_upgrade.yml — orchestrates a cluster-wide Docker upgrade:
# gathers facts, sanity-checks the inventory, then drains and upgrades
# nodes one at a time.
  1. ---
  2. - import_playbook: ../../../../init/evaluate_groups.yml
  3. vars:
  4. # Do not allow adding hosts during upgrade.
  5. g_new_master_hosts: []
  6. g_new_node_hosts: []
  7. - import_playbook: ../initialize_nodes_to_upgrade.yml
  8. - import_playbook: ../../../../init/basic_facts.yml
  9. vars:
  10. l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_upgrade"
  11. - import_playbook: ../../../../init/cluster_facts.yml
  12. vars:
  13. l_init_fact_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config:oo_nodes_to_upgrade"
  14. # We need version for sanity_checks, but we don't need to actually check if
  15. # packages/images are available because we're not install any origin components.
  16. - import_playbook: ../../../../init/version.yml
  17. vars:
  18. l_openshift_version_set_hosts: "oo_etcd_to_config:oo_nodes_to_upgrade:oo_masters_to_config:!oo_first_master"
  19. # Ensure inventory sanity_checks are run.
  20. - import_playbook: ../../../../init/sanity_checks.yml
  21. vars:
  22. l_sanity_check_hosts: "{{ groups['oo_nodes_to_upgrade'] | union(groups['oo_masters_to_config']) | union(groups['oo_etcd_to_config']) }}"
  23. - name: Check for appropriate Docker versions
  24. hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
  25. roles:
  26. - openshift_facts
  27. tasks:
  28. - import_role:
  29. name: container_runtime
  30. tasks_from: docker_upgrade_check.yml
  31. when: docker_upgrade | default(True) | bool
  32. # If a node fails, halt everything, the admin will need to clean up and we
  33. # don't want to carry on, potentially taking out every node. The playbook can safely be re-run
  34. # and will not take any action on a node already running the requested docker version.
  35. - name: Drain and upgrade nodes
  36. hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
  37. serial: 1
  38. any_errors_fatal: true
  39. roles:
  40. - openshift_facts
  41. - lib_openshift
  42. tasks:
  43. - name: Mark node unschedulable
  44. oc_adm_manage_node:
  45. node: "{{ l_kubelet_node_name | lower }}"
  46. schedulable: False
  47. delegate_to: "{{ groups.oo_first_master.0 }}"
  48. retries: 10
  49. delay: 5
  50. register: node_unschedulable
  51. until: node_unschedulable is succeeded
  52. when:
  53. - l_docker_upgrade is defined
  54. - l_docker_upgrade | bool
  55. - inventory_hostname in groups.oo_nodes_to_upgrade
  56. - name: Drain Node for Kubelet upgrade
  57. command: >
  58. {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ l_kubelet_node_name | lower }}
  59. --config={{ openshift.common.config_base }}/master/admin.kubeconfig
  60. --force --delete-local-data --ignore-daemonsets
  61. --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
  62. delegate_to: "{{ groups.oo_first_master.0 }}"
  63. when:
  64. - l_docker_upgrade is defined
  65. - l_docker_upgrade | bool
  66. - inventory_hostname in groups.oo_nodes_to_upgrade
  67. register: l_docker_upgrade_drain_result
  68. until: not (l_docker_upgrade_drain_result is failed)
  69. retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
  70. delay: 5
  71. failed_when:
  72. - l_docker_upgrade_drain_result is failed
  73. - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
  74. - import_tasks: tasks/upgrade.yml
  75. when:
  76. - l_docker_upgrade is defined
  77. - l_docker_upgrade | bool
  78. - name: Set node schedulability
  79. oc_adm_manage_node:
  80. node: "{{ l_kubelet_node_name | lower }}"
  81. schedulable: True
  82. delegate_to: "{{ groups.oo_first_master.0 }}"
  83. retries: 10
  84. delay: 5
  85. register: node_schedulable
  86. until: node_schedulable is succeeded
  87. when: node_unschedulable is changed