upgrade_nodes.yml

---
- name: Prepull images and rpms before doing rolling restart
  hosts: oo_nodes_to_upgrade:!oo_masters_to_config

  roles:
  - role: openshift_facts

  tasks:
  - import_role:
      name: openshift_node
      tasks_from: upgrade_pre.yml

- name: Drain and upgrade nodes
  hosts: oo_nodes_to_upgrade:!oo_masters_to_config
  # This var must be set with -e on invocation, as it is not a per-host inventory var
  # and is evaluated early. Values such as "20%" can also be used.
  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
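  # For example, passed with -e to whichever upgrade playbook imports this one
  # (the inventory path and values here are illustrative, not prescribed):
  #   ansible-playbook -i <inventory> <upgrade_playbook.yml> \
  #     -e openshift_upgrade_nodes_serial="20%" \
  #     -e openshift_upgrade_nodes_max_fail_percentage=10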
  roles:
  - lib_openshift
  - openshift_facts

  pre_tasks:
  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
  - name: Mark node unschedulable
    oc_adm_manage_node:
      node: "{{ openshift.node.nodename | lower }}"
      schedulable: False
    delegate_to: "{{ groups.oo_first_master.0 }}"
    retries: 10
    delay: 5
    register: node_unschedulable
    until: node_unschedulable is succeeded
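  # Both the cordon above and the drain below go through the cluster API, so
  # they are delegated to the first master, where admin.kubeconfig lives.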
  - name: Drain Node for Kubelet upgrade
    command: >
      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      --force --delete-local-data --ignore-daemonsets
      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
    delegate_to: "{{ groups.oo_first_master.0 }}"
    register: l_upgrade_nodes_drain_result
    until: not (l_upgrade_nodes_drain_result is failed)
    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
    delay: 5
    failed_when:
    - l_upgrade_nodes_drain_result is failed
    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
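  # Retry/timeout interplay in the task above: with the default
  # openshift_upgrade_nodes_drain_timeout of 0 the drain waits indefinitely,
  # a failure is retried once, and a second failure aborts the play. With a
  # non-zero timeout (e.g. -e openshift_upgrade_nodes_drain_timeout=600) a
  # timed-out drain is neither retried nor treated as fatal, so the upgrade
  # proceeds.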
  # Run the pre-upgrade hook if defined:
  - debug: msg="Running node pre-upgrade hook {{ openshift_node_upgrade_pre_hook }}"
    when: openshift_node_upgrade_pre_hook is defined

  - include_tasks: "{{ openshift_node_upgrade_pre_hook }}"
    when: openshift_node_upgrade_pre_hook is defined
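  # A hook is an ordinary tasks file pulled in via include_tasks. A minimal
  # sketch (path and task content are hypothetical):
  #   # /usr/share/ansible/hooks/pre_node_upgrade.yml
  #   ---
  #   - name: Announce node upgrade
  #     debug:
  #       msg: "About to upgrade {{ openshift.node.nodename | lower }}"
  # enabled with:
  #   -e openshift_node_upgrade_pre_hook=/usr/share/ansible/hooks/pre_node_upgrade.yml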
  post_tasks:
  - import_role:
      name: openshift_node
      tasks_from: upgrade.yml
  # Run the upgrade hook prior to making the node schedulable again.
  - debug: msg="Running node upgrade hook {{ openshift_node_upgrade_hook }}"
    when: openshift_node_upgrade_hook is defined

  - include_tasks: "{{ openshift_node_upgrade_hook }}"
    when: openshift_node_upgrade_hook is defined

  - import_role:
      name: openshift_manage_node
      tasks_from: config.yml
    vars:
      openshift_master_host: "{{ groups.oo_first_master.0 }}"

  # Run the post-upgrade hook if defined:
  - debug: msg="Running node post-upgrade hook {{ openshift_node_upgrade_post_hook }}"
    when: openshift_node_upgrade_post_hook is defined

  - include_tasks: "{{ openshift_node_upgrade_post_hook }}"
    when: openshift_node_upgrade_post_hook is defined

- name: Re-enable excluders
  hosts: oo_nodes_to_upgrade:!oo_masters_to_config
  tasks:
  - import_role:
      name: openshift_excluder
    vars:
      r_openshift_excluder_action: enable