# upgrade_scale_group.yml
  1. ---
  2. - name: create new scale group
  3. hosts: localhost
  4. tasks:
  5. - name: build upgrade scale groups
  6. include_role:
  7. name: openshift_aws
  8. tasks_from: upgrade_node_group.yml
  9. - fail:
  10. msg: "Ensure that new scale groups were provisioned before proceeding to update."
  11. when:
  12. - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
  13. - name: initialize upgrade bits
  14. include: init.yml
  15. - name: Drain and upgrade nodes
  16. hosts: oo_sg_current_nodes
  17. # This var must be set with -e on invocation, as it is not a per-host inventory var
  18. # and is evaluated early. Values such as "20%" can also be used.
  19. serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  20. max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
  21. pre_tasks:
  22. - name: Load lib_openshift modules
  23. include_role:
  24. name: ../roles/lib_openshift
  25. # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
  26. # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
  27. # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
  28. - name: Mark node unschedulable
  29. oc_adm_manage_node:
  30. node: "{{ openshift.node.nodename | lower }}"
  31. schedulable: False
  32. delegate_to: "{{ groups.oo_first_master.0 }}"
  33. retries: 10
  34. delay: 5
  35. register: node_unschedulable
  36. until: node_unschedulable|succeeded
  37. - name: Drain Node for Kubelet upgrade
  38. command: >
  39. {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
  40. delegate_to: "{{ groups.oo_first_master.0 }}"
  41. register: l_upgrade_nodes_drain_result
  42. until: not l_upgrade_nodes_drain_result | failed
  43. retries: 60
  44. delay: 60
  45. # Alright, let's clean up!
  46. - name: clean up the old scale group
  47. hosts: localhost
  48. tasks:
  49. - name: clean up scale group
  50. include_role:
  51. name: openshift_aws
  52. tasks_from: remove_scale_group.yml