# upgrade_scale_group.yml
  1. ---
  2. - name: create new scale group
  3. hosts: localhost
  4. tasks:
  5. - name: build upgrade scale groups
  6. import_role:
  7. name: openshift_aws
  8. tasks_from: upgrade_node_group.yml
  9. - fail:
  10. msg: "Ensure that new scale groups were provisioned before proceeding to update."
  11. when:
  12. - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
  13. - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0"
  14. - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes
  15. - name: initialize upgrade bits
  16. import_playbook: init.yml
  17. vars:
  18. l_upgrade_cert_check_hosts: "oo_masters_to_config:oo_etcd_to_config"
  19. - name: unschedule nodes
  20. hosts: oo_sg_current_nodes
  21. tasks:
  22. - name: Load lib_openshift modules
  23. import_role:
  24. name: ../roles/lib_openshift
  25. - name: Mark node unschedulable
  26. oc_adm_manage_node:
  27. node: "{{ openshift.node.nodename | lower }}"
  28. schedulable: False
  29. delegate_to: "{{ groups.oo_first_master.0 }}"
  30. retries: 10
  31. delay: 5
  32. register: node_unschedulable
  33. until: node_unschedulable is succeeded
  34. - name: Drain nodes
  35. hosts: oo_sg_current_nodes
  36. # This var must be set with -e on invocation, as it is not a per-host inventory var
  37. # and is evaluated early. Values such as "20%" can also be used.
  38. serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
  39. max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
  40. tasks:
  41. - name: Drain Node for Kubelet upgrade
  42. command: >
  43. {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
  44. --config={{ openshift.common.config_base }}/master/admin.kubeconfig
  45. --force --delete-local-data --ignore-daemonsets
  46. --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
  47. delegate_to: "{{ groups.oo_first_master.0 }}"
  48. register: l_upgrade_nodes_drain_result
  49. until: not (l_upgrade_nodes_drain_result is failed)
  50. retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
  51. delay: 5
  52. failed_when:
  53. - l_upgrade_nodes_drain_result is failed
  54. - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
  55. # Alright, let's clean up!
  56. - name: clean up the old scale group
  57. hosts: localhost
  58. tasks:
  59. - name: clean up scale group
  60. import_role:
  61. name: openshift_aws
  62. tasks_from: remove_scale_group.yml