upgrade.yml

---
###############################################################################
# The restart playbook should be run after this playbook completes.
###############################################################################
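
# Overview: this playbook upgrades master packages and systemd units, gates on
# all masters completing, upgrades each node (and master/etcd host) serially
# with evacuation, then reconciles cluster roles, role bindings, and security
# context constraints before a final gate.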

###############################################################################
# Upgrade Masters
###############################################################################
- name: Upgrade master
  hosts: oo_masters_to_config
  handlers:
  - include: ../../../../../roles/openshift_master/handlers/main.yml
  roles:
  - openshift_facts
  tasks:
  - include: rpm_upgrade.yml component=master
    when: not openshift.common.is_containerized | bool

  - include_vars: ../../../../../roles/openshift_master/vars/main.yml

  - name: Update systemd units
    include: ../../../../../roles/openshift_master/tasks/systemd_units.yml

#  - name: Upgrade master configuration
#    openshift_upgrade_config:
#      from_version: '3.1'
#      to_version: '3.2'
#      role: master
#      config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"

- name: Set master update status to complete
  hosts: oo_masters_to_config
  tasks:
  - set_fact:
      master_update_complete: True
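
# Each master records master_update_complete above; the localhost gate below
# collects that flag from hostvars and aborts if any master is missing it.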
##############################################################################
# Gate on master update complete
##############################################################################
- name: Gate on master update
  hosts: localhost
  connection: local
  become: no
  tasks:
  - set_fact:
      master_update_completed: "{{ hostvars
                                 | oo_select_keys(groups.oo_masters_to_config)
                                 | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
  - set_fact:
      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
  - fail:
      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
    when: master_update_failed | length > 0

###############################################################################
# Upgrade Nodes
###############################################################################

# Here we handle all tasks that might require a node evacuation
# (upgrading Docker and the node service).
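# Hosts are processed one at a time (serial: 1); each node is marked
# unschedulable and evacuated before its packages or containers are upgraded,
# then made schedulable again afterwards.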
- name: Perform upgrades that may require node evacuation
  hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
  serial: 1
  any_errors_fatal: true
  roles:
  - openshift_facts
  handlers:
  - include: ../../../../../roles/openshift_node/handlers/main.yml
  tasks:
  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
  - name: Mark unschedulable if host is a node
    command: >
      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
    delegate_to: "{{ groups.oo_first_master.0 }}"
    when: inventory_hostname in groups.oo_nodes_to_config

  - name: Evacuate Node for Kubelet upgrade
    command: >
      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
    delegate_to: "{{ groups.oo_first_master.0 }}"
    when: inventory_hostname in groups.oo_nodes_to_config

  # Only check whether a Docker upgrade is required if docker_upgrade has not
  # already been set to False (see the inventory note after this play).
  - include: ../docker/upgrade_check.yml
    when: (docker_upgrade is not defined or docker_upgrade | bool) and not openshift.common.is_atomic | bool

  - include: ../docker/upgrade.yml
    when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool

  - include: rpm_upgrade.yml
    vars:
      component: "node"
      openshift_version: "{{ openshift_pkg_version | default('') }}"
    when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool

  - include: containerized_node_upgrade.yml
    when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool

  - name: Set node schedulability
    command: >
      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
    delegate_to: "{{ groups.oo_first_master.0 }}"
    when: inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
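
# Note: the Docker upgrade tasks above are skipped when docker_upgrade is
# explicitly set to False. An illustrative inventory snippet (the group name
# is an assumption based on a typical openshift-ansible inventory):
#
#   [OSEv3:vars]
#   docker_upgrade=False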

###############################################################################
# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
###############################################################################

- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
  hosts: oo_masters_to_config
  roles:
  - { role: openshift_cli }
  vars:
    origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
    ent_reconcile_bindings: true
    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
  tasks:
  - name: Verify that the correct command-line tools are available
    shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary }}
    when: openshift.common.is_containerized | bool and verify_upgrade_version is defined

  - name: Reconcile Cluster Roles
    command: >
      {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      policy reconcile-cluster-roles --additive-only=true --confirm
    run_once: true

  - name: Reconcile Cluster Role Bindings
    command: >
      {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      policy reconcile-cluster-role-bindings
      --exclude-groups=system:authenticated
      --exclude-groups=system:authenticated:oauth
      --exclude-groups=system:unauthenticated
      --exclude-users=system:anonymous
      --additive-only=true --confirm
    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
    run_once: true

  - name: Reconcile Security Context Constraints
    command: >
      {{ openshift.common.admin_binary }} policy reconcile-sccs --confirm --additive-only=true
    run_once: true

  - set_fact:
      reconcile_complete: True
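
# reconcile_complete mirrors master_update_complete and is checked by the
# localhost gate below.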
##############################################################################
# Gate on reconcile
##############################################################################
- name: Gate on reconcile
  hosts: localhost
  connection: local
  become: no
  tasks:
  - set_fact:
      reconcile_completed: "{{ hostvars
                             | oo_select_keys(groups.oo_masters_to_config)
                             | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
  - set_fact:
      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
  - fail:
      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
    when: reconcile_failed | length > 0
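
# Reminder: per the note at the top of this playbook, run the restart playbook
# once this playbook completes.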