# upgrade.yml
  1. ---
  2. ###############################################################################
  3. # The restart playbook should be run after this playbook completes.
  4. ###############################################################################
  5. # Separate step so we can execute in parallel and clear out anything unused
  6. # before we get into the serialized upgrade process which will then remove
  7. # remaining images if possible.
  8. - name: Cleanup unused Docker images
  9. hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
  10. tasks:
  11. - name: Check Docker image count
  12. shell: "docker images -aq | wc -l"
  13. register: docker_image_count
  14. when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
  15. - debug: var=docker_image_count.stdout
  16. when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
  17. - name: Remove unused Docker images for Docker 1.10+ migration
  18. shell: "docker rmi `docker images -aq`"
  19. # Will fail on images still in use:
  20. failed_when: false
  21. when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
  22. - name: Check Docker image count
  23. shell: "docker images -aq | wc -l"
  24. register: docker_image_count
  25. when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
  26. - debug: var=docker_image_count.stdout
  27. when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
  28. ###############################################################################
  29. # Upgrade Masters
  30. ###############################################################################
  31. - name: Upgrade master packages
  32. hosts: oo_masters_to_config
  33. handlers:
  34. - include: ../../../../roles/openshift_master/handlers/main.yml
  35. static: yes
  36. roles:
  37. - openshift_facts
  38. tasks:
  39. - include: rpm_upgrade.yml component=master
  40. when: not openshift.common.is_containerized | bool
# Check (on the first master only) whether the service signer certificate
# already exists; the result is consumed below via hostvars.
- name: Determine if service signer cert must be created
  hosts: oo_first_master
  tasks:
    - name: Determine if service signer certificate must be created
      stat:
        path: "{{ openshift.common.config_base }}/master/service-signer.crt"
      register: service_signer_cert_stat
      changed_when: false

# Create service signer cert when missing. Service signer certificate
# is added to master config in the master config hook for v3_3.
- include: create_service_signer_cert.yml
  when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
  53. - name: Upgrade master config and systemd units
  54. hosts: oo_masters_to_config
  55. handlers:
  56. - include: ../../../../roles/openshift_master/handlers/main.yml
  57. static: yes
  58. roles:
  59. - openshift_facts
  60. tasks:
  61. - include: "{{ master_config_hook }}"
  62. when: master_config_hook is defined
  63. - include_vars: ../../../../roles/openshift_master/vars/main.yml
  64. - name: Update systemd units
  65. include: ../../../../roles/openshift_master/tasks/systemd_units.yml
  66. # - name: Upgrade master configuration
  67. # openshift_upgrade_config:
  68. # from_version: '3.1'
  69. # to_version: '3.2'
  70. # role: master
  71. # config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
  72. - name: Check for ca-bundle.crt
  73. stat:
  74. path: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
  75. register: ca_bundle_stat
  76. failed_when: false
  77. - name: Check for ca.crt
  78. stat:
  79. path: "{{ openshift.common.config_base }}/master/ca.crt"
  80. register: ca_crt_stat
  81. failed_when: false
  82. - name: Migrate ca.crt to ca-bundle.crt
  83. command: mv ca.crt ca-bundle.crt
  84. args:
  85. chdir: "{{ openshift.common.config_base }}/master"
  86. when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
  87. - name: Link ca.crt to ca-bundle.crt
  88. file:
  89. src: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
  90. path: "{{ openshift.common.config_base }}/master/ca.crt"
  91. state: link
  92. when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
  93. - name: Set master update status to complete
  94. hosts: oo_masters_to_config
  95. tasks:
  96. - set_fact:
  97. master_update_complete: True
  98. ##############################################################################
  99. # Gate on master update complete
  100. ##############################################################################
  101. - name: Gate on master update
  102. hosts: localhost
  103. connection: local
  104. become: no
  105. tasks:
  106. - set_fact:
  107. master_update_completed: "{{ hostvars
  108. | oo_select_keys(groups.oo_masters_to_config)
  109. | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
  110. - set_fact:
  111. master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
  112. - fail:
  113. msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
  114. when: master_update_failed | length > 0
  115. ###############################################################################
  116. # Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
  117. ###############################################################################
  118. - name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
  119. hosts: oo_masters_to_config
  120. roles:
  121. - { role: openshift_cli }
  122. vars:
  123. origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
  124. ent_reconcile_bindings: true
  125. openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
  126. # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role,
  127. # it will be updated when we perform node upgrade.
  128. docker_protect_installed_version: True
  129. tasks:
  130. - name: Verifying the correct commandline tools are available
  131. shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
  132. when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
  133. - name: Reconcile Cluster Roles
  134. command: >
  135. {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
  136. policy reconcile-cluster-roles --additive-only=true --confirm
  137. run_once: true
  138. - name: Reconcile Cluster Role Bindings
  139. command: >
  140. {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
  141. policy reconcile-cluster-role-bindings
  142. --exclude-groups=system:authenticated
  143. --exclude-groups=system:authenticated:oauth
  144. --exclude-groups=system:unauthenticated
  145. --exclude-users=system:anonymous
  146. --additive-only=true --confirm
  147. when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
  148. run_once: true
  149. - name: Reconcile Security Context Constraints
  150. command: >
  151. {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true
  152. run_once: true
  153. - set_fact:
  154. reconcile_complete: True
  155. ###############################################################################
  156. # Upgrade Nodes
  157. ###############################################################################
  158. # Here we handle all tasks that might require a node evac. (upgrading docker, and the node service)
  159. - name: Perform upgrades that may require node evacuation
  160. hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
  161. serial: 1
  162. any_errors_fatal: true
  163. roles:
  164. - openshift_facts
  165. handlers:
  166. - include: ../../../../roles/openshift_node/handlers/main.yml
  167. static: yes
  168. tasks:
  169. # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
  170. # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
  171. # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
  172. - name: Determine if node is currently scheduleable
  173. command: >
  174. {{ openshift.common.client_binary }} get node {{ openshift.node.nodename }} -o json
  175. register: node_output
  176. delegate_to: "{{ groups.oo_first_master.0 }}"
  177. changed_when: false
  178. when: inventory_hostname in groups.oo_nodes_to_config
  179. - set_fact:
  180. was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
  181. when: inventory_hostname in groups.oo_nodes_to_config
  182. - name: Mark unschedulable if host is a node
  183. command: >
  184. {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false
  185. delegate_to: "{{ groups.oo_first_master.0 }}"
  186. when: inventory_hostname in groups.oo_nodes_to_config
  187. - name: Evacuate Node for Kubelet upgrade
  188. command: >
  189. {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force
  190. delegate_to: "{{ groups.oo_first_master.0 }}"
  191. when: inventory_hostname in groups.oo_nodes_to_config
  192. - include: docker/upgrade.yml
  193. when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
  194. - include: "{{ node_config_hook }}"
  195. when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config
  196. - include: rpm_upgrade.yml
  197. vars:
  198. component: "node"
  199. openshift_version: "{{ openshift_pkg_version | default('') }}"
  200. when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
  201. - include: containerized_node_upgrade.yml
  202. when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
  203. - meta: flush_handlers
  204. - name: Set node schedulability
  205. command: >
  206. {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true
  207. delegate_to: "{{ groups.oo_first_master.0 }}"
  208. when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool
  209. ##############################################################################
  210. # Gate on reconcile
  211. ##############################################################################
  212. - name: Gate on reconcile
  213. hosts: localhost
  214. connection: local
  215. become: no
  216. tasks:
  217. - set_fact:
  218. reconcile_completed: "{{ hostvars
  219. | oo_select_keys(groups.oo_masters_to_config)
  220. | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
  221. - set_fact:
  222. reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
  223. - fail:
  224. msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
  225. when: reconcile_failed | length > 0