# pre.yml — pre-upgrade checks, etcd backup, and gating for the OpenShift upgrade.
  1. ---
  2. ###############################################################################
  3. # Evaluate host groups and gather facts
  4. ###############################################################################
  5. - name: Load openshift_facts and update repos
  6. hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
  7. roles:
  8. - openshift_facts
  9. - openshift_repos
  10. - name: Evaluate additional groups for upgrade
  11. hosts: localhost
  12. connection: local
  13. become: no
  14. tasks:
  15. - name: Evaluate etcd_hosts_to_backup
  16. add_host:
  17. name: "{{ item }}"
  18. groups: etcd_hosts_to_backup
  19. with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
  20. ###############################################################################
  21. # Pre-upgrade checks
  22. ###############################################################################
  23. - name: Verify upgrade can proceed
  24. hosts: oo_first_master
  25. vars:
  26. target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
  27. g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}"
  28. gather_facts: no
  29. tasks:
  30. - fail:
  31. msg: >
  32. This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
  33. deployment types
  34. when: deployment_type not in ['atomic-enterprise', 'origin','openshift-enterprise', 'online']
  35. - fail:
  36. msg: >
  37. This upgrade does not support Pacemaker:
  38. https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
  39. when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
  40. - fail:
  41. msg: >
  42. openshift_pkg_version is {{ openshift_pkg_version }} which is not a
  43. valid version for a {{ target_version }} upgrade
  44. when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
  45. - fail:
  46. msg: >
  47. openshift_image_tag is {{ openshift_image_tag }} which is not a
  48. valid version for a {{ target_version }} upgrade
  49. when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<')
  50. - name: Verify upgrade can proceed
  51. hosts: oo_masters_to_config
  52. roles:
  53. - openshift_facts
  54. tasks:
  55. - openshift_facts:
  56. role: master
  57. local_facts:
  58. ha: "{{ groups.oo_masters_to_config | length > 1 }}"
  59. - name: Ensure Master is running
  60. service:
  61. name: "{{ openshift.common.service_type }}-master"
  62. state: started
  63. enabled: yes
  64. when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool
  65. - name: Ensure HA Master is running
  66. service:
  67. name: "{{ openshift.common.service_type }}-master-api"
  68. state: started
  69. enabled: yes
  70. when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
  71. - name: Ensure HA Master is running
  72. service:
  73. name: "{{ openshift.common.service_type }}-master-controllers"
  74. state: started
  75. enabled: yes
  76. when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
  77. - name: Verify upgrade can proceed
  78. hosts: oo_nodes_to_config
  79. roles:
  80. - openshift_facts
  81. tasks:
  82. - name: Ensure Node is running
  83. service:
  84. name: "{{ openshift.common.service_type }}-node"
  85. state: started
  86. enabled: yes
  87. when: openshift.common.is_containerized | bool
  88. - name: Verify upgrade can proceed
  89. hosts: oo_masters_to_config:oo_nodes_to_config
  90. vars:
  91. target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
  92. openshift_docker_hosted_registry_insecure: True
  93. openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
  94. handlers:
  95. - include: ../../../../../roles/openshift_master/handlers/main.yml
  96. - include: ../../../../../roles/openshift_node/handlers/main.yml
  97. roles:
  98. # We want the cli role to evaluate so that the containerized oc/oadm wrappers
  99. # are modified to use the correct image tag. However, this can trigger a
  100. # docker restart if new configuration is laid down which would immediately
  101. # pull the latest image and defeat the purpose of these tasks.
  102. - openshift_cli
  103. pre_tasks:
  104. - name: Clean package cache
  105. command: "{{ ansible_pkg_mgr }} clean all"
  106. when: not openshift.common.is_atomic | bool
  107. - set_fact:
  108. g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
  109. when: not openshift.common.is_containerized | bool
  110. - name: Determine available versions
  111. script: ../files/rpm_versions.sh {{ g_new_service_name }}
  112. register: g_rpm_versions_result
  113. when: not openshift.common.is_containerized | bool
  114. - set_fact:
  115. g_aos_versions: "{{ g_rpm_versions_result.stdout | from_yaml }}"
  116. when: not openshift.common.is_containerized | bool
  117. - name: Determine available versions
  118. script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }}
  119. register: g_containerized_versions_result
  120. when: openshift.common.is_containerized | bool
  121. - set_fact:
  122. g_aos_versions: "{{ g_containerized_versions_result.stdout | from_yaml }}"
  123. when: openshift.common.is_containerized | bool
  124. - set_fact:
  125. g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
  126. when: openshift_pkg_version is not defined
  127. - set_fact:
  128. g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
  129. when: openshift_pkg_version is defined
  130. - set_fact:
  131. g_new_version: "{{ openshift_image_tag | replace('v','') }}"
  132. when: openshift_image_tag is defined
  133. - fail:
  134. msg: Verifying the correct version was found
  135. when: g_aos_versions.curr_version == ""
  136. - fail:
  137. msg: Verifying the correct version was found
  138. when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
  139. - include_vars: ../../../../../roles/openshift_master/vars/main.yml
  140. when: inventory_hostname in groups.oo_masters_to_config
  141. - name: Update systemd units
  142. include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
  143. when: inventory_hostname in groups.oo_masters_to_config
  144. - include_vars: ../../../../../roles/openshift_node/vars/main.yml
  145. when: inventory_hostname in groups.oo_nodes_to_config
  146. - name: Update systemd units
  147. include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
  148. when: inventory_hostname in groups.oo_nodes_to_config
  149. # Note: the version number is hardcoded here in hopes of catching potential
  150. # bugs in how g_aos_versions.curr_version is set
  151. - name: Verifying the correct version is installed for upgrade
  152. shell: grep 3.1.1.6 {{ item }}
  153. with_items:
  154. - /etc/sysconfig/openvswitch
  155. - /etc/sysconfig/{{ openshift.common.service_type }}*
  156. when: verify_upgrade_version is defined
  157. - name: Verifying the image version is used in the systemd unit
  158. shell: grep IMAGE_VERSION {{ item }}
  159. with_items:
  160. - /etc/systemd/system/openvswitch.service
  161. - /etc/systemd/system/{{ openshift.common.service_type }}*.service
  162. when: openshift.common.is_containerized | bool
  163. - fail:
  164. msg: This playbook requires Origin 1.1 or later
  165. when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
  166. - fail:
  167. msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
  168. when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
  169. - fail:
  170. msg: Upgrade packages not found
  171. when: openshift_image_tag is not defined and (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
  172. - name: Determine available Docker
  173. script: ../files/rpm_versions.sh docker
  174. register: g_docker_version_result
  175. when: not openshift.common.is_atomic | bool
  176. - name: Determine available Docker
  177. shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
  178. register: g_atomic_docker_version_result
  179. when: openshift.common.is_atomic | bool
  180. - set_fact:
  181. g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
  182. when: not openshift.common.is_atomic | bool
  183. - set_fact:
  184. g_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
  185. when: openshift.common.is_atomic | bool
  186. - fail:
  187. msg: This playbook requires access to Docker 1.9 or later
  188. when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<')
  189. # TODO: add check to upgrade ostree to get latest Docker
  190. - set_fact:
  191. pre_upgrade_complete: True
  192. ##############################################################################
  193. # Gate on pre-upgrade checks
  194. ##############################################################################
  195. - name: Gate on pre-upgrade checks
  196. hosts: localhost
  197. connection: local
  198. become: no
  199. vars:
  200. pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
  201. tasks:
  202. - set_fact:
  203. pre_upgrade_completed: "{{ hostvars
  204. | oo_select_keys(pre_upgrade_hosts)
  205. | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
  206. - set_fact:
  207. pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
  208. - fail:
  209. msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
  210. when: pre_upgrade_failed | length > 0
  211. ###############################################################################
  212. # Backup etcd
  213. ###############################################################################
  214. - name: Backup etcd
  215. hosts: etcd_hosts_to_backup
  216. vars:
  217. embedded_etcd: "{{ openshift.master.embedded_etcd }}"
  218. timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
  219. roles:
  220. - openshift_facts
  221. tasks:
  222. # Ensure we persist the etcd role for this host in openshift_facts
  223. - openshift_facts:
  224. role: etcd
  225. local_facts: {}
  226. when: "'etcd' not in openshift"
  227. - stat: path=/var/lib/openshift
  228. register: var_lib_openshift
  229. - stat: path=/var/lib/origin
  230. register: var_lib_origin
  231. - name: Create origin symlink if necessary
  232. file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
  233. when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
  234. # TODO: replace shell module with command and update later checks
  235. # We assume to be using the data dir for all backups.
  236. - name: Check available disk space for etcd backup
  237. shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
  238. register: avail_disk
  239. # TODO: replace shell module with command and update later checks
  240. - name: Check current embedded etcd disk usage
  241. shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
  242. register: etcd_disk_usage
  243. when: embedded_etcd | bool
  244. - name: Abort if insufficient disk space for etcd backup
  245. fail:
  246. msg: >
  247. {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
  248. {{ avail_disk.stdout }} Kb available.
  249. when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
  250. - name: Install etcd (for etcdctl)
  251. action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
  252. when: not openshift.common.is_atomic | bool
  253. - name: Generate etcd backup
  254. command: >
  255. etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
  256. --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
  257. - set_fact:
  258. etcd_backup_complete: True
  259. - name: Display location of etcd backup
  260. debug:
  261. msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
  262. ##############################################################################
  263. # Gate on etcd backup
  264. ##############################################################################
  265. - name: Gate on etcd backup
  266. hosts: localhost
  267. connection: local
  268. become: no
  269. tasks:
  270. - set_fact:
  271. etcd_backup_completed: "{{ hostvars
  272. | oo_select_keys(groups.etcd_hosts_to_backup)
  273. | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
  274. - set_fact:
  275. etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
  276. - fail:
  277. msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
  278. when: etcd_backup_failed | length > 0