# pre.yml
---
###############################################################################
# Evaluate host groups and gather facts
###############################################################################
- include: ../../initialize_facts.yml

- name: Update repos and initialize facts on all hosts
  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
  roles:
  - openshift_repos

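# The play below builds the comma-separated list of internal hostnames that
# should bypass any configured HTTP/HTTPS proxy: oo_select_keys narrows hostvars
# to the node, master, and etcd hosts and oo_collect pulls each host's
# openshift.common.hostname (both are openshift-ansible filter plugins). On a
# hypothetical three-host cluster the result would look like
# "master1.example.com,node1.example.com,node2.example.com".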
- name: Set openshift_no_proxy_internal_hostnames
  hosts: oo_masters_to_config:oo_nodes_to_config
  tasks:
  - set_fact:
      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
                                                   | union(groups['oo_masters_to_config'])
                                                   | union(groups['oo_etcd_to_config'] | default([])))
                                                 | oo_collect('openshift.common.hostname') | default([]) | join(',')
                                              }}"
    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
              openshift_generate_no_proxy_hosts | default(True) | bool }}"

- name: Evaluate additional groups for upgrade
  hosts: localhost
  connection: local
  become: no
  tasks:
  - name: Evaluate etcd_hosts_to_backup
    add_host:
      name: "{{ item }}"
      groups: etcd_hosts_to_backup
    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"
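  # etcd_hosts_to_backup ends up holding the dedicated etcd hosts when an
  # external etcd cluster is present in the inventory; otherwise it falls back
  # to the first master, where embedded etcd data is expected to live.
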
###############################################################################
# Pre-upgrade checks
###############################################################################
- name: Verify upgrade can proceed on first master
  hosts: oo_first_master
  vars:
    target_version: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
    g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type == 'origin' else '.com/enterprise/3.1' }}"
  gather_facts: no
  tasks:
  - fail:
      msg: >
        This upgrade is only supported for atomic-enterprise, origin, openshift-enterprise, and online
        deployment types
    when: deployment_type not in ['atomic-enterprise', 'origin', 'openshift-enterprise', 'online']

  - fail:
      msg: >
        This upgrade does not support Pacemaker:
        https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
    when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'

  # Error out in situations where the user has older versions specified in their
  # inventory in any of the openshift_release, openshift_image_tag, and
  # openshift_pkg_version variables. These must be removed or updated to proceed
  # with the upgrade.
  # TODO: Should we block if you're *over* the next major release version as well?
  - fail:
      msg: >
        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
        valid version for a {{ target_version }} upgrade
    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version, '<')

  - fail:
      msg: >
        openshift_image_tag is {{ openshift_image_tag }} which is not a
        valid version for a {{ target_version }} upgrade
    when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version, '<')

  - set_fact:
      openshift_release: "{{ openshift_release[1:] }}"
    when: openshift_release is defined and openshift_release[0] == 'v'

  - fail:
      msg: >
        openshift_release is {{ openshift_release }} which is not a
        valid release for a {{ target_version }} upgrade
    when: openshift_release is defined and not openshift_release | version_compare(target_version, '=')
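  # Worked example of the guards above, assuming the usual variable formats
  # (hypothetical values): openshift_pkg_version is typically "-3.2.0.46", so
  # split('-',1).1 yields "3.2.0.46"; openshift_image_tag is typically
  # "v3.2.0.46", so split('v',1).1 yields "3.2.0.46"; a bare openshift_release
  # of "v3.1" is first stripped to "3.1" and then rejected because it does not
  # equal the 3.2 target_version.
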
- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
  vars:
    # Request openshift_release 3.2 and let the openshift_version role handle converting this
    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
    # defined, and overriding the normal behavior of protecting the installed version.
    openshift_release: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
    openshift_protect_installed_version: False

    # The docker role (a dependency) should be told not to do anything to the installed
    # version of docker; we handle that separately during the upgrade. (The inventory may
    # have a docker_version defined, but we don't want to apply it until later.)
    docker_protect_installed_version: True

- name: Verify master processes
  hosts: oo_masters_to_config
  roles:
  - openshift_facts
  tasks:
  - openshift_facts:
      role: master
      local_facts:
        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
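  # The ha fact set above selects which of the service checks below applies: a
  # single master runs one combined "-master" service, while an HA (multi-master)
  # install runs separate "-master-api" and "-master-controllers" services. All
  # three checks are limited to containerized installs by their when clauses.
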
  - name: Ensure Master is running
    service:
      name: "{{ openshift.common.service_type }}-master"
      state: started
      enabled: yes
    when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool

  - name: Ensure HA Master API is running
    service:
      name: "{{ openshift.common.service_type }}-master-api"
      state: started
      enabled: yes
    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool

  - name: Ensure HA Master controllers are running
    service:
      name: "{{ openshift.common.service_type }}-master-controllers"
      state: started
      enabled: yes
    when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool

- name: Verify node processes
  hosts: oo_nodes_to_config
  roles:
  - openshift_facts
  - openshift_docker_facts
  tasks:
  - name: Ensure Node is running
    service:
      name: "{{ openshift.common.service_type }}-node"
      state: started
      enabled: yes
    when: openshift.common.is_containerized | bool

- name: Verify upgrade targets
  hosts: oo_masters_to_config:oo_nodes_to_config
  vars:
    target_version: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
  pre_tasks:
  - fail:
      msg: OpenShift is not yet installed on this host; it must already be installed in order to upgrade
    when: openshift.common.version is not defined

  - fail:
      msg: The detected openshift_version does not match verify_upgrade_version
    when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version

  - name: Clean package cache
    command: "{{ ansible_pkg_mgr }} clean all"
    when: not openshift.common.is_atomic | bool

  - set_fact:
      g_new_service_name: "{{ 'origin' if deployment_type == 'origin' else 'atomic-openshift' }}"
    when: not openshift.common.is_containerized | bool
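  # The package cache clean above ensures the repoquery check below reads fresh
  # repository metadata rather than stale cached data when determining the
  # newest available OpenShift RPM.
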
  - name: Verify containers are available for upgrade
    command: >
      docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
    register: pull_result
    changed_when: "'Downloaded newer image' in pull_result.stdout"
    when: openshift.common.is_containerized | bool

  - name: Check latest available OpenShift RPM version
    command: >
      {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
    failed_when: false
    changed_when: false
    register: avail_openshift_version
    when: not openshift.common.is_containerized | bool
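  # avail_openshift_version.stdout holds the newest version repoquery can see for
  # the base package (a hypothetical "3.2.1.15", for example); the two checks
  # below compare it against openshift_release and abort when the configured
  # repos only offer something older.
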
  - name: Verify OpenShift 3.2 RPMs are available for upgrade
    fail:
      msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but 3.2 or greater is required"
    when: deployment_type != 'origin' and not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')

  - name: Verify Origin 1.2 RPMs are available for upgrade
    fail:
      msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but 1.2 or greater is required"
    when: deployment_type == 'origin' and not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')

  # TODO: Are these two grep checks necessary anymore?
  # Note: the version number is hardcoded here in hopes of catching potential
  # bugs in how g_aos_versions.curr_version is set
  - name: Verifying the correct version is installed for upgrade
    shell: grep 3.1.1.6 {{ item }}
    with_items:
    - /etc/sysconfig/openvswitch
    - /etc/sysconfig/{{ openshift.common.service_type }}*
    when: verify_upgrade_version is defined

  - name: Verifying the image version is used in the systemd unit
    shell: grep IMAGE_VERSION {{ item }}
    with_items:
    - /etc/systemd/system/openvswitch.service
    - /etc/systemd/system/{{ openshift.common.service_type }}*.service
    when: openshift.common.is_containerized | bool and verify_upgrade_version is defined

  - fail:
      msg: This upgrade playbook must be run on Origin 1.1 or later
    when: deployment_type == 'origin' and openshift.common.version | version_compare('1.1','<')

  - fail:
      msg: This upgrade playbook must be run on OpenShift Enterprise 3.1 or later
    when: deployment_type != 'origin' and openshift.common.version | version_compare('3.1','<')

- name: Verify docker upgrade targets
  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
  tasks:
  - name: Determine available Docker (RPM-based hosts)
    script: ../files/rpm_versions.sh docker
    register: g_docker_version_result
    when: not openshift.common.is_atomic | bool

  - name: Determine available Docker (Atomic Host)
    shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
    register: g_atomic_docker_version_result
    when: openshift.common.is_atomic | bool

  - set_fact:
      g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
    when: not openshift.common.is_atomic | bool

  - set_fact:
      g_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
    when: openshift.common.is_atomic | bool
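  # Both paths above emit a small YAML document on stdout, for example
  # (hypothetical values):
  #   ---
  #   curr_version: 1.9.1
  #   avail_version: 1.10.3
  # from_yaml turns that into the g_docker_version dict used by the check below;
  # on Atomic Host avail_version is left empty, so default() falls back to
  # curr_version.
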
  - fail:
      msg: This playbook requires access to Docker 1.10 or later
    when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')

  # TODO: add check to upgrade ostree to get latest Docker

  - set_fact:
      pre_upgrade_complete: True

###############################################################################
# Gate on pre-upgrade checks
###############################################################################
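# How the gate works: oo_select_keys narrows hostvars to the hosts that ran the
# pre-upgrade plays, oo_collect('inventory_hostname', {'pre_upgrade_complete': true})
# returns the names of hosts that set the completion fact, and difference()
# leaves the hosts that failed somewhere above; any such host blocks the upgrade.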
- name: Gate on pre-upgrade checks
  hosts: localhost
  connection: local
  become: no
  vars:
    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
  tasks:
  - set_fact:
      pre_upgrade_completed: "{{ hostvars
                                 | oo_select_keys(pre_upgrade_hosts)
                                 | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
  - set_fact:
      pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
  - fail:
      msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
    when: pre_upgrade_failed | length > 0

###############################################################################
# Backup etcd
###############################################################################
- name: Backup etcd
  hosts: etcd_hosts_to_backup
  vars:
    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
  roles:
  - openshift_facts
  tasks:
  # Ensure we persist the etcd role for this host in openshift_facts
  - openshift_facts:
      role: etcd
      local_facts: {}
    when: "'etcd' not in openshift"

  - stat: path=/var/lib/openshift
    register: var_lib_openshift

  - stat: path=/var/lib/origin
    register: var_lib_origin

  - name: Create origin symlink if necessary
    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
    when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
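  # For embedded etcd, the tasks below compare the free space on the filesystem
  # holding openshift.common.data_dir (df, in KiB) against the current size of
  # the etcd data dir (du, in KiB) and abort when there is not enough room; the
  # assumption is that an etcdctl backup needs roughly as much space as the data
  # it copies. For example (hypothetical numbers), 2000000 KiB used with only
  # 1500000 KiB free would abort here.
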
  # TODO: replace shell module with command and update later checks
  # We assume the data dir is used for all backups.
  - name: Check available disk space for etcd backup
    shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
    register: avail_disk

  # TODO: replace shell module with command and update later checks
  - name: Check current embedded etcd disk usage
    shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
    register: etcd_disk_usage
    when: embedded_etcd | bool

  - name: Abort if insufficient disk space for etcd backup
    fail:
      msg: >
        {{ etcd_disk_usage.stdout }} KiB disk space required for etcd backup,
        {{ avail_disk.stdout }} KiB available.
    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)

  - name: Install etcd (for etcdctl)
    action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
    when: not openshift.common.is_atomic | bool

  - name: Generate etcd backup
    command: >
      etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}

  - set_fact:
      etcd_backup_complete: True

  - name: Display location of etcd backup
    debug:
      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"

###############################################################################
# Gate on etcd backup
###############################################################################
- name: Gate on etcd backup
  hosts: localhost
  connection: local
  become: no
  tasks:
  - set_fact:
      etcd_backup_completed: "{{ hostvars
                                 | oo_select_keys(groups.etcd_hosts_to_backup)
                                 | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
  - set_fact:
      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
  - fail:
      msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
    when: etcd_backup_failed | length > 0