# upgrade.yml
  1. ---
  2. - name: Update deployment type
  3. hosts: OSEv3
  4. roles:
  5. - openshift_facts
  6. post_tasks: # technically tasks are run after roles, but post_tasks is a bit more explicit.
  7. - openshift_facts:
  8. role: common
  9. local_facts:
  10. deployment_type: "{{ deployment_type }}"
  11. - name: Verify upgrade can proceed
  12. hosts: masters
  13. tasks:
  14. # Checking the global deployment type rather than host facts, this is about
  15. # what the user is requesting.
  16. - fail: msg="Deployment type enterprise not supported for upgrade"
  17. when: deployment_type == "enterprise"
  18. - name: Backup etcd
  19. hosts: masters
  20. vars:
  21. embedded_etcd: "{{ openshift.master.embedded_etcd }}"
  22. timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
  23. roles:
  24. - openshift_facts
  25. tasks:
  26. - stat: path=/var/lib/openshift
  27. register: var_lib_openshift
  28. - name: Create origin symlink if necessary
  29. file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
  30. when: var_lib_openshift.stat.exists == True
  31. - name: Check available disk space for etcd backup
  32. # We assume to be using the data dir for all backups.
  33. shell: >
  34. df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
  35. register: avail_disk
  36. - name: Check current embedded etcd disk usage
  37. shell: >
  38. du -k {{ openshift.master.etcd_data_dir }} | tail -n 1 | cut -f1
  39. register: etcd_disk_usage
  40. when: embedded_etcd | bool
  41. - name: Abort if insufficient disk space for etcd backup
  42. fail: msg="{{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, {{ avail_disk.stdout }} Kb available."
  43. when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
  44. - name: Install etcd (for etcdctl)
  45. yum: pkg=etcd state=latest
  46. - name: Generate etcd backup
  47. command: etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }} --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
  48. - name: Display location of etcd backup
  49. debug: msg="Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
  50. - name: Upgrade base package on masters
  51. hosts: masters
  52. roles:
  53. - openshift_facts
  54. vars:
  55. openshift_version: "{{ openshift_pkg_version | default('') }}"
  56. tasks:
  57. - name: Upgrade base package
  58. yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest
  59. - name: Evaluate oo_first_master
  60. hosts: localhost
  61. vars:
  62. g_masters_group: "{{ 'masters' }}"
  63. tasks:
  64. - name: Evaluate oo_first_master
  65. add_host:
  66. name: "{{ groups[g_masters_group][0] }}"
  67. groups: oo_first_master
  68. ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
  69. ansible_sudo: "{{ g_sudo | default(omit) }}"
  70. when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
  71. # TODO: ideally we would check the new version, without installing it. (some
  72. # kind of yum repoquery? would need to handle openshift -> atomic-openshift
  73. # package rename)
  74. - name: Perform upgrade version checking
  75. hosts: oo_first_master
  76. tasks:
  77. - name: Determine new version
  78. command: >
  79. rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}
  80. register: _new_version
  81. - name: Ensure AOS 3.0.2 or Origin 1.0.6
  82. hosts: oo_first_master
  83. tasks:
  84. fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
  85. when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
  86. - name: Verify upgrade can proceed
  87. hosts: oo_first_master
  88. tasks:
  89. # Checking the global deployment type rather than host facts, this is about
  90. # what the user is requesting.
  91. - fail: msg="Deployment type 'enterprise' must be updated to 'openshift-enterprise' for upgrade to proceed"
  92. when: deployment_type == "enterprise" and (_new_version.stdout | version_compare('1.0.7', '>=') or _new_version.stdout | version_compare('3.1', '>='))
  93. - name: Upgrade masters
  94. hosts: masters
  95. vars:
  96. openshift_version: "{{ openshift_pkg_version | default('') }}"
  97. tasks:
  98. - name: Upgrade to latest available kernel
  99. yum: pkg=kernel state=latest
  100. - name: display just the deployment_type variable for the current host
  101. debug:
  102. var: hostvars[inventory_hostname]
  103. - name: Upgrade master packages
  104. command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
  105. - name: Upgrade master configuration.
  106. openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master
  107. - name: Restart master services
  108. service: name="{{ openshift.common.service_type}}-master" state=restarted
  109. - name: Upgrade nodes
  110. hosts: nodes
  111. vars:
  112. openshift_version: "{{ openshift_pkg_version | default('') }}"
  113. roles:
  114. - openshift_facts
  115. tasks:
  116. - name: Upgrade node packages
  117. command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }}
  118. - name: Restart node services
  119. service: name="{{ openshift.common.service_type }}-node" state=restarted
  120. - name: Update cluster policy
  121. hosts: oo_first_master
  122. tasks:
  123. - name: oadm policy reconcile-cluster-roles --confirm
  124. command: >
  125. {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
  126. policy reconcile-cluster-roles --confirm
  127. - name: Update cluster policy bindings
  128. hosts: oo_first_master
  129. tasks:
  130. - name: oadm policy reconcile-cluster-role-bindings --confirm
  131. command: >
  132. {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
  133. policy reconcile-cluster-role-bindings
  134. --exclude-groups=system:authenticated
  135. --exclude-groups=system:unauthenticated
  136. --exclude-users=system:anonymous
  137. --additive-only=true --confirm
  138. when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
  139. - name: Upgrade default router
  140. hosts: oo_first_master
  141. vars:
  142. - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
  143. - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
  144. tasks:
  145. - name: Check for default router
  146. command: >
  147. {{ oc_cmd }} get -n default dc/router
  148. register: _default_router
  149. failed_when: false
  150. changed_when: false
  151. - name: Check for allowHostNetwork and allowHostPorts
  152. when: _default_router.rc == 0
  153. shell: >
  154. {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
  155. register: _scc
  156. - name: Grant allowHostNetwork and allowHostPorts
  157. when:
  158. - _default_router.rc == 0
  159. - "'false' in _scc.stdout"
  160. command: >
  161. {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
  162. - name: Update deployment config to 1.0.4/3.0.1 spec
  163. when: _default_router.rc == 0
  164. command: >
  165. {{ oc_cmd }} patch dc/router -p
  166. '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
  167. - name: Switch to hostNetwork=true
  168. when: _default_router.rc == 0
  169. command: >
  170. {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
  171. - name: Update router image to current version
  172. when: _default_router.rc == 0
  173. command: >
  174. {{ oc_cmd }} patch dc/router -p
  175. '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
  176. - name: Upgrade default
  177. hosts: oo_first_master
  178. vars:
  179. - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
  180. - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
  181. tasks:
  182. - name: Check for default registry
  183. command: >
  184. {{ oc_cmd }} get -n default dc/docker-registry
  185. register: _default_registry
  186. failed_when: false
  187. changed_when: false
  188. - name: Update registry image to current version
  189. when: _default_registry.rc == 0
  190. command: >
  191. {{ oc_cmd }} patch dc/docker-registry -p
  192. '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
  193. - name: Update image streams and templates
  194. hosts: oo_first_master
  195. vars:
  196. openshift_examples_import_command: "update"
  197. openshift_deployment_type: "{{ deployment_type }}"
  198. roles:
  199. - openshift_examples