upgrade.yml

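# Upgrade playbook for OpenShift 3.0 -> 3.1 style environments (see the
# openshift_upgrade_config task below). Illustrative invocation only -- the
# inventory path and extra vars here are examples, not requirements of this
# playbook, which expects "masters" and "nodes" inventory groups:
#   ansible-playbook -i /path/to/inventory upgrade.yml -e deployment_type=origin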
---
- name: Verify upgrade can proceed
  hosts: masters
  tasks:
    # Check the global deployment type rather than host facts; this is about
    # what the user is requesting.
    - fail: msg="Deployment type enterprise not supported for upgrade"
      when: deployment_type == "enterprise"

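# Back up etcd before touching any packages. The backup lands in a timestamped
# directory under the data dir, which is why the tasks below compare current
# etcd usage against free space on that same filesystem.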
- name: Backup etcd
  hosts: masters
  vars:
    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
  roles:
    - openshift_facts
  tasks:
    - stat: path=/var/lib/openshift
      register: var_lib_openshift
    - name: Create origin symlink if necessary
      file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
      when: var_lib_openshift.stat.exists
    - name: Check available disk space for etcd backup
      # We assume the data dir is used for all backups.
      shell: >
        df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
      register: avail_disk
    - name: Check current embedded etcd disk usage
      shell: >
        du -k {{ openshift.master.etcd_data_dir }} | tail -n 1 | cut -f1
      register: etcd_disk_usage
      when: embedded_etcd | bool
    - name: Abort if insufficient disk space for etcd backup
      fail: msg="{{ etcd_disk_usage.stdout }} KB disk space required for etcd backup, {{ avail_disk.stdout }} KB available."
      when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
    - name: Install etcd (for etcdctl)
      yum: pkg=etcd state=latest
    - name: Generate etcd backup
      command: etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }} --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
    - name: Display location of etcd backup
      debug: msg="Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"

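# openshift_pkg_version is appended verbatim to the package name, so when set
# it is assumed to carry yum's version syntax including the leading dash
# (e.g. "-3.0.2.0" -- an illustrative value); left unset, state=latest simply
# pulls the newest available package.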
- name: Upgrade base package on masters
  hosts: masters
  roles:
    - openshift_facts
  vars:
    openshift_version: "{{ openshift_pkg_version | default('') }}"
  tasks:
    - name: Upgrade base package
      yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest

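# Single-host orchestration steps (version detection, policy reconciliation,
# router/registry updates) should run on exactly one master; add_host builds
# that one-member oo_first_master group from the first host in "masters".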
- name: Evaluate oo_first_master
  hosts: localhost
  vars:
    g_masters_group: "{{ 'masters' }}"
  tasks:
    - name: Display all variables set for the current host
      debug:
        var: hostvars[inventory_hostname]
    - name: Evaluate oo_first_master
      add_host:
        name: "{{ groups[g_masters_group][0] }}"
        groups: oo_first_master
        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
        ansible_sudo: "{{ g_sudo | default(omit) }}"
      when: g_masters_group in groups and (groups[g_masters_group] | length) > 0

# TODO: ideally we would check the new version without installing it (some
# kind of yum repoquery? would need to handle the openshift -> atomic-openshift
# package rename).
- name: Perform upgrade version checking
  hosts: oo_first_master
  tasks:
    - name: Determine new version
      command: >
        rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}
      register: _new_version

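# _new_version drives all of the version-gated tasks below. Note the two
# numbering schemes: Origin versions (1.0.x) and Atomic OpenShift versions
# (3.0.x) are compared over separate ranges in each conditional.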
- name: Ensure AOS 3.0.2 or Origin 1.0.6
  hosts: oo_first_master
  tasks:
    - fail: msg="This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later"
      when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )

- name: Verify upgrade can proceed
  hosts: oo_first_master
  tasks:
    # Check the global deployment type rather than host facts; this is about
    # what the user is requesting.
    - fail: msg="Deployment type 'enterprise' must be updated to 'openshift-enterprise' for upgrade to proceed"
      when: deployment_type == "enterprise" and (_new_version.stdout | version_compare('1.0.7', '>=') or _new_version.stdout | version_compare('3.1', '>='))

#- name: Re-run cluster configuration to apply latest configuration changes
#  include: ../../common/openshift-cluster/config.yml
#  vars:
#    g_etcd_group: "{{ 'etcd' }}"
#    g_masters_group: "{{ 'masters' }}"
#    g_nodes_group: "{{ 'nodes' }}"
#    openshift_cluster_id: "{{ cluster_id | default('default') }}"
#    openshift_deployment_type: "{{ deployment_type }}"

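# Note: "yum: pkg=kernel state=latest" installs a newer kernel but does not
# reboot, so the running kernel is unchanged until the host is rebooted
# outside of this playbook.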
- name: Upgrade masters
  hosts: masters
  vars:
    openshift_version: "{{ openshift_pkg_version | default('') }}"
  tasks:
    - name: Upgrade to latest available kernel
      yum: pkg=kernel state=latest
    - name: Upgrade master packages
      yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest
    - name: Upgrade master configuration
      openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master
    - name: Restart master services
      service: name="{{ openshift.common.service_type }}-master" state=restarted

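# Nodes are upgraded only after every master has been upgraded and restarted,
# matching the usual control-plane-first ordering; each node service is
# bounced immediately after its packages update.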
- name: Upgrade nodes
  hosts: nodes
  vars:
    openshift_version: "{{ openshift_pkg_version | default('') }}"
  roles:
    - openshift_facts
  tasks:
    - name: Upgrade node packages
      yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest
    - name: Restart node services
      service: name="{{ openshift.common.service_type }}-node" state=restarted

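# Reconcile cluster roles so the default policy picks up any changes shipped
# with the new version; --confirm applies the changes rather than just
# printing them.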
- name: Update cluster policy
  hosts: oo_first_master
  tasks:
    - name: oadm policy reconcile-cluster-roles --confirm
      command: >
        {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        policy reconcile-cluster-roles --confirm

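# Role bindings are reconciled additively (--additive-only=true), excluding
# the system:authenticated/system:unauthenticated groups and system:anonymous
# so any local customization of those bindings is left alone; the when gate
# restricts this to releases newer than Origin 1.0.6 / OSE 3.0.2.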
- name: Update cluster policy bindings
  hosts: oo_first_master
  tasks:
    - name: oadm policy reconcile-cluster-role-bindings --confirm
      command: >
        {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        policy reconcile-cluster-role-bindings
        --exclude-groups=system:authenticated
        --exclude-groups=system:unauthenticated
        --exclude-users=system:anonymous
        --additive-only=true --confirm
      when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')

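# The default router (if one exists) is moved to host networking: the
# privileged SCC is patched to allow host ports/network where it does not
# already, dc/router is switched to hostNetwork=true, and the router image is
# bumped to the freshly detected version. All tasks skip when no default
# router was found.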
- name: Upgrade default router
  hosts: oo_first_master
  vars:
    - router_image: "{{ openshift.master.registry_url | replace('${component}', 'haproxy-router') | replace('${version}', 'v' + _new_version.stdout) }}"
    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
  tasks:
    - name: Check for default router
      command: >
        {{ oc_cmd }} get -n default dc/router
      register: _default_router
      failed_when: false
      changed_when: false
    - name: Check for allowHostNetwork and allowHostPorts
      when: _default_router.rc == 0
      shell: >
        {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
      register: _scc
    - name: Grant allowHostNetwork and allowHostPorts
      when:
        - _default_router.rc == 0
        - "'false' in _scc.stdout"
      command: >
        {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
    - name: Update deployment config to 1.0.4/3.0.1 spec
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p
        '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
    - name: Switch to hostNetwork=true
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
    - name: Update router image to current version
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p
        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'

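# Same pattern for the default registry: skip everything when no
# dc/docker-registry exists, otherwise point it at the new image.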
- name: Upgrade default registry
  hosts: oo_first_master
  vars:
    - registry_image: "{{ openshift.master.registry_url | replace('${component}', 'docker-registry') | replace('${version}', 'v' + _new_version.stdout) }}"
    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
  tasks:
    - name: Check for default registry
      command: >
        {{ oc_cmd }} get -n default dc/docker-registry
      register: _default_registry
      failed_when: false
      changed_when: false
    - name: Update registry image to current version
      when: _default_registry.rc == 0
      command: >
        {{ oc_cmd }} patch dc/docker-registry -p
        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'

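# Finally, re-import the example image streams and templates in the role's
# "update" mode so existing objects are refreshed rather than re-created.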
- name: Update image streams and templates
  hosts: oo_first_master
  vars:
    openshift_examples_import_command: "update"
    openshift_deployment_type: "{{ deployment_type }}"
  roles:
    - openshift_examples