# upgrade.yml
  1. ---
  2. - name: Verify upgrade can proceed
  3. hosts: masters
  4. tasks:
  5. # Checking the global deployment type rather than host facts, this is about
  6. # what the user is requesting.
  7. - fail: msg="Deployment type enterprise not supported for upgrade"
  8. when: deployment_type == "enterprise"
  9. - name: Backup etcd
  10. hosts: masters
  11. vars:
  12. embedded_etcd: "{{ openshift.master.embedded_etcd }}"
  13. timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
  14. roles:
  15. - openshift_facts
  16. tasks:
  17. - stat: path=/var/lib/openshift
  18. register: var_lib_openshift
  19. - name: Create origin symlink if necessary
  20. file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
  21. when: var_lib_openshift.stat.exists == True
  22. - name: Check available disk space for etcd backup
  23. # We assume to be using the data dir for all backups.
  24. shell: >
  25. df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
  26. register: avail_disk
  27. - name: Check current embedded etcd disk usage
  28. shell: >
  29. du -k {{ openshift.master.etcd_data_dir }} | tail -n 1 | cut -f1
  30. register: etcd_disk_usage
  31. when: embedded_etcd | bool
  32. - name: Abort if insufficient disk space for etcd backup
  33. fail: msg="{{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, {{ avail_disk.stdout }} Kb available."
  34. when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
  35. - name: Install etcd (for etcdctl)
  36. yum: pkg=etcd state=latest
  37. - name: Generate etcd backup
  38. command: etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }} --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
  39. - name: Display location of etcd backup
  40. debug: msg="Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
  41. - name: Upgrade base package on masters
  42. hosts: masters
  43. roles:
  44. - openshift_facts
  45. vars:
  46. openshift_version: "{{ openshift_pkg_version | default('') }}"
  47. tasks:
  48. - name: Upgrade base package
  49. yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest
  50. - name: Evaluate oo_first_master
  51. hosts: localhost
  52. vars:
  53. g_masters_group: "{{ 'masters' }}"
  54. tasks:
  55. - name: Evaluate oo_first_master
  56. add_host:
  57. name: "{{ groups[g_masters_group][0] }}"
  58. groups: oo_first_master
  59. ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
  60. ansible_sudo: "{{ g_sudo | default(omit) }}"
  61. when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
  62. # TODO: ideally we would check the new version, without installing it. (some
  63. # kind of yum repoquery? would need to handle openshift -> atomic-openshift
  64. # package rename)
  65. - name: Perform upgrade version checking
  66. hosts: oo_first_master
  67. tasks:
  68. - name: Determine new version
  69. command: >
  70. rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}
  71. register: _new_version
  72. - name: Ensure AOS 3.0.2 or Origin 1.0.6
  73. hosts: oo_first_master
  74. tasks:
  75. fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
  76. when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
  77. - name: Verify upgrade can proceed
  78. hosts: oo_first_master
  79. tasks:
  80. # Checking the global deployment type rather than host facts, this is about
  81. # what the user is requesting.
  82. - fail: msg="Deployment type 'enterprise' must be updated to 'openshift-enterprise' for upgrade to proceed"
  83. when: deployment_type == "enterprise" and (_new_version.stdout | version_compare('1.0.7', '>=') or _new_version.stdout | version_compare('3.1', '>='))
  84. - name: Upgrade masters
  85. hosts: masters
  86. vars:
  87. openshift_version: "{{ openshift_pkg_version | default('') }}"
  88. tasks:
  89. - name: Upgrade to latest available kernel
  90. yum: pkg=kernel state=latest
  91. - name: display just the deployment_type variable for the current host
  92. debug:
  93. var: hostvars[inventory_hostname]
  94. - name: Upgrade master packages
  95. command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
  96. - name: Upgrade master configuration.
  97. openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master
  98. - name: Restart master services
  99. service: name="{{ openshift.common.service_type}}-master" state=restarted
  100. - name: Upgrade nodes
  101. hosts: nodes
  102. vars:
  103. openshift_version: "{{ openshift_pkg_version | default('') }}"
  104. roles:
  105. - openshift_facts
  106. tasks:
  107. - name: Upgrade node packages
  108. command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }}
  109. - name: Restart node services
  110. service: name="{{ openshift.common.service_type }}-node" state=restarted
  111. - name: Update cluster policy
  112. hosts: oo_first_master
  113. tasks:
  114. - name: oadm policy reconcile-cluster-roles --confirm
  115. command: >
  116. {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
  117. policy reconcile-cluster-roles --confirm
  118. - name: Update cluster policy bindings
  119. hosts: oo_first_master
  120. tasks:
  121. - name: oadm policy reconcile-cluster-role-bindings --confirm
  122. command: >
  123. {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
  124. policy reconcile-cluster-role-bindings
  125. --exclude-groups=system:authenticated
  126. --exclude-groups=system:unauthenticated
  127. --exclude-users=system:anonymous
  128. --additive-only=true --confirm
  129. when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
  130. - name: Upgrade default router
  131. hosts: oo_first_master
  132. vars:
  133. - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
  134. - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
  135. tasks:
  136. - name: Check for default router
  137. command: >
  138. {{ oc_cmd }} get -n default dc/router
  139. register: _default_router
  140. failed_when: false
  141. changed_when: false
  142. - name: Check for allowHostNetwork and allowHostPorts
  143. when: _default_router.rc == 0
  144. shell: >
  145. {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
  146. register: _scc
  147. - name: Grant allowHostNetwork and allowHostPorts
  148. when:
  149. - _default_router.rc == 0
  150. - "'false' in _scc.stdout"
  151. command: >
  152. {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
  153. - name: Update deployment config to 1.0.4/3.0.1 spec
  154. when: _default_router.rc == 0
  155. command: >
  156. {{ oc_cmd }} patch dc/router -p
  157. '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
  158. - name: Switch to hostNetwork=true
  159. when: _default_router.rc == 0
  160. command: >
  161. {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
  162. - name: Update router image to current version
  163. when: _default_router.rc == 0
  164. command: >
  165. {{ oc_cmd }} patch dc/router -p
  166. '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
  167. - name: Upgrade default
  168. hosts: oo_first_master
  169. vars:
  170. - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
  171. - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
  172. tasks:
  173. - name: Check for default registry
  174. command: >
  175. {{ oc_cmd }} get -n default dc/docker-registry
  176. register: _default_registry
  177. failed_when: false
  178. changed_when: false
  179. - name: Update registry image to current version
  180. when: _default_registry.rc == 0
  181. command: >
  182. {{ oc_cmd }} patch dc/docker-registry -p
  183. '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
  184. - name: Update image streams and templates
  185. hosts: oo_first_master
  186. vars:
  187. openshift_examples_import_command: "update"
  188. openshift_deployment_type: "{{ deployment_type }}"
  189. roles:
  190. - openshift_examples