upgrade_image.yml

---
# INPUT r_etcd_upgrade_version
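
# Usage sketch (not part of this file; the role name, tasks_from path, and
# version below are illustrative assumptions): these tasks are expected to be
# included with r_etcd_upgrade_version supplied by the caller, e.g.
#
#   - include_role:
#       name: etcd
#       tasks_from: upgrade_image
#     vars:
#       r_etcd_upgrade_version: "3.2.22"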

- name: Verify cluster is healthy pre-upgrade
  command: "{{ etcdctlv2 }} cluster-health"

- name: Get current image
  shell: "grep 'ExecStart=' {{ etcd_service_file }} | awk '{print $NF}'"
  register: current_image
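
# The awk '{print $NF}' above keeps only the last field of the ExecStart line,
# which is assumed to be the container image reference for the etcd container.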

- name: Set new_etcd_image
  set_fact:
    new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ r_etcd_upgrade_version ) }}"
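
# Example of the substitution above (registry and tags are hypothetical):
# with current_image.stdout "registry.example.com/rhel7/etcd:3.1.9" and
# r_etcd_upgrade_version "3.2.22", new_etcd_image becomes
# "registry.example.com/rhel7/etcd:3.2.22".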

- name: Pull new etcd image
  command: "docker pull {{ new_etcd_image }}"

- name: Update to latest etcd image
  replace:
    dest: "{{ etcd_service_file }}"
    regexp: "{{ current_image.stdout }}$"
    replace: "{{ new_etcd_image }}"

- name: Restart etcd_container
  systemd:
    name: "{{ etcd_service }}"
    daemon_reload: yes
    state: restarted
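
# daemon_reload makes systemd re-read the edited unit file, so the restarted
# container runs new_etcd_image rather than the previously configured image.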

## TODO: this should probably move into the backup playbooks; it will also fail
## on Atomic Host. We need to revisit how to do etcd backups there, as the
## container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1)
- name: Detecting Atomic Host Operating System
  stat:
    path: /run/ostree-booted
  register: l_ostree_booted
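
# /run/ostree-booted exists only on OSTree-based systems (Atomic Host), so the
# package-based etcdctl upgrade below is skipped there.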

- name: Upgrade etcd for etcdctl when not atomic
  package:
    name: etcd
    state: latest
  when: not l_ostree_booted.stat.exists | bool

- name: Verify cluster is healthy
  command: "{{ etcdctlv2 }} cluster-health"
  register: etcdctl
  until: etcdctl.rc == 0
  retries: 3
  delay: 10
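
# The until/retries/delay loop above gives the restarted etcd a short window
# (3 retries, 10 seconds apart) to report healthy before the play continues.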

- name: Store new etcd_image
  # DEPENDENCY openshift_facts
  openshift_facts:
    role: etcd
    local_facts:
      etcd_image: "{{ new_etcd_image }}"
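
# openshift_facts is assumed to be the repository's local fact module (see the
# DEPENDENCY note above); it records the upgraded image for use by later plays.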