upgrade_image.yml

---
# INPUT r_etcd_upgrade_version
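# Note: etcdctlv2 is assumed to be supplied elsewhere in the role's vars as a
# pre-built etcdctl (v2 API) command line; "cluster-health" is the v2 health check.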
- name: Verify cluster is healthy pre-upgrade
  command: "{{ etcdctlv2 }} cluster-health"

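# The last whitespace-separated field of the ExecStart line is what this
# playbook treats as the currently running container image reference.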
- name: Get current image
  shell: "grep 'ExecStart=' {{ etcd_service_file }} | awk '{print $NF}'"
  register: current_image

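# Illustration with hypothetical values: if current_image.stdout were
# registry.example.com/rhel7/etcd:3.1.0 and r_etcd_upgrade_version were 3.2.22,
# new_etcd_image would become registry.example.com/rhel7/etcd:3.2.22.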
- name: Set new_etcd_image
  set_fact:
    new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ r_etcd_upgrade_version ) }}"

- name: Pull new etcd image
  command: "docker pull {{ new_etcd_image }}"

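# The trailing "$" anchors the pattern so only the old image reference at the
# end of the ExecStart line is rewritten to the new tag.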
- name: Update to latest etcd image
  replace:
    dest: "{{ etcd_service_file }}"
    regexp: "{{ current_image.stdout }}$"
    replace: "{{ new_etcd_image }}"

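# daemon_reload ensures systemd picks up the edited unit file before the
# container is restarted.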
- name: Restart etcd_container
  systemd:
    name: "{{ etcd_service }}"
    daemon_reload: yes
    state: restarted

## TODO: this should probably move into the backup playbooks. It will also fail
## on atomic host; we need to revisit how to do etcd backups there, as the
## container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1).
- name: Upgrade etcd for etcdctl when not atomic
  package:
    name: etcd
    state: latest
  when: not l_ostree_booted.stat.exists | bool

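# Poll cluster-health a few times after the restart so the upgraded member has
# time to rejoin before the upgrade is considered healthy.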
- name: Verify cluster is healthy
  command: "{{ etcdctlv2 }} cluster-health"
  register: etcdctl
  until: etcdctl.rc == 0
  retries: 3
  delay: 10

- name: Store new etcd_image
  # DEPENDENCY openshift_facts
  openshift_facts:
    role: etcd
    local_facts:
      etcd_image: "{{ new_etcd_image }}"