# glusterfs_deploy.yml — GlusterFS deployment tasks (openshift-ansible)
  1. ---
  2. - assert:
  3. that: "glusterfs_nodeselector.keys() | count == 1"
  4. msg: Only one GlusterFS nodeselector key pair should be provided
  5. - assert:
  6. that: "glusterfs_nodes | count >= 3"
  7. msg: There must be at least three GlusterFS nodes specified
  8. - name: Delete pre-existing GlusterFS resources
  9. oc_obj:
  10. namespace: "{{ glusterfs_namespace }}"
  11. kind: "template,daemonset"
  12. name: glusterfs
  13. state: absent
  14. when: glusterfs_wipe
  15. - name: Unlabel any existing GlusterFS nodes
  16. oc_label:
  17. name: "{{ item }}"
  18. kind: node
  19. state: absent
  20. labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
  21. with_items: "{{ groups.all }}"
  22. when: glusterfs_wipe
  23. - name: Delete pre-existing GlusterFS config
  24. file:
  25. path: /var/lib/glusterd
  26. state: absent
  27. delegate_to: "{{ item }}"
  28. with_items: "{{ glusterfs_nodes | default([]) }}"
  29. when: glusterfs_wipe
  30. - name: Get GlusterFS storage devices state
  31. command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
  32. register: devices_info
  33. delegate_to: "{{ item }}"
  34. with_items: "{{ glusterfs_nodes | default([]) }}"
  35. failed_when: False
  36. when: glusterfs_wipe
  37. # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
  38. - name: Clear GlusterFS storage device contents
  39. shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
  40. delegate_to: "{{ item.item }}"
  41. with_items: "{{ devices_info.results }}"
  42. when:
  43. - glusterfs_wipe
  44. - item.stdout_lines | count > 0
  45. - name: Add service accounts to privileged SCC
  46. oc_adm_policy_user:
  47. user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}"
  48. resource_kind: scc
  49. resource_name: privileged
  50. state: present
  51. with_items:
  52. - 'default'
  53. - 'router'
  54. - name: Label GlusterFS nodes
  55. oc_label:
  56. name: "{{ glusterfs_host }}"
  57. kind: node
  58. state: add
  59. labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
  60. with_items: "{{ glusterfs_nodes | default([]) }}"
  61. loop_control:
  62. loop_var: glusterfs_host
  63. - name: Copy GlusterFS DaemonSet template
  64. copy:
  65. src: "{{ openshift.common.examples_content_version }}/glusterfs-template.yml"
  66. dest: "{{ mktemp.stdout }}/glusterfs-template.yml"
  67. - name: Create GlusterFS template
  68. oc_obj:
  69. namespace: "{{ glusterfs_namespace }}"
  70. kind: template
  71. name: glusterfs
  72. state: present
  73. files:
  74. - "{{ mktemp.stdout }}/glusterfs-template.yml"
  75. - name: Deploy GlusterFS pods
  76. oc_process:
  77. namespace: "{{ glusterfs_namespace }}"
  78. template_name: "glusterfs"
  79. create: True
  80. params:
  81. IMAGE_NAME: "{{ glusterfs_image }}"
  82. IMAGE_VERSION: "{{ glusterfs_version }}"
  83. - name: Wait for GlusterFS pods
  84. oc_obj:
  85. namespace: "{{ glusterfs_namespace }}"
  86. kind: pod
  87. state: list
  88. selector: "glusterfs-node=pod"
  89. register: glusterfs_pods
  90. until:
  91. - "glusterfs_pods.results.results[0]['items'] | count > 0"
  92. # There must be as many pods with 'Ready' staus True as there are nodes expecting those pods
  93. - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
  94. delay: 10
  95. retries: "{{ (glusterfs_timeout / 10) | int }}"