
Merge pull request #9365 from mgugino-upstream-stage/gluster-scaleup

Refactor glusterfs for scaleup
Michael Gugino, 6 years ago
Commit a1cf5a7cff
34 files changed: 410 additions, 441 deletions
  1. playbooks/common/private/components.yml (+1 -1)
  2. playbooks/openshift-glusterfs/private/add_hosts.yml (+23 -0)
  3. playbooks/openshift-glusterfs/private/config.yml (+3 -37)
  4. playbooks/openshift-glusterfs/private/gluster_hosts.yml (+20 -0)
  5. playbooks/openshift-glusterfs/private/gluster_main.yml (+8 -0)
  6. playbooks/openshift-glusterfs/private/new_install.yml (+33 -0)
  7. playbooks/openshift-glusterfs/private/registry.yml (+5 -1)
  8. playbooks/openshift-glusterfs/private/setup_nodes.yml (+18 -0)
  9. playbooks/openshift-glusterfs/private/uninstall.yml (+1 -1)
  10. playbooks/openshift-glusterfs/private/update_topology.yml (+9 -0)
  11. playbooks/openshift-master/private/scaleup.yml (+2 -0)
  12. playbooks/openshift-node/private/configure_nodes.yml (+5 -0)
  13. playbooks/openshift-node/scaleup.yml (+2 -0)
  14. roles/openshift_node/tasks/glusterfs.yml (renamed from roles/openshift_node/tasks/storage_plugins/glusterfs.yml, +0 -0)
  15. roles/openshift_node/tasks/main.yml (+1 -1)
  16. roles/openshift_storage_glusterfs/tasks/get_heketi_key.yml (+13 -0)
  17. roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml (+0 -25)
  18. roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml (+0 -25)
  19. roles/openshift_storage_glusterfs/tasks/glusterblock_storageclass.yml (+25 -0)
  20. roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml (+9 -250)
  21. roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml (+2 -2)
  22. roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml (+3 -73)
  23. roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml (+2 -2)
  24. roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml (+18 -0)
  25. roles/openshift_storage_glusterfs/tasks/heketi_load.yml (+31 -0)
  26. roles/openshift_storage_glusterfs/tasks/heketi_pod_check.yml (+32 -0)
  27. roles/openshift_storage_glusterfs/tasks/heketi_setup.yml (+64 -0)
  28. roles/openshift_storage_glusterfs/tasks/label_nodes.yml (+8 -0)
  29. roles/openshift_storage_glusterfs/tasks/main.yml (+4 -19)
  30. roles/openshift_storage_glusterfs/tasks/mktemp.yml (+12 -0)
  31. roles/openshift_storage_glusterfs/tasks/rmtemp.yml (+7 -0)
  32. roles/openshift_storage_glusterfs/tasks/uninstall.yml (+4 -4)
  33. roles/openshift_storage_glusterfs/tasks/update_topology.yml (+31 -0)
  34. roles/openshift_storage_glusterfs/tasks/wait_for_pods.yml (+14 -0)

+ 1 - 1
playbooks/common/private/components.yml

@@ -14,7 +14,7 @@
 #    include the masters and usually includes infra nodes.
 # 5. The init/main.yml playbook has been invoked
 
-- import_playbook: ../../openshift-glusterfs/private/config.yml
+- import_playbook: ../../openshift-glusterfs/private/new_install.yml
   when: groups.oo_glusterfs_to_config | default([]) | count > 0
 
 - import_playbook: ../../openshift-hosted/private/config.yml

+ 23 - 0
playbooks/openshift-glusterfs/private/add_hosts.yml

@@ -0,0 +1,23 @@
+---
+# This play runs when new gluster hosts are part of the new_nodes group during
+# master or node scaleup.
+
+# Need to gather facts on glusterfs hosts to ensure we collect openshift.node.nodename
+# for the topology file.
+- import_playbook: ../../init/basic_facts.yml
+  vars:
+    l_init_fact_hosts: "glusterfs:glusterfs_registry"
+
+- import_playbook: ../../init/cluster_facts.yml
+  vars:
+    l_init_fact_hosts: "glusterfs:glusterfs_registry"
+
+- import_playbook: gluster_hosts.yml
+  vars:
+    # we only want the intersection of new_nodes and gluster hosts here.
+    l_glusterfs_hosts: "oo_glusterfs_to_config:&oo_nodes_to_config"
+
+- import_playbook: update_topology.yml
+  vars:
+    l_gluster_reload_topo: "{{ groups['oo_nodes_to_config'] | intersect(groups['glusterfs'] | default([])) | length > 0 }}"
+    l_gluster_registry_reload_topo: "{{ groups['oo_nodes_to_config'] | intersect(groups['glusterfs_registry'] | default([])) | length > 0 }}"
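
The intersection pattern above ("oo_glusterfs_to_config:&oo_nodes_to_config") matches only hosts present in both groups, so existing gluster hosts are not reconfigured during a scaleup. A minimal sketch of the inventory shape this play targets; the hostnames and device path are hypothetical, and only the group layout is taken from this diff (during scaleup, oo_nodes_to_config is populated from new_nodes):

    [OSEv3:children]
    masters
    nodes
    new_nodes
    glusterfs

    [new_nodes]
    node4.example.com

    [glusterfs]
    # existing member, untouched by add_hosts.yml
    node1.example.com glusterfs_devices='["/dev/xvdc"]'
    # new member, present in both groups, so it is configured and
    # l_gluster_reload_topo evaluates to true
    node4.example.com glusterfs_devices='["/dev/xvdc"]'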

+ 3 - 37
playbooks/openshift-glusterfs/private/config.yml

@@ -13,45 +13,11 @@
           status: "In Progress"
           start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
-- name: Configure GlusterFS hosts
-  hosts: glusterfs
-  tasks:
-  - import_role:
-      name: openshift_storage_glusterfs
-      tasks_from: firewall.yml
-    when: openshift_storage_glusterfs_is_native | default(True) | bool
-  - import_role:
-      name: openshift_storage_glusterfs
-      tasks_from: host_services.yml
-    when: openshift_storage_glusterfs_is_native | default(True) | bool
+- import_playbook: setup_nodes.yml
 
-- name: Configure GlusterFS registry hosts
-  hosts: glusterfs_registry
-  tasks:
-  - import_role:
-      name: openshift_storage_glusterfs
-      tasks_from: firewall.yml
-    when: openshift_storage_glusterfs_registry_is_native | default(True) | bool
-  - import_role:
-      name: openshift_storage_glusterfs
-      tasks_from: host_services.yml
-    when: openshift_storage_glusterfs_registry_is_native | default(True) | bool
+- import_playbook: gluster_hosts.yml
 
-- name: Load kernel modules for nodes
-  hosts: oo_nodes_to_config
-  tasks:
-  - import_role:
-      name: openshift_storage_glusterfs
-      tasks_from: kernel_modules.yml
-    when: openshift_storage_glusterfs_registry_is_native | default(True) | bool
-
-- name: Configure GlusterFS
-  hosts: oo_first_master
-  tasks:
-  - name: setup glusterfs
-    import_role:
-      name: openshift_storage_glusterfs
-    when: groups.oo_glusterfs_to_config | default([]) | count > 0
+- import_playbook: gluster_main.yml
 
 - name: GlusterFS Install Checkpoint End
   hosts: all

+ 20 - 0
playbooks/openshift-glusterfs/private/gluster_hosts.yml

@@ -0,0 +1,20 @@
+---
+# l_glusterfs_hosts is passed in via add_hosts.yml during scaleup plays.
+- name: Configure GlusterFS hosts
+  hosts: "{{ l_glusterfs_hosts | default('oo_glusterfs_to_config') }}"
+  tasks:
+  - import_role:
+      name: openshift_storage_glusterfs
+      tasks_from: firewall.yml
+    when: >
+          openshift_storage_glusterfs_is_native | default(True) | bool
+          or openshift_storage_glusterfs_registry_is_native | default(True) | bool
+  - import_role:
+      name: openshift_storage_glusterfs
+      tasks_from: kernel_modules.yml
+  - import_role:
+      name: openshift_storage_glusterfs
+      tasks_from: host_services.yml
+    when: >
+          openshift_storage_glusterfs_is_native | default(True) | bool
+          or openshift_storage_glusterfs_registry_is_native | default(True) | bool

+ 8 - 0
playbooks/openshift-glusterfs/private/gluster_main.yml

@@ -0,0 +1,8 @@
+---
+- name: Configure GlusterFS
+  hosts: oo_first_master
+  tasks:
+  - name: setup glusterfs
+    import_role:
+      name: openshift_storage_glusterfs
+    when: groups.oo_glusterfs_to_config | default([]) | count > 0

+ 33 - 0
playbooks/openshift-glusterfs/private/new_install.yml

@@ -0,0 +1,33 @@
+---
+# This playbook is meant to be called while installing a new cluster.
+# We don't need to run tasks against the nodes to consume gluster volumes
+# because we do that during the node plays.
+- name: GlusterFS Install Checkpoint Start
+  hosts: all
+  gather_facts: false
+  tasks:
+  - name: Set GlusterFS install 'In Progress'
+    run_once: true
+    set_stats:
+      data:
+        installer_phase_glusterfs:
+          title: "GlusterFS Install"
+          playbook: "playbooks/openshift-glusterfs/new_install.yml"
+          status: "In Progress"
+          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
+
+- import_playbook: gluster_hosts.yml
+
+- import_playbook: gluster_main.yml
+
+- name: GlusterFS Install Checkpoint End
+  hosts: all
+  gather_facts: false
+  tasks:
+  - name: Set GlusterFS install 'Complete'
+    run_once: true
+    set_stats:
+      data:
+        installer_phase_glusterfs:
+          status: "Complete"
+          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

+ 5 - 1
playbooks/openshift-glusterfs/private/registry.yml

@@ -1,5 +1,9 @@
 ---
-- import_playbook: config.yml
+- import_playbook: setup_nodes.yml
+
+- import_playbook: gluster_hosts.yml
+
+- import_playbook: gluster_main.yml
 
 - name: Create persistent volumes
   hosts: oo_first_master

+ 18 - 0
playbooks/openshift-glusterfs/private/setup_nodes.yml

@@ -0,0 +1,18 @@
+---
+# This playbook is designed to ensure an existing cluster without gluster support
+# gets the necessary modules and packages installed on all nodes to support
+# pods utilizing gluster volumes.
+# This playbook is not intended for scaleup of existing clusters that already
+# have gluster deployed.
+- name: Install gluster node dependencies for pod volume support
+  hosts: oo_nodes_to_config
+  tasks:
+  - import_role:
+      name: openshift_node
+      tasks_from: glusterfs.yml
+  - import_role:
+      name: openshift_storage_glusterfs
+      tasks_from: kernel_modules.yml
+    when:
+    - "inventory_hostname not in groups['glusterfs']"
+    - "inventory_hostname not in groups['glusterfs_registry']"

+ 1 - 1
playbooks/openshift-glusterfs/private/uninstall.yml

@@ -3,6 +3,6 @@
   hosts: oo_first_master
   tasks:
   - name: Run glusterfs uninstall role
-    include_role:
+    import_role:
       name: openshift_storage_glusterfs
       tasks_from: uninstall.yml

+ 9 - 0
playbooks/openshift-glusterfs/private/update_topology.yml

@@ -0,0 +1,9 @@
+---
+- name: Reload glusterfs topology
+  hosts: oo_first_master
+  tasks:
+  # We only want to update topology if we have new glusterfs/glusterfs_registry
+  # hosts.
+  - import_role:
+      name: openshift_storage_glusterfs
+      tasks_from: update_topology.yml

+ 2 - 0
playbooks/openshift-master/private/scaleup.yml

@@ -17,3 +17,5 @@
 - import_playbook: ../../openshift-node/private/join.yml
 
 - import_playbook: ../../openshift-loadbalancer/private/config.yml
+
+- import_playbook: ../../openshift-glusterfs/private/add_hosts.yml

+ 5 - 0
playbooks/openshift-node/private/configure_nodes.yml

@@ -15,3 +15,8 @@
   - role: openshift_node
   - role: tuned
   - role: nickhammond.logrotate
+  tasks:
+  - import_role:
+      name: openshift_storage_glusterfs
+      tasks_from: kernel_modules.yml
+    when: "'glusterfs' in osn_storage_plugin_deps"

+ 2 - 0
playbooks/openshift-node/scaleup.yml

@@ -37,3 +37,5 @@
 
 - import_playbook: private/bootstrap.yml
 - import_playbook: private/join.yml
+
+- import_playbook: ../openshift-glusterfs/private/add_hosts.yml

roles/openshift_node/tasks/storage_plugins/glusterfs.yml → roles/openshift_node/tasks/glusterfs.yml


+ 1 - 1
roles/openshift_node/tasks/main.yml

@@ -71,7 +71,7 @@
   import_tasks: storage_plugins/nfs.yml
 
 - name: GlusterFS storage plugin configuration
-  import_tasks: storage_plugins/glusterfs.yml
+  import_tasks: glusterfs.yml
   when: "'glusterfs' in osn_storage_plugin_deps"
 
 - name: Ceph storage plugin configuration

+ 13 - 0
roles/openshift_storage_glusterfs/tasks/get_heketi_key.yml

@@ -0,0 +1,13 @@
+---
+- name: Get heketi admin secret
+  oc_secret:
+    state: list
+    namespace: "{{ glusterfs_namespace }}"
+    name: "heketi-{{ glusterfs_name }}-admin-secret"
+    decode: True
+  register: glusterfs_heketi_admin_secret
+
+- name: Set heketi admin key
+  set_fact:
+    glusterfs_heketi_admin_key: "{{ glusterfs_heketi_admin_secret.results.decoded.key }}"
+  when: glusterfs_heketi_admin_secret.results.results[0]
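
The set_fact above depends on the result shape of this repo's oc_secret module when called with state: list and decode: True. A hedged sketch of the registered variable when the secret exists; the values are illustrative, and only the paths results.results[0] and results.decoded.key are actually relied on:

    # glusterfs_heketi_admin_secret (assumed shape)
    # results:
    #   results:
    #   - kind: Secret
    #     data:
    #       key: bXlhZG1pbmtleQ==   # base64, as stored in the cluster
    #   decoded:
    #     key: myadminkey           # plaintext consumed by set_fact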

+ 0 - 25
roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml

@@ -1,29 +1,4 @@
 ---
-- name: Delete pre-existing gluster-s3 resources
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    kind: "{{ item.kind }}"
-    name: "{{ item.name | default(omit) }}"
-    selector: "{{ item.selector | default(omit) }}"
-    state: absent
-  with_items:
-  - kind: "all,svc,deploy,secret,sc,pvc"
-    selector: "gluster-s3"
-  failed_when: False
-  when: glusterfs_wipe
-
-- name: Wait for gluster-s3 pods to terminate
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    kind: pod
-    state: list
-    selector: "glusterfs=s3-{{ glusterfs_name }}-provisioner-pod"
-  register: gluster_s3_pod
-  until: "gluster_s3_pod.results.results[0]['items'] | count == 0"
-  delay: 10
-  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
-  when: glusterfs_wipe
-
 - name: Create heketi secret
   oc_secret:
     namespace: "{{ glusterfs_namespace }}"

+ 0 - 25
roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml

@@ -1,29 +1,4 @@
 ---
-- name: Delete pre-existing glusterblock provisioner resources
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    kind: "{{ item.kind }}"
-    name: "{{ item.name | default(omit) }}"
-    selector: "{{ item.selector | default(omit) }}"
-    state: absent
-  with_items:
-  - kind: "all,deploy,sa,clusterrole,clusterrolebinding"
-    selector: "glusterblock"
-  failed_when: False
-  when: glusterfs_wipe
-
-- name: Wait for glusterblock provisioner pods to terminate
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    kind: pod
-    state: list
-    selector: "glusterfs=block-{{ glusterfs_name }}-provisioner-pod"
-  register: glusterblock_pod
-  until: "glusterblock_pod.results.results[0]['items'] | count == 0"
-  delay: 10
-  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
-  when: glusterfs_wipe
-
 - name: Copy initial glusterblock provisioner resource file
   copy:
     src: "{{ item }}"

+ 25 - 0
roles/openshift_storage_glusterfs/tasks/glusterblock_storageclass.yml

@@ -0,0 +1,25 @@
+---
+- name: Create heketi block secret
+  oc_secret:
+    namespace: "{{ glusterfs_namespace }}"
+    state: present
+    name: "heketi-{{ glusterfs_name }}-admin-secret-block"
+    type: "gluster.org/glusterblock"
+    force: True
+    contents:
+    - path: key
+      data: "{{ glusterfs_heketi_admin_key }}"
+  when: glusterfs_heketi_admin_key is defined
+
+- name: Generate Gluster Block StorageClass file
+  template:
+    src: "gluster-block-storageclass.yml.j2"
+    dest: "{{ mktemp.stdout }}/gluster-block-storageclass.yml"
+
+- name: Create Gluster Block StorageClass
+  oc_obj:
+    state: present
+    kind: storageclass
+    name: "glusterfs-{{ glusterfs_name }}-block"
+    files:
+    - "{{ mktemp.stdout }}/gluster-block-storageclass.yml"

File diff suppressed because it is too large
+ 9 - 250
roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml


+ 2 - 2
roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml

@@ -1,3 +1,3 @@
 ---
-- include_tasks: glusterfs_config_facts.yml
-- include_tasks: glusterfs_common.yml
+- import_tasks: glusterfs_config_facts.yml
+- import_tasks: glusterfs_common.yml
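
This PR consistently converts include_tasks and include_role to their import_* counterparts. Imports are resolved statically at parse time, and a when: condition on an import is copied onto every imported task; an include is itself a task, evaluated at runtime, and the condition gates only whether the file is loaded. A generic sketch, not from this repo:

    # static: resolved at parse time; the condition is re-evaluated
    # on each task inside common.yml
    - import_tasks: common.yml
      when: glusterfs_nodes | default([]) | count > 0

    # dynamic: the include is a task of its own; the condition gates
    # only the include itself
    - include_tasks: common.yml
      when: glusterfs_nodes | default([]) | count > 0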

+ 3 - 73
roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml

@@ -3,66 +3,6 @@
     that: "glusterfs_nodes | count >= 3"
     msg: There must be at least three GlusterFS nodes specified
 
-- name: Delete pre-existing GlusterFS resources
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    kind: "{{ item.kind }}"
-    name: "{{ item.name }}"
-    state: absent
-  with_items:
-  - kind: template
-    name: glusterfs
-  - kind: daemonset
-    name: "glusterfs-{{ glusterfs_name | default }}"
-  when: glusterfs_wipe
-
-- name: Unlabel any existing GlusterFS nodes
-  oc_label:
-    name: "{{ hostvars[item].openshift.node.nodename }}"
-    kind: node
-    state: absent
-    labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
-  with_items: "{{ groups.all }}"
-  when: "'openshift' in hostvars[item] and glusterfs_wipe"
-
-- name: Delete pre-existing GlusterFS config
-  file:
-    path: /var/lib/glusterd
-    state: absent
-  delegate_to: "{{ item }}"
-  with_items: "{{ glusterfs_nodes | default([]) }}"
-  when: glusterfs_wipe
-
-- name: Get GlusterFS storage devices state
-  command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
-  register: devices_info
-  delegate_to: "{{ item }}"
-  with_items: "{{ glusterfs_nodes | default([]) }}"
-  failed_when: False
-  when: glusterfs_wipe
-
-  # Runs "lvremove -ff <vg>; vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
-- name: Clear GlusterFS storage device contents
-  shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}lvremove -ff {{ fields[1] }}; vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
-  delegate_to: "{{ item.item }}"
-  with_items: "{{ devices_info.results }}"
-  register: clear_devices
-  until:
-  - "'contains a filesystem in use' not in clear_devices.stderr"
-  delay: 1
-  retries: 30
-  when:
-  - glusterfs_wipe
-  - item.stdout_lines | count > 0
-
-- name: Label GlusterFS nodes
-  oc_label:
-    name: "{{ hostvars[item].openshift.node.nodename }}"
-    kind: node
-    state: add
-    labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
-  with_items: "{{ glusterfs_nodes | default([]) }}"
-
 - name: Copy GlusterFS DaemonSet template
   copy:
     src: "glusterfs-template.yml"
@@ -77,6 +17,8 @@
     files:
     - "{{ mktemp.stdout }}/glusterfs-template.yml"
 
+- import_tasks: label_nodes.yml
+
 - name: Check GlusterFS DaemonSet status
   oc_obj:
     namespace: "{{ glusterfs_namespace }}"
@@ -98,16 +40,4 @@
   when: (glusterfs_ds.results.results[0].status is not defined) or
         (glusterfs_ds.results.results[0].status.numberReady | default(0) < glusterfs_ds.results.results[0].status.desiredNumberScheduled | default(glusterfs_nodes | count))
 
-- name: Wait for GlusterFS pods
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    kind: pod
-    state: list
-    selector: "glusterfs={{ glusterfs_name }}-pod"
-  register: glusterfs_pods
-  until:
-  - "glusterfs_pods.results.results[0]['items'] | count > 0"
-  # There must be as many pods with 'Ready' status True as there are nodes expecting those pods
-  - "glusterfs_pods.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
-  delay: 10
-  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+- import_tasks: wait_for_pods.yml

+ 2 - 2
roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml

@@ -1,7 +1,7 @@
 ---
-- include_tasks: glusterfs_registry_facts.yml
+- import_tasks: glusterfs_registry_facts.yml
 
-- include_tasks: glusterfs_common.yml
+- import_tasks: glusterfs_common.yml
   when:
   - glusterfs_nodes | default([]) | count > 0
   - "'glusterfs' not in groups or glusterfs_nodes != groups.glusterfs"

+ 18 - 0
roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml

@@ -32,3 +32,21 @@
 - name: Set heketi Deployed fact
   set_fact:
     glusterfs_heketi_deploy_is_missing: False
+
+- name: Wait for deploy-heketi pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
+  register: deploy_heketi_pod
+  until:
+  - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
+  # Pod's 'Ready' status must be True
+  - "deploy_heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+  when:
+  - glusterfs_heketi_is_native
+  - not glusterfs_heketi_deploy_is_missing
+  - glusterfs_heketi_is_missing

File diff suppressed because it is too large
+ 31 - 0
roles/openshift_storage_glusterfs/tasks/heketi_load.yml


+ 32 - 0
roles/openshift_storage_glusterfs/tasks/heketi_pod_check.yml

@@ -0,0 +1,32 @@
+---
+- name: Check for existing deploy-heketi pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: list
+    kind: pod
+    selector: "glusterfs=deploy-heketi-{{ glusterfs_name }}-pod"
+  register: deploy_heketi_pod
+
+- name: Check if need to deploy deploy-heketi
+  set_fact:
+    glusterfs_heketi_deploy_is_missing: False
+  when:
+  - "deploy_heketi_pod.results.results[0]['items'] | count > 0"
+  # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+  - "deploy_heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- name: Check for existing heketi pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: list
+    kind: pod
+    selector: "glusterfs=heketi-{{ glusterfs_name }}-pod"
+  register: heketi_pod
+
+- name: Check if need to deploy heketi
+  set_fact:
+    glusterfs_heketi_is_missing: False
+  when:
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+  - "heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"

+ 64 - 0
roles/openshift_storage_glusterfs/tasks/heketi_setup.yml

@@ -0,0 +1,64 @@
+---
+- name: Create heketi service account
+  oc_serviceaccount:
+    namespace: "{{ glusterfs_namespace }}"
+    name: "heketi-{{ glusterfs_name }}-service-account"
+    state: present
+
+- name: Add heketi service account to privileged SCC
+  oc_adm_policy_user:
+    namespace: "{{ glusterfs_namespace }}"
+    user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+
+- name: Allow heketi service account to view/edit pods
+  oc_adm_policy_user:
+    namespace: "{{ glusterfs_namespace }}"
+    user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-{{ glusterfs_name }}-service-account"
+    resource_kind: role
+    resource_name: edit
+    state: present
+
+- import_tasks: heketi_pod_check.yml
+
+- name: Generate heketi config file
+  template:
+    src: "heketi.json.j2"
+    dest: "{{ mktemp.stdout }}/heketi.json"
+
+- import_tasks: get_heketi_key.yml
+
+- name: Generate heketi admin key
+  set_fact:
+    glusterfs_heketi_admin_key: "{{ 32 | lib_utils_oo_generate_secret }}"
+  when: glusterfs_heketi_admin_key is undefined
+
+- name: Generate heketi user key
+  set_fact:
+    glusterfs_heketi_user_key: "{{ 32 | lib_utils_oo_generate_secret }}"
+  until:
+  - glusterfs_heketi_user_key is defined
+  - glusterfs_heketi_user_key != glusterfs_heketi_admin_key
+  delay: 1
+  retries: 10
+  when: glusterfs_heketi_user_key is undefined
+
+- name: Copy heketi private key
+  copy:
+    src: "{{ glusterfs_heketi_ssh_keyfile | default(omit)  }}"
+    content: "{{ '' if glusterfs_heketi_ssh_keyfile is undefined else omit }}"
+    dest: "{{ mktemp.stdout }}/private_key"
+
+- name: Create heketi config secret
+  oc_secret:
+    namespace: "{{ glusterfs_namespace }}"
+    state: present
+    name: "heketi-{{ glusterfs_name }}-config-secret"
+    force: True
+    files:
+    - name: heketi.json
+      path: "{{ mktemp.stdout }}/heketi.json"
+    - name: private_key
+      path: "{{ mktemp.stdout }}/private_key"

+ 8 - 0
roles/openshift_storage_glusterfs/tasks/label_nodes.yml

@@ -0,0 +1,8 @@
+---
+- name: Label GlusterFS nodes
+  oc_label:
+    name: "{{ hostvars[item].openshift.node.nodename }}"
+    kind: node
+    state: add
+    labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"
+  with_items: "{{ glusterfs_nodes | default([]) }}"

+ 4 - 19
roles/openshift_storage_glusterfs/tasks/main.yml

@@ -1,29 +1,14 @@
 ---
-- name: Create temp directory for doing work in
-  command: mktemp -d /tmp/openshift-glusterfs-ansible-XXXXXX
-  register: mktemp
-  changed_when: False
-  check_mode: no
+- import_tasks: mktemp.yml
 
-- name: Copy the admin client config
-  command: >
-    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
-  changed_when: False
-  check_mode: no
-
-- include_tasks: glusterfs_config.yml
+- import_tasks: glusterfs_config.yml
   when:
   - groups.glusterfs | default([]) | count > 0
 
-- include_tasks: glusterfs_registry.yml
+- import_tasks: glusterfs_registry.yml
   when: >
     groups.glusterfs_registry | default([]) | count > 0
     or (openshift_hosted_registry_storage_kind | default(none) == 'glusterfs')
     or (openshift_hosted_registry_storage_glusterfs_swap | default(False))
 
-- name: Delete temp directory
-  file:
-    name: "{{ mktemp.stdout }}"
-    state: absent
-  changed_when: False
-  check_mode: no
+- import_tasks: rmtemp.yml

+ 12 - 0
roles/openshift_storage_glusterfs/tasks/mktemp.yml

@@ -0,0 +1,12 @@
+---
+- name: Create temp directory for doing work in
+  command: mktemp -d /tmp/openshift-glusterfs-ansible-XXXXXX
+  register: mktemp
+  changed_when: False
+  check_mode: no
+
+- name: Copy the admin client config
+  command: >
+    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+  changed_when: False
+  check_mode: no

+ 7 - 0
roles/openshift_storage_glusterfs/tasks/rmtemp.yml

@@ -0,0 +1,7 @@
+---
+- name: Delete temp directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False
+  check_mode: no

+ 4 - 4
roles/openshift_storage_glusterfs/tasks/uninstall.yml

@@ -1,16 +1,16 @@
 ---
 - name: uninstall glusterfs
   block:
-    - include_tasks: glusterfs_config_facts.yml
-    - include_tasks: glusterfs_uninstall.yml
+    - import_tasks: glusterfs_config_facts.yml
+    - import_tasks: glusterfs_uninstall.yml
   when:
     - "'glusterfs' in groups"
     - "groups['glusterfs'] | length > 0"
 
 - name: uninstall glusterfs registry
   block:
-    - include_tasks: glusterfs_registry_facts.yml
-    - include_tasks: glusterfs_uninstall.yml
+    - import_tasks: glusterfs_registry_facts.yml
+    - import_tasks: glusterfs_uninstall.yml
   when:
     - "'glusterfs_registry' in groups"
     - "groups['glusterfs_registry'] | length > 0"

+ 31 - 0
roles/openshift_storage_glusterfs/tasks/update_topology.yml

@@ -0,0 +1,31 @@
+---
+# This taskfile is called when adding new nodes during node and master
+# scaleup plays.
+- import_tasks: mktemp.yml
+
+# l_gluster_reload_topo passed in via add_hosts.yml
+- when: l_gluster_reload_topo | default(True)
+  block:
+  - import_tasks: glusterfs_config_facts.yml
+  - import_tasks: label_nodes.yml
+  - import_tasks: heketi_pod_check.yml
+  - import_tasks: get_heketi_key.yml
+  - import_tasks: wait_for_pods.yml
+  - import_tasks: heketi_load.yml
+    when:
+    - glusterfs_nodes | default([]) | count > 0
+
+# l_gluster_registry_reload_topo passed in via add_hosts.yml
+- when: l_gluster_registry_reload_topo | default(True)
+  block:
+  - import_tasks: glusterfs_registry_facts.yml
+  - import_tasks: label_nodes.yml
+  - import_tasks: heketi_pod_check.yml
+  - import_tasks: get_heketi_key.yml
+  - import_tasks: wait_for_pods.yml
+  - import_tasks: heketi_load.yml
+    when:
+    - glusterfs_nodes | default([]) | count > 0
+    - "'glusterfs' not in groups or glusterfs_nodes != groups.glusterfs"
+
+- import_tasks: rmtemp.yml

+ 14 - 0
roles/openshift_storage_glusterfs/tasks/wait_for_pods.yml

@@ -0,0 +1,14 @@
+---
+- name: Wait for GlusterFS pods
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs={{ glusterfs_name }}-pod"
+  register: glusterfs_pods
+  until:
+  - "glusterfs_pods.results.results[0]['items'] | count > 0"
+  # There must be as many pods with 'Ready' status True as there are nodes expecting those pods
+  - "glusterfs_pods.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
+  delay: 10
+  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
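
The second until clause reads densely: collect each pod's status.conditions, pull the status of the condition whose type is Ready, cast the "True"/"False" strings to booleans, keep the truthy ones, and require the count to equal the expected node count. For readers without this repo's lib_utils plugins, a rough equivalent using core Jinja filters (a sketch only; the role itself uses the pipeline above):

    - debug:
        msg: >-
          ready={{ glusterfs_pods.results.results[0]['items']
                   | map(attribute='status.conditions') | flatten
                   | selectattr('type', 'equalto', 'Ready')
                   | selectattr('status', 'equalto', 'True')
                   | list | count }}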