Explorar el Código

Merge pull request #3969 from jarrpa/glusterfs-registry-too

Merged by openshift-bot
OpenShift Bot hace 8 años
padre
commit
c8e8f8f7b5
Se han modificado 28 ficheros con 607 adiciones y 233 borrados
  1. 51 0
      inventory/byo/hosts.byo.native-glusterfs.example
  2. 0 3
      inventory/byo/hosts.origin.example
  3. 0 3
      inventory/byo/hosts.ose.example
  4. 2 0
      playbooks/byo/openshift-cluster/cluster_hosts.yml
  5. 98 0
      playbooks/byo/openshift-glusterfs/README.md
  6. 10 0
      playbooks/byo/openshift-glusterfs/config.yml
  7. 1 0
      playbooks/byo/openshift-glusterfs/filter_plugins
  8. 1 0
      playbooks/byo/openshift-glusterfs/lookup_plugins
  9. 10 0
      playbooks/byo/openshift-glusterfs/registry.yml
  10. 1 0
      playbooks/byo/openshift-glusterfs/roles
  11. 1 1
      playbooks/common/openshift-cluster/evaluate_groups.yml
  12. 3 1
      playbooks/common/openshift-glusterfs/config.yml
  13. 49 0
      playbooks/common/openshift-glusterfs/registry.yml
  14. 3 1
      roles/openshift_facts/library/openshift_facts.py
  15. 8 0
      roles/openshift_hosted/README.md
  16. 2 2
      roles/openshift_hosted/tasks/registry/registry.yml
  17. 42 1
      roles/openshift_hosted/tasks/registry/storage/glusterfs.yml
  18. 35 1
      roles/openshift_storage_glusterfs/README.md
  19. 20 1
      roles/openshift_storage_glusterfs/defaults/main.yml
  20. 166 0
      roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
  21. 22 0
      roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
  22. 21 21
      roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
  23. 29 6
      roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
  24. 8 8
      roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml
  25. 17 17
      roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
  26. 5 165
      roles/openshift_storage_glusterfs/tasks/main.yml
  27. 1 1
      roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2
  28. 1 1
      roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2

+ 51 - 0
inventory/byo/hosts.byo.native-glusterfs.example

@@ -0,0 +1,51 @@
+# This is an example of a bring your own (byo) host inventory for a cluster
+# with natively hosted, containerized GlusterFS storage.
+#
+# This inventory may be used with the byo/config.yml playbook to deploy a new
+# cluster with GlusterFS storage, which will use that storage to create a
+# volume that will provide backend storage for a hosted Docker registry.
+#
+# This inventory may also be used with byo/openshift-glusterfs/config.yml to
+# deploy GlusterFS storage on an existing cluster. With this playbook, the
+# registry backend volume will be created but the administrator must then
+# either deploy a hosted registry or change an existing hosted registry to use
+# that volume.
+#
+# There are additional configuration parameters that can be specified to
+# control the deployment and state of a GlusterFS cluster. Please see the
+# documentation in playbooks/byo/openshift-glusterfs/README.md and
+# roles/openshift_storage_glusterfs/README.md for additional details.
+
+[OSEv3:children]
+masters
+nodes
+# Specify there will be GlusterFS nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=origin
+# Specify that we want to use GlusterFS storage for a hosted registry
+openshift_hosted_registry_storage_kind=glusterfs
+
+[masters]
+master  node=True storage=True master=True
+
+[nodes]
+master  node=True storage=True master=True openshift_schedulable=False
+# A hosted registry, by default, will only be deployed on nodes labeled
+# "region=infra".
+node0   node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node1   node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+node2   node=True openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
+
+# Specify the glusterfs group, which contains the nodes that will host
+# GlusterFS storage pods. At a minimum, each node must have a
+# "glusterfs_devices" variable defined. This variable is a list of block
+# devices the node will have access to that is intended solely for use as
+# GlusterFS storage. These block devices must be bare (e.g. have no data, not
+# be marked as LVM PVs), and will be formatted.
+[glusterfs]
+node0  glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node1  glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'
+node2  glusterfs_devices='[ "/dev/vdb", "/dev/vdc", "/dev/vdd" ]'

+ 0 - 3
inventory/byo/hosts.origin.example

@@ -438,9 +438,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
 #openshift_hosted_registry_storage_volume_size=10Gi
 #
-# Native GlusterFS Registry Storage
-#openshift_hosted_registry_storage_kind=glusterfs
-#
 # AWS S3
 # S3 bucket must already exist.
 #openshift_hosted_registry_storage_kind=object

+ 0 - 3
inventory/byo/hosts.ose.example

@@ -438,9 +438,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
 #openshift_hosted_registry_storage_volume_size=10Gi
 #
-# Native GlusterFS Registry Storage
-#openshift_hosted_registry_storage_kind=glusterfs
-#
 # AWS S3
 #
 # S3 bucket must already exist.

+ 2 - 0
playbooks/byo/openshift-cluster/cluster_hosts.yml

@@ -15,6 +15,8 @@ g_nfs_hosts: "{{ groups.nfs | default([]) }}"
 
 g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}"
 
+g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}"
+
 g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
                  | union(g_lb_hosts) | union(g_nfs_hosts)
                  | union(g_new_node_hosts)| union(g_new_master_hosts)

+ 98 - 0
playbooks/byo/openshift-glusterfs/README.md

@@ -0,0 +1,98 @@
+# OpenShift GlusterFS Playbooks
+
+These playbooks are intended to enable the use of GlusterFS volumes by pods in
+OpenShift. While they try to provide a sane set of defaults they do cover a
+variety of scenarios and configurations, so read carefully. :)
+
+## Playbook: config.yml
+
+This is the main playbook that integrates GlusterFS into a new or existing
+OpenShift cluster. It will also, if specified, configure a hosted Docker
+registry with GlusterFS backend storage.
+
+This playbook requires the `glusterfs` group to exist in the Ansible inventory
+file. The hosts in this group are the nodes of the GlusterFS cluster.
+
+ * If this is a newly configured cluster each host must have a
+   `glusterfs_devices` variable defined, each of which must be a list of block
+   storage devices intended for use only by the GlusterFS cluster. If this is
+   also an external GlusterFS cluster, you must specify
+   `openshift_storage_glusterfs_is_native=False`. If the cluster is to be
+   managed by an external heketi service you must also specify
+   `openshift_storage_glusterfs_heketi_is_native=False` and
+   `openshift_storage_glusterfs_heketi_url=<URL>` with the URL to the heketi
+   service. All these variables are specified in `[OSEv3:vars]`.
+ * If this is an existing cluster you do not need to specify a list of block
+   devices but you must specify the following variables in `[OSEv3:vars]`:
+   * `openshift_storage_glusterfs_is_missing=False`
+   * `openshift_storage_glusterfs_heketi_is_missing=False`
+
+By default, pods for a native GlusterFS cluster will be created in the
+`default` namespace. To change this, specify
+`openshift_storage_glusterfs_namespace=<other namespace>` in `[OSEv3:vars]`.
+
+To configure the deployment of a Docker registry with GlusterFS backend
+storage, specify `openshift_hosted_registry_storage_kind=glusterfs` in
+`[OSEv3:vars]`. To create a separate GlusterFS cluster for use only by the
+registry, specify a `glusterfs_registry` group that is populated as the
+`glusterfs` group is, with the nodes for the separate cluster. If no
+`glusterfs_registry` group is specified, the cluster defined by the `glusterfs`
+group will be used.
+
+To swap an existing hosted registry's backend storage for a GlusterFS volume,
+specify `openshift_hosted_registry_storage_glusterfs_swap=True`. To
+additionally copy any existing contents from an existing hosted registry,
+specify `openshift_hosted_registry_storage_glusterfs_swapcopy=True`.
+
+**NOTE:** For each namespace that is to have access to GlusterFS volumes an
+Endpoints resource pointing to the GlusterFS cluster nodes and a corresponding
+Service resource must be created. If dynamic provisioning using StorageClasses
+is configured, these resources are created automatically in the namespaces that
+require them. This playbook also takes care of creating these resources in the
+namespaces used for deployment.
+
+An example of a minimal inventory file:
+```
+[OSEv3:children]
+masters
+nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=origin
+
+[masters]
+master
+
+[nodes]
+node0
+node1
+node2
+
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/sdb" ]'
+node1 glusterfs_devices='[ "/dev/sdb", "/dev/sdc" ]'
+node2 glusterfs_devices='[ "/dev/sdd" ]'
+```
+
+## Playbook: registry.yml
+
+This playbook is intended for admins who want to deploy a hosted Docker
+registry with GlusterFS backend storage on an existing OpenShift cluster. It
+has all the same requirements and behaviors as `config.yml`.
+
+## Role: openshift_storage_glusterfs
+
+The bulk of the work is done by the `openshift_storage_glusterfs` role. This
+role can handle the deployment of GlusterFS (if it is to be hosted on the
+OpenShift cluster), the registration of GlusterFS nodes (hosted or standalone),
+and (if specified) integration as backend storage for a hosted Docker registry.
+
+See the documentation in the role's directory for further details.
+
+## Role: openshift_hosted
+
+The `openshift_hosted` role recognizes `glusterfs` as a possible storage
+backend for a hosted docker registry. It will also, if configured, handle the
+swap of an existing registry's backend storage to a GlusterFS volume.

+ 10 - 0
playbooks/byo/openshift-glusterfs/config.yml

@@ -0,0 +1,10 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-glusterfs/config.yml

+ 1 - 0
playbooks/byo/openshift-glusterfs/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/byo/openshift-glusterfs/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 10 - 0
playbooks/byo/openshift-glusterfs/registry.yml

@@ -0,0 +1,10 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-glusterfs/registry.yml

+ 1 - 0
playbooks/byo/openshift-glusterfs/roles

@@ -0,0 +1 @@
+../../../roles

+ 1 - 1
playbooks/common/openshift-cluster/evaluate_groups.yml

@@ -155,5 +155,5 @@
       groups: oo_glusterfs_to_config
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
-    with_items: "{{ g_glusterfs_hosts | default([]) }}"
+    with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts) | default([]) }}"
     changed_when: no

+ 3 - 1
playbooks/common/openshift-glusterfs/config.yml

@@ -12,7 +12,9 @@
     - service: glusterfs_bricks
       port: "49152-49251/tcp"
   roles:
-  - os_firewall
+  - role: os_firewall
+    when:
+    - openshift_storage_glusterfs_is_native | default(True)
 
 - name: Configure GlusterFS
   hosts: oo_first_master

+ 49 - 0
playbooks/common/openshift-glusterfs/registry.yml

@@ -0,0 +1,49 @@
+---
+- include: config.yml
+
+- name: Initialize GlusterFS registry PV and PVC vars
+  hosts: oo_first_master
+  tags: hosted
+  tasks:
+  - set_fact:
+      glusterfs_pv: []
+      glusterfs_pvc: []
+
+  - set_fact:
+      glusterfs_pv:
+      - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
+        capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
+        access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
+        storage:
+          glusterfs:
+            endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
+            path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
+            readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
+      glusterfs_pvc:
+      - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+        capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
+        access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
+    when: openshift.hosted.registry.storage.glusterfs.swap
+
+- name: Create persistent volumes
+  hosts: oo_first_master
+  tags:
+  - hosted
+  vars:
+    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
+    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
+  roles:
+  - role: openshift_persistent_volumes
+    when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
+
+- name: Create Hosted Resources
+  hosts: oo_first_master
+  tags:
+  - hosted
+  pre_tasks:
+  - set_fact:
+      openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+      openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
+    when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
+  roles:
+  - role: openshift_hosted

+ 3 - 1
roles/openshift_facts/library/openshift_facts.py

@@ -2167,7 +2167,9 @@ class OpenShiftFacts(object):
                         glusterfs=dict(
                             endpoints='glusterfs-registry-endpoints',
                             path='glusterfs-registry-volume',
-                            readOnly=False),
+                            readOnly=False,
+                            swap=False,
+                            swapcopy=True),
                         host=None,
                         access=dict(
                             modes=['ReadWriteMany']

+ 8 - 0
roles/openshift_hosted/README.md

@@ -28,6 +28,14 @@ From this role:
 | openshift_hosted_registry_selector    | region=infra                             | Node selector used when creating registry. The OpenShift registry will only be deployed to nodes matching this selector. |
 | openshift_hosted_registry_cert_expire_days | `730` (2 years)                     | Validity of the certificates in days. Works only with OpenShift version 1.5 (3.5) and later.                             |
 
+If you specify `openshift_hosted_registry_kind=glusterfs`, the following
+variables also control configuration behavior:
+
+| Name                                         | Default value | Description                                                                  |
+|----------------------------------------------|---------------|------------------------------------------------------------------------------|
+| openshift_hosted_registry_glusterfs_swap     | False         | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+| openshift_hosted_registry_glusterfs_swapcopy | True          | If swapping, also copy the current contents of the registry volume           |
+
 Dependencies
 ------------
 

+ 2 - 2
roles/openshift_hosted/tasks/registry/registry.yml

@@ -61,7 +61,7 @@
     name: "{{ openshift_hosted_registry_serviceaccount }}"
     namespace: "{{ openshift_hosted_registry_namespace }}"
 
-- name: Grant the registry serivce account access to the appropriate scc
+- name: Grant the registry service account access to the appropriate scc
   oc_adm_policy_user:
     user: "system:serviceaccount:{{ openshift_hosted_registry_namespace }}:{{ openshift_hosted_registry_serviceaccount }}"
     namespace: "{{ openshift_hosted_registry_namespace }}"
@@ -126,4 +126,4 @@
 
 - include: storage/glusterfs.yml
   when:
-  - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs'
+  - openshift.hosted.registry.storage.kind | default(none) == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap

+ 42 - 1
roles/openshift_hosted/tasks/registry/storage/glusterfs.yml

@@ -1,10 +1,18 @@
 ---
+- name: Get registry DeploymentConfig
+  oc_obj:
+    namespace: "{{ openshift_hosted_registry_namespace }}"
+    state: list
+    kind: dc
+    name: "{{ openshift_hosted_registry_name }}"
+  register: registry_dc
+
 - name: Wait for registry pods
   oc_obj:
     namespace: "{{ openshift_hosted_registry_namespace }}"
     state: list
     kind: pod
-    selector: "{{ openshift_hosted_registry_name }}={{ openshift_hosted_registry_namespace }}"
+    selector: "{% for label, value in registry_dc.results.results[0].spec.selector.iteritems() %}{{ label }}={{ value }}{% if not loop.last %},{% endif %}{% endfor %}"
   register: registry_pods
   until:
   - "registry_pods.results.results[0]['items'] | count > 0"
@@ -38,6 +46,39 @@
     mode: "2775"
     recurse: True
 
+- block:
+  - name: Activate registry maintenance mode
+    oc_env:
+      namespace: "{{ openshift_hosted_registry_namespace }}"
+      name: "{{ openshift_hosted_registry_name }}"
+      env_vars:
+      - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
+
+  - name: Get first registry pod name
+    set_fact:
+      registry_pod_name: "{{ registry_pods.results.results[0]['items'][0].metadata.name }}"
+
+  - name: Copy current registry contents to new GlusterFS volume
+    command: "oc rsync {{ registry_pod_name }}:/registry/ {{ mktemp.stdout }}/"
+    when: openshift.hosted.registry.storage.glusterfs.swapcopy
+
+  - name: Swap new GlusterFS registry volume
+    oc_volume:
+      namespace: "{{ openshift_hosted_registry_namespace }}"
+      name: "{{ openshift_hosted_registry_name }}"
+      vol_name: registry-storage
+      mount_type: pvc
+      claim_name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
+
+  - name: Deactivate registry maintenance mode
+    oc_env:
+      namespace: "{{ openshift_hosted_registry_namespace }}"
+      name: "{{ openshift_hosted_registry_name }}"
+      state: absent
+      env_vars:
+      - REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
+  when: openshift.hosted.registry.storage.glusterfs.swap
+
 - name: Unmount registry volume
   mount:
     state: unmounted

+ 35 - 1
roles/openshift_storage_glusterfs/README.md

@@ -8,10 +8,24 @@ Requirements
 
 * Ansible 2.2
 
+Host Groups
+-----------
+
+The following group is expected to be populated for this role to run:
+
+* `[glusterfs]`
+
+Additionally, the following group may be specified either in addition to or
+instead of the above group to deploy a GlusterFS cluster for use by a natively
+hosted Docker registry:
+
+* `[glusterfs_registry]`
+
 Role Variables
 --------------
 
-From this role:
+This role has the following variables that control the integration of a
+GlusterFS cluster into a new or existing OpenShift cluster:
 
 | Name                                             | Default value           |                                         |
 |--------------------------------------------------|-------------------------|-----------------------------------------|
@@ -31,6 +45,25 @@ From this role:
 | openshift_storage_glusterfs_heketi_url           | Undefined               | URL for the heketi REST API, dynamically determined in native mode
 | openshift_storage_glusterfs_heketi_wipe          | False                   | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
 
+Each role variable also has a corresponding variable to optionally configure a
+separate GlusterFS cluster for use as storage for an integrated Docker
+registry. These variables start with the prefix
+`openshift_storage_glusterfs_registry_` and, for the most part, default to the
+values in their corresponding non-registry variables. The following variables
+are an exception:
+
+| Name                                              | Default value         |                                         |
+|---------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace    | registry namespace    | Default is to use the hosted registry's namespace, otherwise 'default'
+| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry'| This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters
+
+Additionally, this role's behavior responds to the following registry-specific
+variable:
+
+| Name                                         | Default value | Description                                                                  |
+|----------------------------------------------|---------------|------------------------------------------------------------------------------|
+| openshift_hosted_registry_glusterfs_swap     | False         | Whether to swap an existing registry's storage volume for a GlusterFS volume |
+
 Dependencies
 ------------
 
@@ -47,6 +80,7 @@ Example Playbook
   hosts: oo_first_master
   roles:
   - role: openshift_storage_glusterfs
+    when: groups.oo_glusterfs_to_config | default([]) | count > 0
 ```
 
 License

+ 20 - 1
roles/openshift_storage_glusterfs/defaults/main.yml

@@ -2,7 +2,7 @@
 openshift_storage_glusterfs_timeout: 300
 openshift_storage_glusterfs_namespace: 'default'
 openshift_storage_glusterfs_is_native: True
-openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}"
+openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs'
 openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
 openshift_storage_glusterfs_version: 'latest'
 openshift_storage_glusterfs_wipe: False
@@ -15,3 +15,22 @@ openshift_storage_glusterfs_heketi_admin_key: ''
 openshift_storage_glusterfs_heketi_user_key: ''
 openshift_storage_glusterfs_heketi_topology_load: True
 openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_heketi_url: "{{ omit }}"
+
+openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry'
+openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
+openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
+openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
+openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
+openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"

+ 166 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml

@@ -0,0 +1,166 @@
+---
+- name: Verify target namespace exists
+  oc_project:
+    state: present
+    name: "{{ glusterfs_namespace }}"
+  when: glusterfs_is_native or glusterfs_heketi_is_native
+
+- include: glusterfs_deploy.yml
+  when: glusterfs_is_native
+
+- name: Make sure heketi-client is installed
+  package: name=heketi-client state=present
+
+- name: Delete pre-existing heketi resources
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "template,route,service,dc,jobs,secret"
+    selector: "deploy-heketi"
+  - kind: "template,route,service,dc"
+    name: "heketi"
+  - kind: "svc,ep"
+    name: "heketi-storage-endpoints"
+  - kind: "sa"
+    name: "heketi-service-account"
+  failed_when: False
+  when: glusterfs_heketi_wipe
+
+- name: Wait for deploy-heketi pods to terminate
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=deploy-heketi-pod"
+  register: heketi_pod
+  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
+  when: glusterfs_heketi_wipe
+
+- name: Wait for heketi pods to terminate
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
+  when: glusterfs_heketi_wipe
+
+- name: Create heketi service account
+  oc_serviceaccount:
+    namespace: "{{ glusterfs_namespace }}"
+    name: heketi-service-account
+    state: present
+  when: glusterfs_heketi_is_native
+
+- name: Add heketi service account to privileged SCC
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+  when: glusterfs_heketi_is_native
+
+- name: Allow heketi service account to view/edit pods
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+    resource_kind: role
+    resource_name: edit
+    state: present
+  when: glusterfs_heketi_is_native
+
+- name: Check for existing deploy-heketi pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: list
+    kind: pod
+    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+  register: heketi_pod
+  when: glusterfs_heketi_is_native
+
+- name: Check if need to deploy deploy-heketi
+  set_fact:
+    glusterfs_heketi_deploy_is_missing: False
+  when:
+  - "glusterfs_heketi_is_native"
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- name: Check for existing heketi pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: list
+    kind: pod
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  when: glusterfs_heketi_is_native
+
+- name: Check if need to deploy heketi
+  set_fact:
+    glusterfs_heketi_is_missing: False
+  when:
+  - "glusterfs_heketi_is_native"
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- include: heketi_deploy_part1.yml
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_deploy_is_missing
+  - glusterfs_heketi_is_missing
+
+- name: Determine heketi URL
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: list
+    kind: ep
+    selector: "glusterfs in (deploy-heketi-service, heketi-service)"
+  register: heketi_url
+  until:
+  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+  delay: 10
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_url is undefined
+
+- name: Set heketi URL
+  set_fact:
+    glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_url is undefined
+
+- name: Verify heketi service
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+  changed_when: False
+
+- name: Generate topology file
+  template:
+    src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+    dest: "{{ mktemp.stdout }}/topology.json"
+  when:
+  - glusterfs_heketi_topology_load
+
+- name: Load heketi topology
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+  register: topology_load
+  failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+  when:
+  - glusterfs_heketi_topology_load
+
+- include: heketi_deploy_part2.yml
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_is_missing

+ 22 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml

@@ -0,0 +1,22 @@
+---
+- set_fact:
+    glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+    glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+    glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}"
+    glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
+    glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
+    glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+    glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+    glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
+    glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+    glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+    glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+    glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+    glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+    glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
+    glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
+    glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
+    glusterfs_nodes: "{{ g_glusterfs_hosts }}"
+
+- include: glusterfs_common.yml

+ 21 - 21
roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml

@@ -1,44 +1,44 @@
 ---
 - assert:
-    that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1"
+    that: "glusterfs_nodeselector.keys() | count == 1"
     msg: Only one GlusterFS nodeselector key pair should be provided
 
 - assert:
-    that: "groups.oo_glusterfs_to_config | count >= 3"
+    that: "glusterfs_nodes | count >= 3"
     msg: There must be at least three GlusterFS nodes specified
 
 - name: Delete pre-existing GlusterFS resources
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: "template,daemonset"
     name: glusterfs
     state: absent
-  when: openshift_storage_glusterfs_wipe
+  when: glusterfs_wipe
 
 - name: Unlabel any existing GlusterFS nodes
   oc_label:
     name: "{{ item }}"
     kind: node
     state: absent
-    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+    labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
   with_items: "{{ groups.all }}"
-  when: openshift_storage_glusterfs_wipe
+  when: glusterfs_wipe
 
 - name: Delete pre-existing GlusterFS config
   file:
     path: /var/lib/glusterd
     state: absent
   delegate_to: "{{ item }}"
-  with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
-  when: openshift_storage_glusterfs_wipe
+  with_items: "{{ glusterfs_nodes | default([]) }}"
+  when: glusterfs_wipe
 
 - name: Get GlusterFS storage devices state
   command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
   register: devices_info
   delegate_to: "{{ item }}"
-  with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
+  with_items: "{{ glusterfs_nodes | default([]) }}"
   failed_when: False
-  when: openshift_storage_glusterfs_wipe
+  when: glusterfs_wipe
 
   # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
 - name: Clear GlusterFS storage device contents
@@ -46,12 +46,12 @@
   delegate_to: "{{ item.item }}"
   with_items: "{{ devices_info.results }}"
   when:
-  - openshift_storage_glusterfs_wipe
+  - glusterfs_wipe
   - item.stdout_lines | count > 0
 
 - name: Add service accounts to privileged SCC
   oc_adm_policy_user:
-    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}"
+    user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}"
     resource_kind: scc
     resource_name: privileged
     state: present
@@ -64,8 +64,8 @@
     name: "{{ glusterfs_host }}"
     kind: node
     state: add
-    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
-  with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
+    labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+  with_items: "{{ glusterfs_nodes | default([]) }}"
   loop_control:
     loop_var: glusterfs_host
 
@@ -76,7 +76,7 @@
 
 - name: Create GlusterFS template
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: template
     name: glusterfs
     state: present
@@ -85,16 +85,16 @@
 
 - name: Deploy GlusterFS pods
   oc_process:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     template_name: "glusterfs"
     create: True
     params:
-      IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}"
-      IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}"
+      IMAGE_NAME: "{{ glusterfs_image }}"
+      IMAGE_VERSION: "{{ glusterfs_version }}"
 
 - name: Wait for GlusterFS pods
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: pod
     state: list
     selector: "glusterfs-node=pod"
@@ -102,6 +102,6 @@
   until:
   - "glusterfs_pods.results.results[0]['items'] | count > 0"
   # There must be as many pods with 'Ready' staus  True as there are nodes expecting those pods
-  - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count"
+  - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
   delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  retries: "{{ (glusterfs_timeout / 10) | int }}"

+ 29 - 6
roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml

@@ -1,7 +1,30 @@
 ---
+- set_fact:
+    glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}"
+    glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
+    glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
+    glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}"
+    glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
+    glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
+    glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}"
+    glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native }}"
+    glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing }}"
+    glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing }}"
+    glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}"
+    glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}"
+    glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}"
+    glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_registry_heketi_user_key }}"
+    glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}"
+    glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}"
+    glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
+    glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}"
+
+- include: glusterfs_common.yml
+  when: g_glusterfs_registry_hosts != g_glusterfs_hosts
+
 - name: Delete pre-existing GlusterFS registry resources
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: "{{ item.kind }}"
     name: "{{ item.name | default(omit) }}"
     selector: "{{ item.selector | default(omit) }}"
@@ -23,7 +46,7 @@
 
 - name: Create GlusterFS registry endpoints
   oc_obj:
-    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+    namespace: "{{ glusterfs_namespace }}"
     state: present
     kind: endpoints
     name: glusterfs-registry-endpoints
@@ -32,7 +55,7 @@
 
 - name: Create GlusterFS registry service
   oc_obj:
-    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+    namespace: "{{ glusterfs_namespace }}"
     state: present
     kind: service
     name: glusterfs-registry-endpoints
@@ -40,9 +63,9 @@
     - "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
 
 - name: Check if GlusterFS registry volume exists
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list"
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list"
   register: registry_volume
 
 - name: Create GlusterFS registry volume
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
-  when: "'openshift.hosted.registry.storage.glusterfs.path' not in registry_volume.stdout"
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+  when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"

+ 8 - 8
roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml

@@ -8,7 +8,7 @@
 
 - name: Create deploy-heketi resources
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: template
     name: deploy-heketi
     state: present
@@ -17,18 +17,18 @@
 
 - name: Deploy deploy-heketi pod
   oc_process:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     template_name: "deploy-heketi"
     create: True
     params:
-      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
-      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
-      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
-      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+      IMAGE_NAME: "{{ glusterfs_heketi_image }}"
+      IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+      HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
+      HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
 
 - name: Wait for deploy-heketi pod
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: pod
     state: list
     selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
@@ -38,4 +38,4 @@
   # Pod's 'Ready' status must be True
   - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
   delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  retries: "{{ (glusterfs_timeout / 10) | int }}"

+ 17 - 17
roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml

@@ -1,6 +1,6 @@
 ---
 - name: Create heketi DB volume
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
   register: setup_storage
   failed_when: False
 
@@ -13,12 +13,12 @@
 
 # Need `command` here because heketi-storage.json contains multiple objects.
 - name: Copy heketi DB to GlusterFS volume
-  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}"
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
   when: setup_storage.rc == 0
 
 - name: Wait for copy job to finish
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: job
     state: list
     name: "heketi-storage-copy-job"
@@ -28,7 +28,7 @@
   # Pod's 'Complete' status must be True
   - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
   delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
   failed_when:
   - "'results' in heketi_job.results"
   - "heketi_job.results.results | count > 0"
@@ -38,7 +38,7 @@
 
 - name: Delete deploy resources
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: "{{ item.kind }}"
     name: "{{ item.name | default(omit) }}"
     selector: "{{ item.selector | default(omit) }}"
@@ -55,7 +55,7 @@
 
 - name: Create heketi resources
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: template
     name: heketi
     state: present
@@ -64,18 +64,18 @@
 
 - name: Deploy heketi pod
   oc_process:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     template_name: "heketi"
     create: True
     params:
-      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
-      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
-      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
-      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+      IMAGE_NAME: "{{ glusterfs_heketi_image }}"
+      IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+      HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
+      HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
 
 - name: Wait for heketi pod
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: pod
     state: list
     selector: "glusterfs=heketi-pod"
@@ -85,11 +85,11 @@
   # Pod's 'Ready' status must be True
   - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
   delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
 
 - name: Determine heketi URL
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     state: list
     kind: ep
     selector: "glusterfs=heketi-service"
@@ -98,12 +98,12 @@
   - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
   - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
   delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
 
 - name: Set heketi URL
   set_fact:
-    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+    glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
 
 - name: Verify heketi service
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
   changed_when: False

+ 5 - 165
roles/openshift_storage_glusterfs/tasks/main.yml

@@ -5,174 +5,14 @@
   changed_when: False
   check_mode: no
 
-- name: Verify target namespace exists
-  oc_project:
-    state: present
-    name: "{{ openshift_storage_glusterfs_namespace }}"
-  when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native
-
-- include: glusterfs_deploy.yml
-  when: openshift_storage_glusterfs_is_native
-
-- name: Make sure heketi-client is installed
-  package: name=heketi-client state=present
-
-- name: Delete pre-existing heketi resources
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    kind: "{{ item.kind }}"
-    name: "{{ item.name | default(omit) }}"
-    selector: "{{ item.selector | default(omit) }}"
-    state: absent
-  with_items:
-  - kind: "template,route,service,jobs,dc,secret"
-    selector: "deploy-heketi"
-  - kind: "template,route,dc,service"
-    name: "heketi"
-  - kind: "svc,ep"
-    name: "heketi-storage-endpoints"
-  - kind: "sa"
-    name: "heketi-service-account"
-  failed_when: False
-  when: openshift_storage_glusterfs_heketi_wipe
-
-- name: Wait for deploy-heketi pods to terminate
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    kind: pod
-    state: list
-    selector: "glusterfs=deploy-heketi-pod"
-  register: heketi_pod
-  until: "heketi_pod.results.results[0]['items'] | count == 0"
-  delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
-  when: openshift_storage_glusterfs_heketi_wipe
-
-- name: Wait for heketi pods to terminate
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    kind: pod
-    state: list
-    selector: "glusterfs=heketi-pod"
-  register: heketi_pod
-  until: "heketi_pod.results.results[0]['items'] | count == 0"
-  delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
-  when: openshift_storage_glusterfs_heketi_wipe
-
-- name: Create heketi service account
-  oc_serviceaccount:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    name: heketi-service-account
-    state: present
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Add heketi service account to privileged SCC
-  oc_adm_policy_user:
-    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
-    resource_kind: scc
-    resource_name: privileged
-    state: present
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Allow heketi service account to view/edit pods
-  oc_adm_policy_user:
-    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
-    resource_kind: role
-    resource_name: edit
-    state: present
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Check for existing deploy-heketi pod
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    state: list
-    kind: pod
-    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
-  register: heketi_pod
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Check if need to deploy deploy-heketi
-  set_fact:
-    openshift_storage_glusterfs_heketi_deploy_is_missing: False
-  when:
-  - "openshift_storage_glusterfs_heketi_is_native"
-  - "heketi_pod.results.results[0]['items'] | count > 0"
-  # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
-  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
-
-- name: Check for existing heketi pod
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    state: list
-    kind: pod
-    selector: "glusterfs=heketi-pod"
-  register: heketi_pod
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Check if need to deploy heketi
-  set_fact:
-    openshift_storage_glusterfs_heketi_is_missing: False
+- include: glusterfs_config.yml
   when:
-  - "openshift_storage_glusterfs_heketi_is_native"
-  - "heketi_pod.results.results[0]['items'] | count > 0"
-  # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
-  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
-
-- include: heketi_deploy_part1.yml
-  when:
-  - openshift_storage_glusterfs_heketi_is_native
-  - openshift_storage_glusterfs_heketi_deploy_is_missing
-  - openshift_storage_glusterfs_heketi_is_missing
-
-- name: Determine heketi URL
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    state: list
-    kind: ep
-    selector: "glusterfs in (deploy-heketi-service, heketi-service)"
-  register: heketi_url
-  until:
-  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
-  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
-  delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
-  when:
-  - openshift_storage_glusterfs_heketi_is_native
-  - openshift_storage_glusterfs_heketi_url is undefined
-
-- name: Set heketi URL
-  set_fact:
-    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
-  when:
-  - openshift_storage_glusterfs_heketi_is_native
-  - openshift_storage_glusterfs_heketi_url is undefined
-
-- name: Verify heketi service
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
-  changed_when: False
-
-- name: Generate topology file
-  template:
-    src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
-    dest: "{{ mktemp.stdout }}/topology.json"
-  when:
-  - openshift_storage_glusterfs_is_native
-  - openshift_storage_glusterfs_heketi_topology_load
-
-- name: Load heketi topology
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
-  register: topology_load
-  failed_when: topology_load.rc != 0 or 'Unable' in topology_load.stdout
-  when:
-  - openshift_storage_glusterfs_is_native
-  - openshift_storage_glusterfs_heketi_topology_load
-
-- include: heketi_deploy_part2.yml
-  when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing
+  - g_glusterfs_hosts | default([]) | count > 0
 
 - include: glusterfs_registry.yml
-  when: openshift.hosted.registry.storage.kind == 'glusterfs'
+  when:
+  - g_glusterfs_registry_hosts | default([]) | count > 0
+  - "openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.glusterfs.swap"
 
 - name: Delete temp directory
   file:

+ 1 - 1
roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2

@@ -4,7 +4,7 @@ metadata:
   name: glusterfs-registry-endpoints
 subsets:
 - addresses:
-{% for node in groups.oo_glusterfs_to_config %}
+{% for node in glusterfs_nodes %}
   - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
 {% endfor %}
   ports:

+ 1 - 1
roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2

@@ -1,7 +1,7 @@
 {
   "clusters": [
 {%- set clusters = {} -%}
-{%- for node in groups.oo_glusterfs_to_config -%}
+{%- for node in glusterfs_nodes -%}
   {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%}
   {%- if cluster in clusters -%}
     {%- set _dummy = clusters[cluster].append(node) -%}