
GlusterFS: Allow for a separate registry-specific playbook

Signed-off-by: Jose A. Rivera <jarrpa@redhat.com>
Jose A. Rivera, 8 years ago
parent commit 1328328522

+ 2 - 0
playbooks/byo/openshift-cluster/cluster_hosts.yml

@@ -15,6 +15,8 @@ g_nfs_hosts: "{{ groups.nfs | default([]) }}"
 
 g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}"
 
+g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}"
+
 g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
                  | union(g_lb_hosts) | union(g_nfs_hosts)
                  | union(g_new_node_hosts)| union(g_new_master_hosts)

+ 92 - 0
playbooks/byo/openshift-glusterfs/README.md

@@ -0,0 +1,92 @@
+# OpenShift GlusterFS Playbooks
+
+These playbooks are intended to enable the use of GlusterFS volumes by pods in
+OpenShift. While they try to provide a sane set of defaults, they do cover a
+variety of scenarios and configurations, so read carefully. :)
+
+## Playbook: config.yml
+
+This is the main playbook that integrates GlusterFS into a new or existing
+OpenShift cluster. It will also, if specified, configure a hosted Docker
+registry with GlusterFS backend storage.
+
+This playbook requires the `glusterfs` group to exist in the Ansible inventory
+file. The hosts in this group are the nodes of the GlusterFS cluster.
+
+ * If this is a newly configured cluster, each host must have a
+   `glusterfs_devices` variable defined, which must be a list of block
+   storage devices intended for use only by the GlusterFS cluster. If this is
+   also an external GlusterFS cluster, you must specify
+   `openshift_storage_glusterfs_is_native=False`. If the cluster is to be
+   managed by an external heketi service, you must also specify
+   `openshift_storage_glusterfs_heketi_is_native=False` and
+   `openshift_storage_glusterfs_heketi_url=<URL>` with the URL to the heketi
+   service. All of these variables are specified in `[OSEv3:vars]` (see the
+   sketch after this list).
+ * If this is an existing cluster, you do not need to specify a list of block
+   devices, but you must specify the following variables in `[OSEv3:vars]`:
+   * `openshift_storage_glusterfs_is_missing=False`
+   * `openshift_storage_glusterfs_heketi_is_missing=False`
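+
+For instance (per the first item above), pointing the playbook at a fully
+external GlusterFS cluster managed by an external heketi service might look
+like this in `[OSEv3:vars]` (the heketi URL is illustrative only):
+```
+openshift_storage_glusterfs_is_native=False
+openshift_storage_glusterfs_heketi_is_native=False
+openshift_storage_glusterfs_heketi_url=http://heketi.example.com:8080
+```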
+
+By default, pods for a native GlusterFS cluster will be created in the
+`default` namespace. To change this, specify
+`openshift_storage_glusterfs_namespace=<other namespace>` in `[OSEv3:vars]`.
+
+To configure the deployment of a Docker registry with GlusterFS backend
+storage, specify `openshift_hosted_registry_storage_kind=glusterfs` in
+`[OSEv3:vars]`. To create a separate GlusterFS cluster for use only by the
+registry, specify a `glusterfs_registry` group that is populated in the same
+way as the `glusterfs` group with the nodes for the separate cluster. If no
+`glusterfs_registry` group is specified, the cluster defined by the `glusterfs`
+group will be used.
+
+**NOTE:** For each namespace that is to have access to GlusterFS volumes, an
+Endpoints resource pointing to the GlusterFS cluster nodes and a corresponding
+Service resource must be created. If dynamic provisioning using StorageClasses
+is configured, these resources are created automatically in the namespaces that
+require them. This playbook also takes care of creating these resources in the
+namespaces used for deployment.
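+
+For illustration, a minimal sketch of such an Endpoints resource and its
+matching Service (the name, IPs, and dummy port below are hypothetical; the
+playbook generates the real resources from the inventory) might look like:
+```
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: glusterfs-cluster
+subsets:
+- addresses:
+  - ip: 192.168.10.11
+  - ip: 192.168.10.12
+  - ip: 192.168.10.13
+  ports:
+  - port: 1
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: glusterfs-cluster
+spec:
+  ports:
+  - port: 1
+```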
+
+An example of a minimal inventory file:
+```
+[OSEv3:children]
+masters
+nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=origin
+
+[masters]
+master
+
+[nodes]
+node0
+node1
+node2
+
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/sdb" ]'
+node1 glusterfs_devices='[ "/dev/sdb", "/dev/sdc" ]'
+node2 glusterfs_devices='[ "/dev/sdd" ]'
+```
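+
+To dedicate a separate GlusterFS cluster to the registry, `glusterfs_registry`
+would be added to `[OSEv3:children]` and populated with its own hosts, for
+example (host names and devices here are hypothetical):
+```
+[glusterfs_registry]
+node3 glusterfs_devices='[ "/dev/sdb" ]'
+node4 glusterfs_devices='[ "/dev/sdb" ]'
+node5 glusterfs_devices='[ "/dev/sdb" ]'
+```
+For a native deployment these hosts would, like the `glusterfs` hosts above,
+also be listed under `[nodes]`.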
+
+## Playbook: registry.yml
+
+This playbook is intended for admins who want to deploy a hosted Docker
+registry with GlusterFS backend storage on an existing OpenShift cluster. It
+has all the same requirements and behaviors as `config.yml`.
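+
+Assuming an inventory like the one above, the playbook might be invoked along
+these lines (the inventory path is illustrative):
+```
+ansible-playbook -i /path/to/inventory playbooks/byo/openshift-glusterfs/registry.yml
+```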
+
+## Role: openshift_storage_glusterfs
+
+The bulk of the work is done by the `openshift_storage_glusterfs` role. This
+role can handle the deployment of GlusterFS (if it is to be hosted on the
+OpenShift cluster), the registration of GlusterFS nodes (hosted or standalone),
+and (if specified) integration as backend storage for a hosted Docker registry.
+
+See the documentation in the role's directory for further details.
+
+## Role: openshift_hosted
+
+The `openshift_hosted` role recognizes `glusterfs` as a possible storage
+backend for a hosted Docker registry.

+ 10 - 0
playbooks/byo/openshift-glusterfs/config.yml

@@ -0,0 +1,10 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-glusterfs/config.yml

+ 1 - 0
playbooks/byo/openshift-glusterfs/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/byo/openshift-glusterfs/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 10 - 0
playbooks/byo/openshift-glusterfs/registry.yml

@@ -0,0 +1,10 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+  tags:
+  - always
+
+- include: ../../common/openshift-glusterfs/registry.yml

+ 1 - 0
playbooks/byo/openshift-glusterfs/roles

@@ -0,0 +1 @@
+../../../roles

+ 1 - 1
playbooks/common/openshift-cluster/evaluate_groups.yml

@@ -155,5 +155,5 @@
       groups: oo_glusterfs_to_config
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
-    with_items: "{{ g_glusterfs_hosts | default([]) }}"
+    with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts) | default([]) }}"
     changed_when: no

+ 3 - 1
playbooks/common/openshift-glusterfs/config.yml

@@ -12,7 +12,9 @@
     - service: glusterfs_bricks
       port: "49152-49251/tcp"
   roles:
-  - os_firewall
+  - role: os_firewall
+    when:
+    - openshift_storage_glusterfs_is_native | default(True)
 
 - name: Configure GlusterFS
   hosts: oo_first_master

+ 4 - 0
playbooks/common/openshift-glusterfs/registry.yml

@@ -0,0 +1,4 @@
+---
+- include: config.yml
+
+- include: ../openshift-cluster/openshift_hosted.yml

+ 18 - 0
roles/openshift_storage_glusterfs/README.md

@@ -15,6 +15,12 @@ The following group is expected to be populated for this role to run:
 
 * `[glusterfs]`
 
+Additionally, the following group may be specified either in addition to or
+instead of the above group to deploy a GlusterFS cluster for use by a natively
+hosted Docker registry:
+
+* `[glusterfs_registry]`
+
 Role Variables
 --------------
 
@@ -39,6 +45,18 @@ GlusterFS cluster into a new or existing OpenShift cluster:
 | openshift_storage_glusterfs_heketi_url           | Undefined               | URL for the heketi REST API, dynamically determined in native mode
 | openshift_storage_glusterfs_heketi_wipe          | False                   | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
 
+Each role variable also has a corresponding variable to optionally configure a
+separate GlusterFS cluster for use as storage for an integrated Docker
+registry. These variables start with the prefix
+`openshift_storage_glusterfs_registry_` and, for the most part, default to the
+values in their corresponding non-registry variables. The following variables
+are exceptions:
+
+| Name                                              | Default value          | Description                             |
+|---------------------------------------------------|------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace    | registry namespace     | Default is to use the hosted registry's namespace, otherwise 'default'
+| openshift_storage_glusterfs_registry_nodeselector | 'storagenode=registry' | This allows for the logical separation of the registry GlusterFS cluster from any regular-use GlusterFS clusters
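+
+As a hypothetical example, these could be overridden in `[OSEv3:vars]` like so
+(all values are illustrative):
+```
+openshift_storage_glusterfs_registry_namespace=registry-storage
+openshift_storage_glusterfs_registry_nodeselector='storagenode=registry'
+openshift_storage_glusterfs_registry_wipe=True
+```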
+
 Dependencies
 ------------
 

+ 20 - 1
roles/openshift_storage_glusterfs/defaults/main.yml

@@ -2,7 +2,7 @@
 openshift_storage_glusterfs_timeout: 300
 openshift_storage_glusterfs_namespace: 'default'
 openshift_storage_glusterfs_is_native: True
-openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}"
+openshift_storage_glusterfs_nodeselector: 'storagenode=glusterfs'
 openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
 openshift_storage_glusterfs_version: 'latest'
 openshift_storage_glusterfs_wipe: False
@@ -15,3 +15,22 @@ openshift_storage_glusterfs_heketi_admin_key: ''
 openshift_storage_glusterfs_heketi_user_key: ''
 openshift_storage_glusterfs_heketi_topology_load: True
 openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_heketi_url: "{{ omit }}"
+
+openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+openshift_storage_glusterfs_registry_nodeselector: 'storagenode=registry'
+openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
+openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
+openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+openshift_storage_glusterfs_registry_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+openshift_storage_glusterfs_registry_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+openshift_storage_glusterfs_registry_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+openshift_storage_glusterfs_registry_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+openshift_storage_glusterfs_registry_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
+openshift_storage_glusterfs_registry_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
+openshift_storage_glusterfs_registry_heketi_url: "{{ openshift_storage_glusterfs_heketi_url | default(omit) }}"

+ 166 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml

@@ -0,0 +1,166 @@
+---
+- name: Verify target namespace exists
+  oc_project:
+    state: present
+    name: "{{ glusterfs_namespace }}"
+  when: glusterfs_is_native or glusterfs_heketi_is_native
+
+- include: glusterfs_deploy.yml
+  when: glusterfs_is_native
+
+- name: Make sure heketi-client is installed
+  package: name=heketi-client state=present
+
+- name: Delete pre-existing heketi resources
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "template,route,service,dc,jobs,secret"
+    selector: "deploy-heketi"
+  - kind: "template,route,service,dc"
+    name: "heketi"
+  - kind: "svc,ep"
+    name: "heketi-storage-endpoints"
+  - kind: "sa"
+    name: "heketi-service-account"
+  failed_when: False
+  when: glusterfs_heketi_wipe
+
+- name: Wait for deploy-heketi pods to terminate
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=deploy-heketi-pod"
+  register: heketi_pod
+  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
+  when: glusterfs_heketi_wipe
+
+- name: Wait for heketi pods to terminate
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
+  when: glusterfs_heketi_wipe
+
+- name: Create heketi service account
+  oc_serviceaccount:
+    namespace: "{{ glusterfs_namespace }}"
+    name: heketi-service-account
+    state: present
+  when: glusterfs_heketi_is_native
+
+- name: Add heketi service account to privileged SCC
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+  when: glusterfs_heketi_is_native
+
+- name: Allow heketi service account to view/edit pods
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ glusterfs_namespace }}:heketi-service-account"
+    resource_kind: role
+    resource_name: edit
+    state: present
+  when: glusterfs_heketi_is_native
+
+- name: Check for existing deploy-heketi pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: list
+    kind: pod
+    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+  register: heketi_pod
+  when: glusterfs_heketi_is_native
+
+- name: Check if need to deploy deploy-heketi
+  set_fact:
+    glusterfs_heketi_deploy_is_missing: False
+  when:
+  - "glusterfs_heketi_is_native"
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- name: Check for existing heketi pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: list
+    kind: pod
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  when: glusterfs_heketi_is_native
+
+- name: Check if need to deploy heketi
+  set_fact:
+    glusterfs_heketi_is_missing: False
+  when:
+  - "glusterfs_heketi_is_native"
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+- include: heketi_deploy_part1.yml
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_deploy_is_missing
+  - glusterfs_heketi_is_missing
+
+- name: Determine heketi URL
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    state: list
+    kind: ep
+    selector: "glusterfs in (deploy-heketi-service, heketi-service)"
+  register: heketi_url
+  until:
+  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+  delay: 10
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_url is undefined
+
+- name: Set heketi URL
+  set_fact:
+    glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_url is undefined
+
+- name: Verify heketi service
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
+  changed_when: False
+
+- name: Generate topology file
+  template:
+    src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+    dest: "{{ mktemp.stdout }}/topology.json"
+  when:
+  - glusterfs_heketi_topology_load
+
+- name: Load heketi topology
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+  register: topology_load
+  failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+  when:
+  - glusterfs_heketi_topology_load
+
+- include: heketi_deploy_part2.yml
+  when:
+  - glusterfs_heketi_is_native
+  - glusterfs_heketi_is_missing

+ 22 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml

@@ -0,0 +1,22 @@
+---
+- set_fact:
+    glusterfs_timeout: "{{ openshift_storage_glusterfs_timeout }}"
+    glusterfs_namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    glusterfs_is_native: "{{ openshift_storage_glusterfs_is_native }}"
+    glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector | map_from_pairs }}"
+    glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
+    glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
+    glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe }}"
+    glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native }}"
+    glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
+    glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_heketi_deploy_is_missing }}"
+    glusterfs_heketi_image: "{{ openshift_storage_glusterfs_heketi_image }}"
+    glusterfs_heketi_version: "{{ openshift_storage_glusterfs_heketi_version }}"
+    glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+    glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+    glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_heketi_topology_load }}"
+    glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_heketi_wipe }}"
+    glusterfs_heketi_url: "{{ openshift_storage_glusterfs_heketi_url }}"
+    glusterfs_nodes: "{{ g_glusterfs_hosts }}"
+
+- include: glusterfs_common.yml

+ 21 - 21
roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml

@@ -1,44 +1,44 @@
 ---
 - assert:
-    that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1"
+    that: "glusterfs_nodeselector.keys() | count == 1"
     msg: Only one GlusterFS nodeselector key pair should be provided
 
 - assert:
-    that: "groups.oo_glusterfs_to_config | count >= 3"
+    that: "glusterfs_nodes | count >= 3"
     msg: There must be at least three GlusterFS nodes specified
 
 - name: Delete pre-existing GlusterFS resources
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: "template,daemonset"
     name: glusterfs
     state: absent
-  when: openshift_storage_glusterfs_wipe
+  when: glusterfs_wipe
 
 - name: Unlabel any existing GlusterFS nodes
   oc_label:
     name: "{{ item }}"
     kind: node
     state: absent
-    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+    labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
   with_items: "{{ groups.all }}"
-  when: openshift_storage_glusterfs_wipe
+  when: glusterfs_wipe
 
 - name: Delete pre-existing GlusterFS config
   file:
     path: /var/lib/glusterd
     state: absent
   delegate_to: "{{ item }}"
-  with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
-  when: openshift_storage_glusterfs_wipe
+  with_items: "{{ glusterfs_nodes | default([]) }}"
+  when: glusterfs_wipe
 
 - name: Get GlusterFS storage devices state
   command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
   register: devices_info
   delegate_to: "{{ item }}"
-  with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
+  with_items: "{{ glusterfs_nodes | default([]) }}"
   failed_when: False
-  when: openshift_storage_glusterfs_wipe
+  when: glusterfs_wipe
 
   # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
 - name: Clear GlusterFS storage device contents
@@ -46,12 +46,12 @@
   delegate_to: "{{ item.item }}"
   with_items: "{{ devices_info.results }}"
   when:
-  - openshift_storage_glusterfs_wipe
+  - glusterfs_wipe
   - item.stdout_lines | count > 0
 
 - name: Add service accounts to privileged SCC
   oc_adm_policy_user:
-    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}"
+    user: "system:serviceaccount:{{ glusterfs_namespace }}:{{ item }}"
     resource_kind: scc
     resource_name: privileged
     state: present
@@ -64,8 +64,8 @@
     name: "{{ glusterfs_host }}"
     kind: node
     state: add
-    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
-  with_items: "{{ groups.oo_glusterfs_to_config | default([]) }}"
+    labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+  with_items: "{{ glusterfs_nodes | default([]) }}"
   loop_control:
     loop_var: glusterfs_host
 
@@ -76,7 +76,7 @@
 
 - name: Create GlusterFS template
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: template
     name: glusterfs
     state: present
@@ -85,16 +85,16 @@
 
 - name: Deploy GlusterFS pods
   oc_process:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     template_name: "glusterfs"
     create: True
     params:
-      IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}"
-      IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}"
+      IMAGE_NAME: "{{ glusterfs_image }}"
+      IMAGE_VERSION: "{{ glusterfs_version }}"
 
 - name: Wait for GlusterFS pods
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: pod
     state: list
     selector: "glusterfs-node=pod"
@@ -102,6 +102,6 @@
   until:
   - "glusterfs_pods.results.results[0]['items'] | count > 0"
   # There must be as many pods with 'Ready' status True as there are nodes expecting those pods
-  - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count"
+  - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"
   delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  retries: "{{ (glusterfs_timeout / 10) | int }}"

+ 29 - 6
roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml

@@ -1,7 +1,30 @@
 ---
+- set_fact:
+    glusterfs_timeout: "{{ openshift_storage_glusterfs_registry_timeout }}"
+    glusterfs_namespace: "{{ openshift_storage_glusterfs_registry_namespace }}"
+    glusterfs_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
+    glusterfs_nodeselector: "{{ openshift_storage_glusterfs_registry_nodeselector | map_from_pairs }}"
+    glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
+    glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
+    glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe }}"
+    glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native }}"
+    glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing }}"
+    glusterfs_heketi_deploy_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_deploy_is_missing }}"
+    glusterfs_heketi_image: "{{ openshift_storage_glusterfs_registry_heketi_image }}"
+    glusterfs_heketi_version: "{{ openshift_storage_glusterfs_registry_heketi_version }}"
+    glusterfs_heketi_admin_key: "{{ openshift_storage_glusterfs_registry_heketi_admin_key }}"
+    glusterfs_heketi_user_key: "{{ openshift_storage_glusterfs_registry_heketi_user_key }}"
+    glusterfs_heketi_topology_load: "{{ openshift_storage_glusterfs_registry_heketi_topology_load }}"
+    glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_registry_heketi_wipe }}"
+    glusterfs_heketi_url: "{{ openshift_storage_glusterfs_registry_heketi_url }}"
+    glusterfs_nodes: "{{ g_glusterfs_registry_hosts }}"
+
+- include: glusterfs_common.yml
+  when: g_glusterfs_registry_hosts != g_glusterfs_hosts
+
 - name: Delete pre-existing GlusterFS registry resources
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: "{{ item.kind }}"
     name: "{{ item.name | default(omit) }}"
     selector: "{{ item.selector | default(omit) }}"
@@ -23,7 +46,7 @@
 
 - name: Create GlusterFS registry endpoints
   oc_obj:
-    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+    namespace: "{{ glusterfs_namespace }}"
     state: present
     kind: endpoints
     name: glusterfs-registry-endpoints
@@ -32,7 +55,7 @@
 
 - name: Create GlusterFS registry service
   oc_obj:
-    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+    namespace: "{{ glusterfs_namespace }}"
     state: present
     kind: service
     name: glusterfs-registry-endpoints
@@ -40,9 +63,9 @@
     - "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
 
 - name: Check if GlusterFS registry volume exists
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list"
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume list"
   register: registry_volume
 
 - name: Create GlusterFS registry volume
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
-  when: "'openshift.hosted.registry.storage.glusterfs.path' not in registry_volume.stdout"
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+  when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"

+ 8 - 8
roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml

@@ -8,7 +8,7 @@
 
 - name: Create deploy-heketi resources
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: template
     name: deploy-heketi
     state: present
@@ -17,18 +17,18 @@
 
 - name: Deploy deploy-heketi pod
   oc_process:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     template_name: "deploy-heketi"
     create: True
     params:
-      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
-      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
-      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
-      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+      IMAGE_NAME: "{{ glusterfs_heketi_image }}"
+      IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+      HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
+      HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
 
 - name: Wait for deploy-heketi pod
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: pod
     state: list
     selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
@@ -38,4 +38,4 @@
   # Pod's 'Ready' status must be True
   - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
   delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  retries: "{{ (glusterfs_timeout / 10) | int }}"

+ 17 - 17
roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml

@@ -1,6 +1,6 @@
 ---
 - name: Create heketi DB volume
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
   register: setup_storage
   failed_when: False
 
@@ -13,12 +13,12 @@
 
 # Need `command` here because heketi-storage.json contains multiple objects.
 - name: Copy heketi DB to GlusterFS volume
-  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}"
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"
   when: setup_storage.rc == 0
 
 - name: Wait for copy job to finish
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: job
     state: list
     name: "heketi-storage-copy-job"
@@ -28,7 +28,7 @@
   # Pod's 'Complete' status must be True
   - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
   delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
   failed_when:
   - "'results' in heketi_job.results"
   - "heketi_job.results.results | count > 0"
@@ -38,7 +38,7 @@
 
 - name: Delete deploy resources
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: "{{ item.kind }}"
     name: "{{ item.name | default(omit) }}"
     selector: "{{ item.selector | default(omit) }}"
@@ -55,7 +55,7 @@
 
 - name: Create heketi resources
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: template
     name: heketi
     state: present
@@ -64,18 +64,18 @@
 
 - name: Deploy heketi pod
   oc_process:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     template_name: "heketi"
     create: True
     params:
-      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
-      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
-      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
-      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+      IMAGE_NAME: "{{ glusterfs_heketi_image }}"
+      IMAGE_VERSION: "{{ glusterfs_heketi_version }}"
+      HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
+      HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
 
 - name: Wait for heketi pod
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     kind: pod
     state: list
     selector: "glusterfs=heketi-pod"
@@ -85,11 +85,11 @@
   # Pod's 'Ready' status must be True
   - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
   delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
 
 - name: Determine heketi URL
   oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    namespace: "{{ glusterfs_namespace }}"
     state: list
     kind: ep
     selector: "glusterfs=heketi-service"
@@ -98,12 +98,12 @@
   - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
   - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
   delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  retries: "{{ (glusterfs_timeout / 10) | int }}"
 
 - name: Set heketi URL
   set_fact:
-    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+    glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
 
 - name: Verify heketi service
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+  command: "heketi-cli -s http://{{ glusterfs_heketi_url }} --user admin --secret '{{ glusterfs_heketi_admin_key }}' cluster list"
   changed_when: False

+ 5 - 165
roles/openshift_storage_glusterfs/tasks/main.yml

@@ -5,174 +5,14 @@
   changed_when: False
   check_mode: no
 
-- name: Verify target namespace exists
-  oc_project:
-    state: present
-    name: "{{ openshift_storage_glusterfs_namespace }}"
-  when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native
-
-- include: glusterfs_deploy.yml
-  when: openshift_storage_glusterfs_is_native
-
-- name: Make sure heketi-client is installed
-  package: name=heketi-client state=present
-
-- name: Delete pre-existing heketi resources
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    kind: "{{ item.kind }}"
-    name: "{{ item.name | default(omit) }}"
-    selector: "{{ item.selector | default(omit) }}"
-    state: absent
-  with_items:
-  - kind: "template,route,service,jobs,dc,secret"
-    selector: "deploy-heketi"
-  - kind: "template,route,dc,service"
-    name: "heketi"
-  - kind: "svc,ep"
-    name: "heketi-storage-endpoints"
-  - kind: "sa"
-    name: "heketi-service-account"
-  failed_when: False
-  when: openshift_storage_glusterfs_heketi_wipe
-
-- name: Wait for deploy-heketi pods to terminate
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    kind: pod
-    state: list
-    selector: "glusterfs=deploy-heketi-pod"
-  register: heketi_pod
-  until: "heketi_pod.results.results[0]['items'] | count == 0"
-  delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
-  when: openshift_storage_glusterfs_heketi_wipe
-
-- name: Wait for heketi pods to terminate
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    kind: pod
-    state: list
-    selector: "glusterfs=heketi-pod"
-  register: heketi_pod
-  until: "heketi_pod.results.results[0]['items'] | count == 0"
-  delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
-  when: openshift_storage_glusterfs_heketi_wipe
-
-- name: Create heketi service account
-  oc_serviceaccount:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    name: heketi-service-account
-    state: present
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Add heketi service account to privileged SCC
-  oc_adm_policy_user:
-    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
-    resource_kind: scc
-    resource_name: privileged
-    state: present
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Allow heketi service account to view/edit pods
-  oc_adm_policy_user:
-    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
-    resource_kind: role
-    resource_name: edit
-    state: present
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Check for existing deploy-heketi pod
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    state: list
-    kind: pod
-    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
-  register: heketi_pod
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Check if need to deploy deploy-heketi
-  set_fact:
-    openshift_storage_glusterfs_heketi_deploy_is_missing: False
-  when:
-  - "openshift_storage_glusterfs_heketi_is_native"
-  - "heketi_pod.results.results[0]['items'] | count > 0"
-  # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
-  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
-
-- name: Check for existing heketi pod
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    state: list
-    kind: pod
-    selector: "glusterfs=heketi-pod"
-  register: heketi_pod
-  when: openshift_storage_glusterfs_heketi_is_native
-
-- name: Check if need to deploy heketi
-  set_fact:
-    openshift_storage_glusterfs_heketi_is_missing: False
+- include: glusterfs_config.yml
   when:
-  - "openshift_storage_glusterfs_heketi_is_native"
-  - "heketi_pod.results.results[0]['items'] | count > 0"
-  # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
-  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
-
-- include: heketi_deploy_part1.yml
-  when:
-  - openshift_storage_glusterfs_heketi_is_native
-  - openshift_storage_glusterfs_heketi_deploy_is_missing
-  - openshift_storage_glusterfs_heketi_is_missing
-
-- name: Determine heketi URL
-  oc_obj:
-    namespace: "{{ openshift_storage_glusterfs_namespace }}"
-    state: list
-    kind: ep
-    selector: "glusterfs in (deploy-heketi-service, heketi-service)"
-  register: heketi_url
-  until:
-  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
-  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
-  delay: 10
-  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
-  when:
-  - openshift_storage_glusterfs_heketi_is_native
-  - openshift_storage_glusterfs_heketi_url is undefined
-
-- name: Set heketi URL
-  set_fact:
-    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
-  when:
-  - openshift_storage_glusterfs_heketi_is_native
-  - openshift_storage_glusterfs_heketi_url is undefined
-
-- name: Verify heketi service
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
-  changed_when: False
-
-- name: Generate topology file
-  template:
-    src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
-    dest: "{{ mktemp.stdout }}/topology.json"
-  when:
-  - openshift_storage_glusterfs_is_native
-  - openshift_storage_glusterfs_heketi_topology_load
-
-- name: Load heketi topology
-  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
-  register: topology_load
-  failed_when: topology_load.rc != 0 or 'Unable' in topology_load.stdout
-  when:
-  - openshift_storage_glusterfs_is_native
-  - openshift_storage_glusterfs_heketi_topology_load
-
-- include: heketi_deploy_part2.yml
-  when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing
+  - g_glusterfs_hosts | default([]) | count > 0
 
 - include: glusterfs_registry.yml
-  when: openshift.hosted.registry.storage.kind == 'glusterfs'
+  when:
+  - g_glusterfs_registry_hosts | default([]) | count > 0
+  - "openshift.hosted.registry.storage.kind == 'glusterfs'"
 
 - name: Delete temp directory
   file:

+ 1 - 1
roles/openshift_storage_glusterfs/templates/v3.6/glusterfs-registry-endpoints.yml.j2

@@ -4,7 +4,7 @@ metadata:
   name: glusterfs-registry-endpoints
 subsets:
 - addresses:
-{% for node in groups.oo_glusterfs_to_config %}
+{% for node in glusterfs_nodes %}
   - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
 {% endfor %}
   ports:

+ 1 - 1
roles/openshift_storage_glusterfs/templates/v3.6/topology.json.j2

@@ -1,7 +1,7 @@
 {
   "clusters": [
 {%- set clusters = {} -%}
-{%- for node in groups.oo_glusterfs_to_config -%}
+{%- for node in glusterfs_nodes -%}
   {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in node else '1' -%}
   {%- if cluster in clusters -%}
     {%- set _dummy = clusters[cluster].append(node) -%}