Browse Source

GlusterFS playbook and role

Signed-off-by: Jose A. Rivera <jarrpa@redhat.com>
Jose A. Rivera 8 years ago
parent
commit
1a72183498

+ 21 - 0
playbooks/common/openshift-glusterfs/config.yml

@@ -0,0 +1,21 @@
+---
+- name: Open firewall ports for GlusterFS
+  hosts: oo_glusterfs_to_config
+  vars:
+    os_firewall_allow:
+    - service: glusterfs_sshd
+      port: "2222/tcp"
+    - service: glusterfs_daemon
+      port: "24007/tcp"
+    - service: glusterfs_management
+      port: "24008/tcp"
+    - service: glusterfs_bricks
+      port: "49152-49251/tcp"
+  roles:
+  - os_firewall
+
+- name: Configure GlusterFS
+  hosts: oo_first_master
+  roles:
+  - role: openshift_storage_glusterfs
+    when: groups.oo_glusterfs_to_config | default([]) | count > 0

+ 1 - 0
playbooks/common/openshift-glusterfs/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/common/openshift-glusterfs/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 1 - 0
playbooks/common/openshift-glusterfs/roles

@@ -0,0 +1 @@
+../../../roles/

+ 60 - 0
roles/openshift_storage_glusterfs/README.md

@@ -0,0 +1,60 @@
+OpenShift GlusterFS Cluster
+===========================
+
+OpenShift GlusterFS Cluster Installation
+
+Requirements
+------------
+
+* Ansible 2.2
+
+Role Variables
+--------------
+
+From this role:
+
+| Name                                             | Default value           | Description                             |
+|--------------------------------------------------|-------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_timeout              | 300                     | Seconds to wait for pods to become ready
+| openshift_storage_glusterfs_namespace            | 'default'               | Namespace in which to create GlusterFS resources
+| openshift_storage_glusterfs_is_native            | True                    | GlusterFS should be containerized
+| openshift_storage_glusterfs_nodeselector         | 'storagenode=glusterfs' | Selector to determine which nodes will host GlusterFS pods in native mode
+| openshift_storage_glusterfs_image                | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
+| openshift_storage_glusterfs_version              | 'latest'                | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_wipe                 | False                   | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
+| openshift_storage_glusterfs_heketi_is_native     | True                    | heketi should be containerized
+| openshift_storage_glusterfs_heketi_image         | 'heketi/heketi'         | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
+| openshift_storage_glusterfs_heketi_version       | 'latest'                | Container image version to use for heketi pods
+| openshift_storage_glusterfs_heketi_admin_key     | ''                      | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key      | ''                      | String to use as secret key for performing heketi commands as user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_topology_load | True                    | Load the GlusterFS topology information into heketi
+| openshift_storage_glusterfs_heketi_url           | Undefined               | URL for the heketi REST API, dynamically determined in native mode
+| openshift_storage_glusterfs_heketi_wipe          | False                   | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
+
+Dependencies
+------------
+
+* os_firewall
+* openshift_hosted_facts
+* openshift_repos
+* lib_openshift
+
+Example Playbook
+----------------
+
+```
+- name: Configure GlusterFS hosts
+  hosts: oo_first_master
+  roles:
+  - role: openshift_storage_glusterfs
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jose A. Rivera (jarrpa@redhat.com)

+ 17 - 0
roles/openshift_storage_glusterfs/defaults/main.yml

@@ -0,0 +1,17 @@
+---
+openshift_storage_glusterfs_timeout: 300
+openshift_storage_glusterfs_namespace: 'default'
+openshift_storage_glusterfs_is_native: True
+openshift_storage_glusterfs_nodeselector: "{{ openshift_storage_glusterfs_nodeselector_label | default('storagenode=glusterfs') | map_from_pairs }}"
+openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
+openshift_storage_glusterfs_version: 'latest'
+openshift_storage_glusterfs_wipe: False
+openshift_storage_glusterfs_heketi_is_native: True
+openshift_storage_glusterfs_heketi_is_missing: True
+openshift_storage_glusterfs_heketi_deploy_is_missing: True
+openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"
+openshift_storage_glusterfs_heketi_version: 'latest'
+openshift_storage_glusterfs_heketi_admin_key: ''
+openshift_storage_glusterfs_heketi_user_key: ''
+openshift_storage_glusterfs_heketi_topology_load: True
+openshift_storage_glusterfs_heketi_wipe: "{{ openshift_storage_glusterfs_wipe }}"

+ 115 - 0
roles/openshift_storage_glusterfs/files/v1.6/deploy-heketi-template.yml

@@ -0,0 +1,115 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: deploy-heketi
+  labels:
+    glusterfs: heketi-template
+    deploy-heketi: support
+  annotations:
+    description: Bootstrap Heketi installation
+    tags: glusterfs,heketi,installation
+labels:
+  template: deploy-heketi
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi
+    labels:
+      glusterfs: deploy-heketi-service
+      deploy-heketi: support
+    annotations:
+      description: Exposes Heketi service
+  spec:
+    ports:
+    - name: deploy-heketi
+      port: 8080
+      targetPort: 8080
+    selector:
+      name: deploy-heketi
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi
+    labels:
+      glusterfs: deploy-heketi-route
+      deploy-heketi: support
+  spec:
+    to:
+      kind: Service
+      name: deploy-heketi
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: deploy-heketi
+    labels:
+      glusterfs: deploy-heketi-dc
+      deploy-heketi: support
+    annotations:
+      description: Defines how to deploy Heketi
+  spec:
+    replicas: 1
+    selector:
+      name: deploy-heketi
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: deploy-heketi
+        labels:
+          name: deploy-heketi
+          glusterfs: deploy-heketi-pod
+          deploy-heketi: support
+      spec:
+        serviceAccountName: heketi-service-account
+        containers:
+        - name: deploy-heketi
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          env:
+          - name: HEKETI_USER_KEY
+            value: ${HEKETI_USER_KEY}
+          - name: HEKETI_ADMIN_KEY
+            value: ${HEKETI_ADMIN_KEY}
+          - name: HEKETI_EXECUTOR
+            value: kubernetes
+          - name: HEKETI_FSTAB
+            value: /var/lib/heketi/fstab
+          - name: HEKETI_SNAPSHOT_LIMIT
+            value: '14'
+          - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+            value: '1'
+          ports:
+          - containerPort: 8080
+          volumeMounts:
+          - name: db
+            mountPath: /var/lib/heketi
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 3
+            httpGet:
+              path: /hello
+              port: 8080
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 30
+            httpGet:
+              path: /hello
+              port: 8080
+        volumes:
+        - name: db
+parameters:
+- name: HEKETI_USER_KEY
+  displayName: Heketi User Secret
+  description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+  displayName: Heketi Administrator Secret
+  description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+  displayName: GlusterFS container name
+  required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container version
+  required: True

+ 10 - 0
roles/openshift_storage_glusterfs/files/v1.6/glusterfs-registry-service.yml

@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: glusterfs-registry-endpoints
+spec:
+  ports:
+  - port: 1
+status:
+  loadBalancer: {}

+ 128 - 0
roles/openshift_storage_glusterfs/files/v1.6/glusterfs-template.yml

@@ -0,0 +1,128 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: glusterfs
+  labels:
+    glusterfs: template
+  annotations:
+    description: GlusterFS DaemonSet template
+    tags: glusterfs
+objects:
+- kind: DaemonSet
+  apiVersion: extensions/v1beta1
+  metadata:
+    name: glusterfs
+    labels:
+      glusterfs: daemonset
+    annotations:
+      description: GlusterFS DaemonSet
+      tags: glusterfs
+  spec:
+    selector:
+      matchLabels:
+        glusterfs-node: pod
+    template:
+      metadata:
+        name: glusterfs
+        labels:
+          glusterfs-node: pod
+      spec:
+        nodeSelector:
+          storagenode: glusterfs
+        hostNetwork: true
+        containers:
+        - name: glusterfs
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          volumeMounts:
+          - name: glusterfs-heketi
+            mountPath: "/var/lib/heketi"
+          - name: glusterfs-run
+            mountPath: "/run"
+          - name: glusterfs-lvm
+            mountPath: "/run/lvm"
+          - name: glusterfs-etc
+            mountPath: "/etc/glusterfs"
+          - name: glusterfs-logs
+            mountPath: "/var/log/glusterfs"
+          - name: glusterfs-config
+            mountPath: "/var/lib/glusterd"
+          - name: glusterfs-dev
+            mountPath: "/dev"
+          - name: glusterfs-misc
+            mountPath: "/var/lib/misc/glusterfsd"
+          - name: glusterfs-cgroup
+            mountPath: "/sys/fs/cgroup"
+            readOnly: true
+          - name: glusterfs-ssl
+            mountPath: "/etc/ssl"
+            readOnly: true
+          securityContext:
+            capabilities: {}
+            privileged: true
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 100
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 10
+            successThreshold: 1
+            failureThreshold: 3
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 100
+            exec:
+              command:
+              - "/bin/bash"
+              - "-c"
+              - systemctl status glusterd.service
+            periodSeconds: 10
+            successThreshold: 1
+            failureThreshold: 3
+          resources: {}
+          terminationMessagePath: "/dev/termination-log"
+        volumes:
+        - name: glusterfs-heketi
+          hostPath:
+            path: "/var/lib/heketi"
+        - name: glusterfs-run
+          emptyDir: {}
+        - name: glusterfs-lvm
+          hostPath:
+            path: "/run/lvm"
+        - name: glusterfs-etc
+          hostPath:
+            path: "/etc/glusterfs"
+        - name: glusterfs-logs
+          hostPath:
+            path: "/var/log/glusterfs"
+        - name: glusterfs-config
+          hostPath:
+            path: "/var/lib/glusterd"
+        - name: glusterfs-dev
+          hostPath:
+            path: "/dev"
+        - name: glusterfs-misc
+          hostPath:
+            path: "/var/lib/misc/glusterfsd"
+        - name: glusterfs-cgroup
+          hostPath:
+            path: "/sys/fs/cgroup"
+        - name: glusterfs-ssl
+          hostPath:
+            path: "/etc/ssl"
+        restartPolicy: Always
+        terminationGracePeriodSeconds: 30
+        dnsPolicy: ClusterFirst
+        securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: GlusterFS container name
+  required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container version
+  required: True

+ 113 - 0
roles/openshift_storage_glusterfs/files/v1.6/heketi-template.yml

@@ -0,0 +1,113 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: heketi
+  labels:
+    glusterfs: heketi-template
+  annotations:
+    description: Heketi service deployment template
+    tags: glusterfs,heketi
+labels:
+  template: heketi
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: heketi
+    labels:
+      glusterfs: heketi-service
+    annotations:
+      description: Exposes Heketi service
+  spec:
+    ports:
+    - name: heketi
+      port: 8080
+      targetPort: 8080
+    selector:
+      glusterfs: heketi-pod
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: heketi
+    labels:
+      glusterfs: heketi-route
+  spec:
+    to:
+      kind: Service
+      name: heketi
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: heketi
+    labels:
+      glusterfs: heketi-dc
+    annotations:
+      description: Defines how to deploy Heketi
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: heketi-pod
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: heketi
+        labels:
+          glusterfs: heketi-pod
+      spec:
+        serviceAccountName: heketi-service-account
+        containers:
+        - name: heketi
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          env:
+          - name: HEKETI_USER_KEY
+            value: ${HEKETI_USER_KEY}
+          - name: HEKETI_ADMIN_KEY
+            value: ${HEKETI_ADMIN_KEY}
+          - name: HEKETI_EXECUTOR
+            value: kubernetes
+          - name: HEKETI_FSTAB
+            value: /var/lib/heketi/fstab
+          - name: HEKETI_SNAPSHOT_LIMIT
+            value: '14'
+          - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+            value: '1'
+          ports:
+          - containerPort: 8080
+          volumeMounts:
+          - name: db
+            mountPath: /var/lib/heketi
+          readinessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 3
+            httpGet:
+              path: /hello
+              port: 8080
+          livenessProbe:
+            timeoutSeconds: 3
+            initialDelaySeconds: 30
+            httpGet:
+              path: /hello
+              port: 8080
+        volumes:
+        - name: db
+          glusterfs:
+            endpoints: heketi-storage-endpoints
+            path: heketidbstorage
+parameters:
+- name: HEKETI_USER_KEY
+  displayName: Heketi User Secret
+  description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+  displayName: Heketi Administrator Secret
+  description: Set secret for administration of the Heketi service as user _admin_
+- name: IMAGE_NAME
+  displayName: GlusterFS container name
+  required: True
+- name: IMAGE_VERSION
+  displayName: GlusterFS container version
+  required: True

+ 23 - 0
roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py

@@ -0,0 +1,23 @@
+'''
+ Openshift Storage GlusterFS class that provides useful filters used in GlusterFS
+'''
+
+
+def map_from_pairs(source, delim="="):
+    ''' Returns a dict given the source and delim delimited '''
+    if source == '':
+        return dict()
+
+    return dict(item.split(delim) for item in source.split(","))
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+    ''' OpenShift Storage GlusterFS Filters '''
+
+    # pylint: disable=no-self-use, too-few-public-methods
+    def filters(self):
+        ''' Returns the names of the filters provided by this class '''
+        return {
+            'map_from_pairs': map_from_pairs
+        }

+ 15 - 0
roles/openshift_storage_glusterfs/meta/main.yml

@@ -0,0 +1,15 @@
+---
+galaxy_info:
+  author: Jose A. Rivera
+  description: OpenShift GlusterFS Cluster
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 2.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+dependencies:
+- role: openshift_hosted_facts
+- role: openshift_repos
+- role: lib_openshift

+ 107 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml

@@ -0,0 +1,107 @@
+---
+- assert:
+    that: "openshift_storage_glusterfs_nodeselector.keys() | count == 1"
+    msg: Only one GlusterFS nodeselector key pair should be provided
+
+- assert:
+    that: "groups.oo_glusterfs_to_config | count >= 3"
+    msg: There must be at least three GlusterFS nodes specified
+
+- name: Delete pre-existing GlusterFS resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: "template,daemonset"
+    name: glusterfs
+    state: absent
+  when: openshift_storage_glusterfs_wipe
+
+- name: Unlabel any existing GlusterFS nodes
+  oc_label:
+    name: "{{ item }}"
+    kind: node
+    state: absent
+    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+  with_items: "{{ groups.all }}"
+  when: openshift_storage_glusterfs_wipe
+
+- name: Delete pre-existing GlusterFS config
+  file:
+    path: /var/lib/glusterd
+    state: absent
+  delegate_to: "{{ item }}"
+  with_items: "{{ groups.oo_glusterfs_to_config }}"
+  when: openshift_storage_glusterfs_wipe
+
+- name: Get GlusterFS storage devices state
+  command: "pvdisplay -C --noheadings -o pv_name,vg_name {% for device in hostvars[item].glusterfs_devices %}{{ device }} {% endfor %}"
+  register: devices_info
+  delegate_to: "{{ item }}"
+  with_items: "{{ groups.oo_glusterfs_to_config }}"
+  failed_when: False
+  when: openshift_storage_glusterfs_wipe
+
+  # Runs "vgremove -fy <vg>; pvremove -fy <pv>" for every device found to be a physical volume.
+- name: Clear GlusterFS storage device contents
+  shell: "{% for line in item.stdout_lines %}{% set fields = line.split() %}{% if fields | count > 1 %}vgremove -fy {{ fields[1] }}; {% endif %}pvremove -fy {{ fields[0] }}; {% endfor %}"
+  delegate_to: "{{ item.item }}"
+  with_items: "{{ devices_info.results }}"
+  when:
+  - openshift_storage_glusterfs_wipe
+  - item.stdout_lines | count > 0
+
+- name: Add service accounts to privileged SCC
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:{{ item }}"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+  with_items:
+  - 'default'
+  - 'router'
+
+- name: Label GlusterFS nodes
+  oc_label:
+    name: "{{ glusterfs_host }}"
+    kind: node
+    state: add
+    labels: "{{ openshift_storage_glusterfs_nodeselector | oo_dict_to_list_of_dict }}"
+  with_items: "{{ groups.oo_glusterfs_to_config }}"
+  loop_control:
+    loop_var: glusterfs_host
+
+- name: Copy GlusterFS DaemonSet template
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/glusterfs-template.yml"
+    dest: "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Create GlusterFS template
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: template
+    name: glusterfs
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/glusterfs-template.yml"
+
+- name: Deploy GlusterFS pods
+  oc_process:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    template_name: "glusterfs"
+    create: True
+    params:
+      IMAGE_NAME: "{{ openshift_storage_glusterfs_image }}"
+      IMAGE_VERSION: "{{ openshift_storage_glusterfs_version }}"
+
+- name: Wait for GlusterFS pods
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs-node=pod"
+  register: glusterfs_pods
+  until:
+  - "glusterfs_pods.results.results[0]['items'] | count > 0"
+  # There must be as many pods with 'Ready' status True as there are nodes expecting those pods
+  - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == groups.oo_glusterfs_to_config | count"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"

+ 48 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml

@@ -0,0 +1,48 @@
+---
+- name: Delete pre-existing GlusterFS registry resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "svc,ep"
+    name: "glusterfs-registry-endpoints"
+  failed_when: False
+
+- name: Generate GlusterFS registry endpoints
+  template:
+    src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-endpoints.yml.j2"
+    dest: "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Copy GlusterFS registry service
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/glusterfs-registry-service.yml"
+    dest: "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Create GlusterFS registry endpoints
+  oc_obj:
+    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+    state: present
+    kind: endpoints
+    name: glusterfs-registry-endpoints
+    files:
+    - "{{ mktemp.stdout }}/glusterfs-registry-endpoints.yml"
+
+- name: Create GlusterFS registry service
+  oc_obj:
+    namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
+    state: present
+    kind: service
+    name: glusterfs-registry-endpoints
+    files:
+    - "{{ mktemp.stdout }}/glusterfs-registry-service.yml"
+
+- name: Check if GlusterFS registry volume exists
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume list"
+  register: registry_volume
+
+- name: Create GlusterFS registry volume
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
+  when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"

+ 41 - 0
roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml

@@ -0,0 +1,41 @@
+---
+- name: Copy initial heketi resource files
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+    dest: "{{ mktemp.stdout }}/{{ item }}"
+  with_items:
+  - "deploy-heketi-template.yml"
+
+- name: Create deploy-heketi resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: template
+    name: deploy-heketi
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/deploy-heketi-template.yml"
+
+- name: Deploy deploy-heketi pod
+  oc_process:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    template_name: "deploy-heketi"
+    create: True
+    params:
+      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
+      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
+      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+
+- name: Wait for deploy-heketi pod
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+  register: heketi_pod
+  until:
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # Pod's 'Ready' status must be True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"

+ 109 - 0
roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml

@@ -0,0 +1,109 @@
+---
+- name: Create heketi DB volume
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' setup-openshift-heketi-storage --listfile {{ mktemp.stdout }}/heketi-storage.json"
+  register: setup_storage
+  failed_when: False
+
+# This is used in the subsequent task
+- name: Copy the admin client config
+  command: >
+    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+  changed_when: False
+  check_mode: no
+
+# Need `command` here because heketi-storage.json contains multiple objects.
+- name: Copy heketi DB to GlusterFS volume
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}"
+  when: "setup_storage.rc == 0"
+
+- name: Wait for copy job to finish
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: job
+    state: list
+    name: "heketi-storage-copy-job"
+  register: heketi_job
+  until:
+  - "'results' in heketi_job.results and heketi_job.results.results | count > 0"
+  # Pod's 'Complete' status must be True
+  - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  failed_when:
+  - "'results' in heketi_job.results"
+  - "heketi_job.results.results | count > 0"
+  # Fail when pod's 'Failed' status is True
+  - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
+  when: "setup_storage.rc == 0"
+
+- name: Delete deploy resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "template,route,service,jobs,dc,secret"
+    selector: "deploy-heketi"
+  failed_when: False
+
+- name: Copy heketi template
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/heketi-template.yml"
+    dest: "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Create heketi resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: template
+    name: heketi
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/heketi-template.yml"
+
+- name: Deploy heketi pod
+  oc_process:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    template_name: "heketi"
+    create: True
+    params:
+      IMAGE_NAME: "{{ openshift_storage_glusterfs_heketi_image }}"
+      IMAGE_VERSION: "{{ openshift_storage_glusterfs_heketi_version }}"
+      HEKETI_USER_KEY: "{{ openshift_storage_glusterfs_heketi_user_key }}"
+      HEKETI_ADMIN_KEY: "{{ openshift_storage_glusterfs_heketi_admin_key }}"
+
+- name: Wait for heketi pod
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  until:
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # Pod's 'Ready' status must be True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+
+- name: Determine heketi URL
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    state: list
+    kind: ep
+    selector: "glusterfs=heketi-service"
+  register: heketi_url
+  until:
+  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+
+- name: Set heketi URL
+  set_fact:
+    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+
+- name: Verify heketi service
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+  changed_when: False

+ 182 - 0
roles/openshift_storage_glusterfs/tasks/main.yml

@@ -0,0 +1,182 @@
+---
+# Scratch directory for generated artifacts (the heketi topology file is
+# rendered here later in this file); removed by the final task below.
+# check_mode: no so the rest of the play has a real path even in --check.
+- name: Create temp directory for doing work in
+  command: mktemp -d /tmp/openshift-glusterfs-ansible-XXXXXX
+  register: mktemp
+  changed_when: False
+  check_mode: no
+
+# The namespace is only needed when something will actually run in-cluster.
+- name: Verify target namespace exists
+  oc_project:
+    state: present
+    name: "{{ openshift_storage_glusterfs_namespace }}"
+  when: openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native
+
+# Containerized GlusterFS daemon deployment.
+- include: glusterfs_deploy.yml
+  when: openshift_storage_glusterfs_is_native
+
+# heketi-cli is invoked directly on this host by the "Verify heketi
+# service" and "Load heketi topology" tasks later in this file, so the
+# client package is required locally even when heketi itself runs in a pod.
+# (Native YAML module args per Ansible best practice, not k=v shorthand.)
+- name: Make sure heketi-client is installed
+  package:
+    name: heketi-client
+    state: present
+
+# Best-effort teardown of any prior heketi deployment when the wipe flag
+# is set.  failed_when: False swallows all errors -- presumably so that
+# deletes of objects that never existed don't abort the play; verify that
+# intent before tightening this.
+- name: Delete pre-existing heketi resources
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  # Bootstrap ("deploy-heketi") objects, matched by label
+  - kind: "template,route,service,jobs,dc,secret"
+    selector: "deploy-heketi"
+  # Long-term heketi objects, matched by name
+  - kind: "template,route,dc,service"
+    name: "heketi"
+  - kind: "svc,ep"
+    name: "heketi-storage-endpoints"
+  - kind: "sa"
+    name: "heketi-service-account"
+  failed_when: False
+  when: openshift_storage_glusterfs_heketi_wipe
+
+# After the wipe above, poll until no deploy-heketi pods remain (10s
+# intervals, up to openshift_storage_glusterfs_timeout seconds total).
+- name: Wait for deploy-heketi pods to terminate
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=deploy-heketi-pod"
+  register: heketi_pod
+  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  when: openshift_storage_glusterfs_heketi_wipe
+
+# Same wait for the long-term heketi pods.
+- name: Wait for heketi pods to terminate
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  until: "heketi_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  when: openshift_storage_glusterfs_heketi_wipe
+
+# Service account under which the native heketi pod runs.
+- name: Create heketi service account
+  oc_serviceaccount:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    name: heketi-service-account
+    state: present
+  when: openshift_storage_glusterfs_heketi_is_native
+
+# Heketi's pod needs the privileged SCC (it manages storage on the hosts).
+- name: Add heketi service account to privileged SCC
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+  when: openshift_storage_glusterfs_heketi_is_native
+
+# Grants the 'edit' role (broader than just pods, despite the task name).
+- name: Allow heketi service account to view/edit pods
+  oc_adm_policy_user:
+    user: "system:serviceaccount:{{ openshift_storage_glusterfs_namespace }}:heketi-service-account"
+    resource_kind: role
+    resource_name: edit
+    state: present
+  when: openshift_storage_glusterfs_heketi_is_native
+
+# Look for an already-running bootstrap ("deploy-heketi") pod so reruns of
+# this role can skip the bootstrap phase.
+- name: Check for existing deploy-heketi pod
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    state: list
+    kind: pod
+    selector: "glusterfs=deploy-heketi-pod,deploy-heketi=support"
+  register: heketi_pod
+  when: openshift_storage_glusterfs_heketi_is_native
+
+# Flip the "missing" flag to False only when a matching Ready pod exists.
+# NOTE(review): the True defaults for these *_is_missing facts presumably
+# live in the role's defaults, which are not visible here -- confirm.
+- name: Check if need to deploy deploy-heketi
+  set_fact:
+    openshift_storage_glusterfs_heketi_deploy_is_missing: False
+  when:
+  - "openshift_storage_glusterfs_heketi_is_native"
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+# Same check for the long-term heketi pod (reuses the heketi_pod register).
+- name: Check for existing heketi pod
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    state: list
+    kind: pod
+    selector: "glusterfs=heketi-pod"
+  register: heketi_pod
+  when: openshift_storage_glusterfs_heketi_is_native
+
+- name: Check if need to deploy heketi
+  set_fact:
+    openshift_storage_glusterfs_heketi_is_missing: False
+  when:
+  - "openshift_storage_glusterfs_heketi_is_native"
+  - "heketi_pod.results.results[0]['items'] | count > 0"
+  # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True
+  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"
+
+# Bootstrap phase: only when heketi is native and neither the bootstrap
+# pod nor the final heketi pod already exists.
+- include: heketi_deploy_part1.yml
+  when:
+  - openshift_storage_glusterfs_heketi_is_native
+  - openshift_storage_glusterfs_heketi_deploy_is_missing
+  - openshift_storage_glusterfs_heketi_is_missing
+
+# Resolve the service endpoint -- the selector matches either the
+# bootstrap (deploy-heketi) or final (heketi) service, whichever exists.
+# Skipped when openshift_storage_glusterfs_heketi_url was already set
+# (presumably an externally-managed heketi -- confirm against the role
+# defaults/README).
+- name: Determine heketi URL
+  oc_obj:
+    namespace: "{{ openshift_storage_glusterfs_namespace }}"
+    state: list
+    kind: ep
+    selector: "glusterfs in (deploy-heketi-service, heketi-service)"
+  register: heketi_url
+  until:
+  - "heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip != ''"
+  - "heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port != ''"
+  delay: 10
+  retries: "{{ (openshift_storage_glusterfs_timeout / 10) | int }}"
+  when:
+  - openshift_storage_glusterfs_heketi_is_native
+  - openshift_storage_glusterfs_heketi_url is undefined
+
+# Record the endpoint as "ip:port" (no scheme; callers prepend http://).
+- name: Set heketi URL
+  set_fact:
+    openshift_storage_glusterfs_heketi_url: "{{ heketi_url.results.results[0]['items'][0].subsets[0].addresses[0].ip }}:{{ heketi_url.results.results[0]['items'][0].subsets[0].ports[0].port }}"
+  when:
+  - openshift_storage_glusterfs_heketi_is_native
+  - openshift_storage_glusterfs_heketi_url is undefined
+
+# Unconditional reachability check; requires the URL fact to be set by
+# this point regardless of how heketi is deployed.
+- name: Verify heketi service
+  command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' cluster list"
+  changed_when: False
+
+# Render the heketi topology (clusters/nodes/devices) into the scratch
+# directory created at the top of this file.
+- name: Generate topology file
+  template:
+    src: "{{ openshift.common.examples_content_version }}/topology.json.j2"
+    dest: "{{ mktemp.stdout }}/topology.json"
+  when:
+  - openshift_storage_glusterfs_is_native
+  - openshift_storage_glusterfs_heketi_topology_load
+
+# BUGFIX: this must run via the 'shell' module, not 'command' -- 'command'
+# performs no shell processing, so the trailing '2>&1' was passed to
+# heketi-cli as a literal argument and stderr was never merged into
+# stdout for the failed_when scan below.
+- name: Load heketi topology
+  shell: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
+  register: topology_load
+  # The output is scanned for 'Unable' in addition to checking rc,
+  # presumably because heketi-cli can report per-node failures without a
+  # nonzero exit status -- confirm against heketi-cli behavior.
+  failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+  when:
+  - openshift_storage_glusterfs_is_native
+  - openshift_storage_glusterfs_heketi_topology_load
+
+# Finalization phase: replace the bootstrap deployment with the real one
+# (list-form 'when' for consistency with the rest of this file).
+- include: heketi_deploy_part2.yml
+  when:
+  - openshift_storage_glusterfs_heketi_is_native
+  - openshift_storage_glusterfs_heketi_is_missing
+
+- include: glusterfs_registry.yml
+  when: "openshift.hosted.registry.storage.kind == 'glusterfs'"
+
+# Clean up the scratch directory.
+- name: Delete temp directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False
+  check_mode: no

+ 11 - 0
roles/openshift_storage_glusterfs/templates/v1.6/glusterfs-registry-endpoints.yml.j2

@@ -0,0 +1,11 @@
+{# Endpoints object giving the registry direct addresses of the GlusterFS
+   hosts: one entry per member of oo_glusterfs_to_config, preferring an
+   explicit glusterfs_ip hostvar over the default openshift IP.
+   NOTE(review): "port: 1" appears to be a placeholder satisfying the
+   Endpoints schema's required port field -- confirm consumers ignore it. #}
+apiVersion: v1
+kind: Endpoints
+metadata:
+  name: glusterfs-registry-endpoints
+subsets:
+- addresses:
+{% for node in groups.oo_glusterfs_to_config %}
+  - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+  ports:
+  - port: 1

+ 39 - 0
roles/openshift_storage_glusterfs/templates/v1.6/topology.json.j2

@@ -0,0 +1,39 @@
+{#-
+  Renders the heketi topology JSON consumed by `heketi-cli topology load`.
+  Hosts in oo_glusterfs_to_config are grouped into clusters keyed by the
+  optional glusterfs_cluster hostvar (default cluster '1'); each node
+  lists its manage/storage addresses, zone, and raw devices.
+-#}
+{
+  "clusters": [
+{%- set clusters = {} -%}
+{%- for node in groups.oo_glusterfs_to_config -%}
+  {#- BUGFIX: membership must be tested against hostvars[node]; the old
+      expression ('glusterfs_cluster' in node) searched the hostname
+      string itself, so a host's glusterfs_cluster var was always
+      ignored and every node landed in cluster '1'. -#}
+  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+  {%- if cluster in clusters -%}
+    {%- set _dummy = clusters[cluster].append(node) -%}
+  {%- else -%}
+    {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+  {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+    {
+      "nodes": [
+{%- for node in clusters[cluster] -%}
+        {
+          "node": {
+            "hostnames": {
+              "manage": [
+                "{{ hostvars[node].glusterfs_hostname | default(hostvars[node].openshift.common.hostname) }}"
+              ],
+              "storage": [
+                "{{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}"
+              ]
+            },
+            "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+          },
+          "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+            "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+          ]
+        }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+      ]
+    }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+  ]
+}