
Merge pull request #5784 from jarrpa/glusterfs-new-features

Automatic merge from submit-queue.

GlusterFS: Update for new features

This is a small collection of updates that take advantage of new features for containerized GlusterFS, currently available downstream in CNS 3.6.
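
Since the new behavior is driven entirely by inventory variables (documented in the README changes below), here is a minimal sketch of an inventory fragment enabling both new services; all values are hypothetical placeholders:

[OSEv3:vars]
# Deploy the glusterblock provisioner alongside GlusterFS
openshift_storage_glusterfs_block_deploy=True
# Deploy the gluster-s3 service; the three s3_* credential variables
# below are required for S3 deployment and have no usable defaults
openshift_storage_glusterfs_s3_deploy=True
openshift_storage_glusterfs_s3_account=demo
openshift_storage_glusterfs_s3_user=demo-user
openshift_storage_glusterfs_s3_password=demo-password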
OpenShift Merge Robot, 7 years ago
parent commit 3cdf382be5

+ 15 - 0
roles/openshift_storage_glusterfs/README.md

@@ -84,6 +84,20 @@ GlusterFS cluster into a new or existing OpenShift cluster:
 | openshift_storage_glusterfs_storageclass         | True                    | Automatically create a StorageClass for each GlusterFS cluster
 | openshift_storage_glusterfs_image                | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
 | openshift_storage_glusterfs_version              | 'latest'                | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_block_deploy         | True                    | Deploy glusterblock provisioner service
+| openshift_storage_glusterfs_block_image          | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
+| openshift_storage_glusterfs_block_version        | 'latest'                | Container image version to use for glusterblock-provisioner pod
+| openshift_storage_glusterfs_block_max_host_vol   | 15                      | Maximum number of GlusterFS volumes on which to host glusterblock volumes
+| openshift_storage_glusterfs_s3_deploy            | True                    | Deploy gluster-s3 service
+| openshift_storage_glusterfs_s3_image             | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
+| openshift_storage_glusterfs_s3_version           | 'latest'                | Container image version to use for gluster-s3 pod
+| openshift_storage_glusterfs_s3_account           | Undefined               | S3 account name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_user              | Undefined               | S3 user name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_password          | Undefined               | S3 user password for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_pvc               | Dynamic                 | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_pvc_size          | "2Gi"                   | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage
+| openshift_storage_glusterfs_s3_meta_pvc          | Dynamic                 | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_meta_pvc_size     | "1Gi"                   | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage
 | openshift_storage_glusterfs_wipe                 | False                   | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
 | openshift_storage_glusterfs_heketi_is_native     | True                    | heketi should be containerized
 | openshift_storage_glusterfs_heketi_cli           | 'heketi-cli'            | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
@@ -99,6 +113,7 @@ GlusterFS cluster into a new or existing OpenShift cluster:
 | openshift_storage_glusterfs_heketi_ssh_user      | 'root'                  | SSH user for external GlusterFS nodes via native heketi
 | openshift_storage_glusterfs_heketi_ssh_sudo      | False                   | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
 | openshift_storage_glusterfs_heketi_ssh_keyfile   | Undefined               | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
+| openshift_storage_glusterfs_heketi_fstab         | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts; changes to '/etc/fstab' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed
 | openshift_storage_glusterfs_heketi_wipe          | False                   | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
 
 Each role variable also has a corresponding variable to optionally configure a

+ 31 - 0
roles/openshift_storage_glusterfs/defaults/main.yml

@@ -7,6 +7,20 @@ openshift_storage_glusterfs_use_default_selector: False
 openshift_storage_glusterfs_storageclass: True
 openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"
 openshift_storage_glusterfs_version: 'latest'
+openshift_storage_glusterfs_block_deploy: True
+openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"
+openshift_storage_glusterfs_block_version: 'latest'
+openshift_storage_glusterfs_block_max_host_vol: 15
+openshift_storage_glusterfs_s3_deploy: True
+openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
+openshift_storage_glusterfs_s3_version: 'latest'
+openshift_storage_glusterfs_s3_account: "{{ omit }}"
+openshift_storage_glusterfs_s3_user: "{{ omit }}"
+openshift_storage_glusterfs_s3_password: "{{ omit }}"
+openshift_storage_glusterfs_s3_pvc: "gluster-s3-{{ openshift_storage_glusterfs_name }}-{{ openshift_storage_glusterfs_s3_account }}-claim"
+openshift_storage_glusterfs_s3_pvc_size: "2Gi"
+openshift_storage_glusterfs_s3_meta_pvc: "gluster-s3-{{ openshift_storage_glusterfs_name }}-{{ openshift_storage_glusterfs_s3_account }}-meta-claim"
+openshift_storage_glusterfs_s3_meta_pvc_size: "1Gi"
 openshift_storage_glusterfs_wipe: False
 openshift_storage_glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_is_native }}"
 openshift_storage_glusterfs_heketi_is_missing: True
@@ -25,6 +39,7 @@ openshift_storage_glusterfs_heketi_ssh_port: 22
 openshift_storage_glusterfs_heketi_ssh_user: 'root'
 openshift_storage_glusterfs_heketi_ssh_sudo: False
 openshift_storage_glusterfs_heketi_ssh_keyfile: "{{ omit }}"
+openshift_storage_glusterfs_heketi_fstab: "{{ '/var/lib/heketi/fstab' | quote if openshift_storage_glusterfs_heketi_executor == 'kubernetes' else '/etc/fstab' | quote }}"
 openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"
 
 openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
@@ -36,6 +51,20 @@ openshift_storage_glusterfs_registry_use_default_selector: "{{ openshift_storage
 openshift_storage_glusterfs_registry_storageclass: False
 openshift_storage_glusterfs_registry_image: "{{ openshift_storage_glusterfs_image }}"
 openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_version }}"
+openshift_storage_glusterfs_registry_block_deploy: "{{ openshift_storage_glusterfs_block_deploy }}"
+openshift_storage_glusterfs_registry_block_image: "{{ openshift_storage_glusterfs_block_image }}"
+openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_glusterfs_block_version }}"
+openshift_storage_glusterfs_registry_block_max_host_vol: "{{ openshift_storage_glusterfs_block_max_host_vol }}"
+openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}"
+openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
+openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
+openshift_storage_glusterfs_registry_s3_account: "{{ openshift_storage_glusterfs_s3_account }}"
+openshift_storage_glusterfs_registry_s3_user: "{{ openshift_storage_glusterfs_s3_user }}"
+openshift_storage_glusterfs_registry_s3_password: "{{ openshift_storage_glusterfs_s3_password }}"
+openshift_storage_glusterfs_registry_s3_pvc: "gluster-s3-{{ openshift_storage_glusterfs_registry_name }}-{{ openshift_storage_glusterfs_registry_s3_account }}-claim"
+openshift_storage_glusterfs_registry_s3_pvc_size: "{{ openshift_storage_glusterfs_s3_pvc_size }}"
+openshift_storage_glusterfs_registry_s3_meta_pvc: "gluster-s3-{{ openshift_storage_glusterfs_registry_name }}-{{ openshift_storage_glusterfs_registry_s3_account }}-meta-claim"
+openshift_storage_glusterfs_registry_s3_meta_pvc_size: "{{ openshift_storage_glusterfs_s3_meta_pvc_size }}"
 openshift_storage_glusterfs_registry_wipe: "{{ openshift_storage_glusterfs_wipe }}"
 openshift_storage_glusterfs_registry_heketi_is_native: "{{ openshift_storage_glusterfs_registry_is_native }}"
 openshift_storage_glusterfs_registry_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing }}"
@@ -54,6 +83,8 @@ openshift_storage_glusterfs_registry_heketi_ssh_port: "{{ openshift_storage_glus
 openshift_storage_glusterfs_registry_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"
 openshift_storage_glusterfs_registry_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo }}"
 openshift_storage_glusterfs_registry_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile | default(omit) }}"
+openshift_storage_glusterfs_registry_heketi_fstab: "{{ '/var/lib/heketi/fstab' | quote if openshift_storage_glusterfs_registry_heketi_executor == 'kubernetes' else '/etc/fstab' | quote }}"
+
 r_openshift_storage_glusterfs_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
 r_openshift_storage_glusterfs_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 r_openshift_storage_glusterfs_os_firewall_deny: []
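
Every new variable also gets a _registry_ counterpart that inherits from the base value, so the registry-backing cluster can be tuned independently. A hypothetical inventory override that keeps the defaults for the main cluster but opts the registry cluster out of both new services:

# Inventory sketch (hypothetical): registry cluster opts out
openshift_storage_glusterfs_registry_block_deploy=False
openshift_storage_glusterfs_registry_s3_deploy=False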

+ 5 - 7
roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml

@@ -73,13 +73,11 @@ objects:
           - name: HEKETI_EXECUTOR
             value: ${HEKETI_EXECUTOR}
           - name: HEKETI_FSTAB
-            value: /var/lib/heketi/fstab
+            value: ${HEKETI_FSTAB}
           - name: HEKETI_SNAPSHOT_LIMIT
             value: '14'
           - name: HEKETI_KUBE_GLUSTER_DAEMONSET
             value: '1'
-          - name: HEKETI_KUBE_NAMESPACE
-            value: ${HEKETI_KUBE_NAMESPACE}
           ports:
           - containerPort: 8080
           volumeMounts:
@@ -115,10 +113,10 @@ parameters:
   displayName: heketi executor type
   description: Set the executor type, kubernetes or ssh
   value: kubernetes
-- name: HEKETI_KUBE_NAMESPACE
-  displayName: Namespace
-  description: Set the namespace where the GlusterFS pods reside
-  value: default
+- name: HEKETI_FSTAB
+  displayName: heketi fstab path
+  description: Set the path to the fstab file that heketi populates with the bricks it creates
+  value: /var/lib/heketi/fstab
 - name: HEKETI_ROUTE
   displayName: heketi route name
   description: Set the hostname for the route URL
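
With HEKETI_KUBE_NAMESPACE dropped and HEKETI_FSTAB parameterized, an ssh-executor deployment can override the fstab path when the template is instantiated. A sketch using the oc CLI; the template name and namespace here are assumptions for illustration:

# Process the template with an ssh executor and the matching fstab
# path, then create the resulting objects
oc process deploy-heketi -n glusterfs \
    -p HEKETI_EXECUTOR=ssh \
    -p HEKETI_FSTAB=/etc/fstab \
  | oc create -f -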

+ 67 - 0
roles/openshift_storage_glusterfs/files/v3.7/gluster-s3-pvcs-template.yml

@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: gluster-s3-pvcs
+  labels:
+    glusterfs: s3-pvcs-template
+    gluster-s3: pvcs-template
+  annotations:
+    description: Gluster S3 service template
+    tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+  apiVersion: v1
+  metadata:
+    name: "${PVC}"
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+    annotations:
+      volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+  spec:
+    accessModes:
+    - ReadWriteMany
+    resources:
+      requests:
+        storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+  apiVersion: v1
+  metadata:
+    name: "${META_PVC}"
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+    annotations:
+      volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+  spec:
+    accessModes:
+    - ReadWriteMany
+    resources:
+      requests:
+        storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+  displayName: S3 Account Name
+  description: S3 storage account which will provide storage on GlusterFS volumes
+  required: true
+- name: PVC
+  displayName: Primary GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage
+  required: true
+- name: PVC_SIZE
+  displayName: Primary GlusterFS-backed PVC capacity
+  description: Capacity for GlusterFS-backed PVC for object storage
+  value: 2Gi
+- name: META_PVC
+  displayName: Metadata GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage metadata
+  required: true
+- name: META_PVC_SIZE
+  displayName: Metadata GlusterFS-backed PVC capacity
+  description: Capacity for GlusterFS-backed PVC for object storage metadata
+  value: 1Gi
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
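
The template can also be exercised by hand; S3_ACCOUNT, PVC, and META_PVC are required parameters, while the sizes fall back to the defaults above. A sketch with hypothetical parameter values:

oc process gluster-s3-pvcs -n glusterfs \
    -p S3_ACCOUNT=demo \
    -p PVC=gluster-s3-storage-demo-claim \
    -p META_PVC=gluster-s3-storage-demo-meta-claim \
  | oc create -f -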

+ 140 - 0
roles/openshift_storage_glusterfs/files/v3.7/gluster-s3-template.yml

@@ -0,0 +1,140 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: gluster-s3
+  labels:
+    glusterfs: s3-template
+    gluster-s3: template
+  annotations:
+    description: Gluster S3 service template
+    tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+  apiVersion: v1
+  metadata:
+    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+  spec:
+    ports:
+    - protocol: TCP
+      port: 8080
+      targetPort: 8080
+    selector:
+      glusterfs: s3-pod
+    type: ClusterIP
+    sessionAffinity: None
+  status:
+    loadBalancer: {}
+- kind: Route
+  apiVersion: v1
+  metadata:
+    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+  spec:
+    to:
+      kind: Service
+      name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+    labels:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+      gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+    annotations:
+      openshift.io/scc: privileged
+      description: Defines how to deploy gluster s3 object storage
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+    template:
+      metadata:
+        name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+        labels:
+          glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+          gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+      spec:
+        containers:
+        - name: gluster-s3
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          ports:
+          - name: gluster
+            containerPort: 8080
+            protocol: TCP
+          env:
+          - name: S3_ACCOUNT
+            value: "${S3_ACCOUNT}"
+          - name: S3_USER
+            value: "${S3_USER}"
+          - name: S3_PASSWORD
+            value: "${S3_PASSWORD}"
+          resources: {}
+          volumeMounts:
+          - name: gluster-vol1
+            mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+          - name: gluster-vol2
+            mountPath: "/mnt/gluster-object/gsmetadata"
+          - name: glusterfs-cgroup
+            readOnly: true
+            mountPath: "/sys/fs/cgroup"
+          terminationMessagePath: "/dev/termination-log"
+          securityContext:
+            privileged: true
+        volumes:
+        - name: glusterfs-cgroup
+          hostPath:
+            path: "/sys/fs/cgroup"
+        - name: gluster-vol1
+          persistentVolumeClaim:
+            claimName: ${PVC}
+        - name: gluster-vol2
+          persistentVolumeClaim:
+            claimName: ${META_PVC}
+        restartPolicy: Always
+        terminationGracePeriodSeconds: 30
+        dnsPolicy: ClusterFirst
+        serviceAccountName: default
+        serviceAccount: default
+        securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: gluster-s3 container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: gluster-s3 container image version
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
+- name: S3_ACCOUNT
+  displayName: S3 Account Name
+  description: S3 storage account which will provide storage on GlusterFS volumes
+  required: true
+- name: S3_USER
+  displayName: S3 User
+  description: S3 user who can access the S3 storage account
+  required: true
+- name: S3_PASSWORD
+  displayName: S3 User Password
+  description: Password for the S3 user
+  required: true
+- name: PVC
+  displayName: Primary GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage
+  value: gluster-s3-claim
+- name: META_PVC
+  displayName: Metadata GlusterFS-backed PVC
+  description: GlusterFS-backed PVC for object storage metadata
+  value: gluster-s3-meta-claim
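
Instantiating the service template by hand requires the image coordinates and S3 credentials; the PVC names default to the generic claims above. A hypothetical invocation:

oc process gluster-s3 -n glusterfs \
    -p IMAGE_NAME=gluster/gluster-object -p IMAGE_VERSION=latest \
    -p S3_ACCOUNT=demo -p S3_USER=demo-user -p S3_PASSWORD=demo-password \
  | oc create -f -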

+ 105 - 0
roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml

@@ -0,0 +1,105 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+  name: glusterblock
+  labels:
+    glusterfs: block-template
+    glusterblock: template
+  annotations:
+    description: glusterblock provisioner template
+    tags: glusterfs
+objects:
+- kind: ClusterRole
+  apiVersion: v1
+  metadata:
+    name: glusterblock-provisioner-runner
+    labels:
+      glusterfs: block-provisioner-runner-clusterrole
+      glusterblock: provisioner-runner-clusterrole
+  rules:
+    - apiGroups: [""]
+      resources: ["persistentvolumes"]
+      verbs: ["get", "list", "watch", "create", "delete"]
+    - apiGroups: [""]
+      resources: ["persistentvolumeclaims"]
+      verbs: ["get", "list", "watch", "update"]
+    - apiGroups: ["storage.k8s.io"]
+      resources: ["storageclasses"]
+      verbs: ["get", "list", "watch"]
+    - apiGroups: [""]
+      resources: ["events"]
+      verbs: ["list", "watch", "create", "update", "patch"]
+    - apiGroups: [""]
+      resources: ["services"]
+      verbs: ["get"]
+    - apiGroups: [""]
+      resources: ["secrets"]
+      verbs: ["get", "create", "delete"]
+    - apiGroups: [""]
+      resources: ["routes"]
+      verbs: ["get", "list"]
+- apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: glusterblock-${CLUSTER_NAME}-provisioner
+    labels:
+      glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+      glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+  kind: ClusterRoleBinding
+  metadata:
+    name: glusterblock-${CLUSTER_NAME}-provisioner
+  roleRef:
+    name: glusterblock-provisioner-runner
+  subjects:
+  - kind: ServiceAccount
+    name: glusterblock-${CLUSTER_NAME}-provisioner
+    namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+    labels:
+      glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+      glusterblock: ${CLUSTER_NAME}-provisioner-dc
+    annotations:
+      description: Defines how to deploy the glusterblock provisioner pod.
+  spec:
+    replicas: 1
+    selector:
+      glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+    triggers:
+    - type: ConfigChange
+    strategy:
+      type: Recreate
+    template:
+      metadata:
+        name: glusterblock-provisioner
+        labels:
+          glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+      spec:
+        serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+        containers:
+        - name: glusterblock-provisioner
+          image: ${IMAGE_NAME}:${IMAGE_VERSION}
+          imagePullPolicy: IfNotPresent
+          env:
+          - name: PROVISIONER_NAME
+            value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+  displayName: glusterblock provisioner container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: glusterblock provisioner container image version
+  required: True
+- name: NAMESPACE
+  displayName: glusterblock provisioner namespace
+  description: The namespace in which these resources are being created
+  required: True
+- name: CLUSTER_NAME
+  displayName: GlusterFS cluster name
+  description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+  value: storage
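
The pod registers the provisioner under the name gluster.org/glusterblock, so block volumes are consumed through a StorageClass that references it. A sketch only; the parameter names follow the external gluster-block provisioner's conventions, and the endpoint/secret values are placeholders:

kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: glusterfs-storage-block
provisioner: gluster.org/glusterblock
parameters:
  # heketi REST endpoint and admin credentials (placeholder values)
  resturl: "http://heketi-storage.example.com"
  restuser: "admin"
  restsecretname: "heketi-storage-admin-secret"
  restsecretnamespace: "glusterfs"
  hacount: "3"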

+ 19 - 1
roles/openshift_storage_glusterfs/files/v3.7/glusterfs-template.yml

@@ -35,6 +35,15 @@ objects:
         - name: glusterfs
           image: ${IMAGE_NAME}:${IMAGE_VERSION}
           imagePullPolicy: IfNotPresent
+          env:
+          - name: GB_GLFS_LRU_COUNT
+            value: "${GB_GLFS_LRU_COUNT}"
+          - name: TCMU_LOGDIR
+            value: "${TCMU_LOGDIR}"
+          resources:
+            requests:
+              memory: 100Mi
+              cpu: 100m
           volumeMounts:
           - name: glusterfs-heketi
             mountPath: "/var/lib/heketi"
@@ -83,7 +92,6 @@ objects:
             periodSeconds: 25
             successThreshold: 1
             failureThreshold: 15
-          resources: {}
           terminationMessagePath: "/dev/termination-log"
         volumes:
         - name: glusterfs-heketi
@@ -134,3 +142,13 @@ parameters:
   displayName: GlusterFS cluster name
   description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
   value: storage
+- name: GB_GLFS_LRU_COUNT
+  displayName: Maximum number of block hosting volumes
+  description: Sets the maximum number of block-hosting volumes.
+  value: "15"
+  required: true
+- name: TCMU_LOGDIR
+  displayName: Tcmu runner log directory
+  description: Sets the tcmu-runner log directory.
+  value: "/var/log/glusterfs/gluster-block"
+  required: true

+ 9 - 7
roles/openshift_storage_glusterfs/files/v3.7/heketi-template.yml

@@ -15,6 +15,7 @@ objects:
     name: heketi-${CLUSTER_NAME}
     labels:
       glusterfs: heketi-${CLUSTER_NAME}-service
+      heketi: ${CLUSTER_NAME}-service
     annotations:
       description: Exposes Heketi service
   spec:
@@ -30,6 +31,7 @@ objects:
     name: ${HEKETI_ROUTE}
     labels:
       glusterfs: heketi-${CLUSTER_NAME}-route
+      heketi: ${CLUSTER_NAME}-route
   spec:
     to:
       kind: Service
@@ -40,6 +42,7 @@ objects:
     name: heketi-${CLUSTER_NAME}
     labels:
       glusterfs: heketi-${CLUSTER_NAME}-dc
+      heketi: ${CLUSTER_NAME}-dc
     annotations:
       description: Defines how to deploy Heketi
   spec:
@@ -55,6 +58,7 @@ objects:
         name: heketi-${CLUSTER_NAME}
         labels:
           glusterfs: heketi-${CLUSTER_NAME}-pod
+          heketi: ${CLUSTER_NAME}-pod
       spec:
         serviceAccountName: heketi-${CLUSTER_NAME}-service-account
         containers:
@@ -69,13 +73,11 @@ objects:
           - name: HEKETI_EXECUTOR
             value: ${HEKETI_EXECUTOR}
           - name: HEKETI_FSTAB
-            value: /var/lib/heketi/fstab
+            value: ${HEKETI_FSTAB}
           - name: HEKETI_SNAPSHOT_LIMIT
             value: '14'
           - name: HEKETI_KUBE_GLUSTER_DAEMONSET
             value: '1'
-          - name: HEKETI_KUBE_NAMESPACE
-            value: ${HEKETI_KUBE_NAMESPACE}
           ports:
           - containerPort: 8080
           volumeMounts:
@@ -114,10 +116,10 @@ parameters:
   displayName: heketi executor type
   description: Set the executor type, kubernetes or ssh
   value: kubernetes
-- name: HEKETI_KUBE_NAMESPACE
-  displayName: Namespace
-  description: Set the namespace where the GlusterFS pods reside
-  value: default
+- name: HEKETI_FSTAB
+  displayName: heketi fstab path
+  description: Set the path to the fstab file that heketi populates with the bricks it creates
+  value: /var/lib/heketi/fstab
 - name: HEKETI_ROUTE
   displayName: heketi route name
   description: Set the hostname for the route URL

+ 113 - 0
roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml

@@ -0,0 +1,113 @@
+---
+- name: Delete pre-existing gluster-s3 resources
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "all,svc,deploy,secret,sc,pvc"
+    selector: "gluster-s3"
+  failed_when: False
+  when: glusterfs_wipe
+
+- name: Wait for gluster-s3 pods to terminate
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=s3-{{ glusterfs_name }}-provisioner-pod"
+  register: gluster_s3_pod
+  until: "gluster_s3_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+  when: glusterfs_wipe
+
+- name: Copy gluster-s3 PVCs template file
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+    dest: "{{ mktemp.stdout }}/{{ item }}"
+  with_items:
+  - "gluster-s3-pvcs-template.yml"
+
+- name: Create gluster-s3 PVCs template
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: template
+    name: "gluster-s3-pvcs"
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/gluster-s3-pvcs-template.yml"
+
+- name: Create gluster-s3 PVCs
+  oc_process:
+    namespace: "{{ glusterfs_namespace }}"
+    template_name: "gluster-s3-pvcs"
+    create: True
+    params:
+      S3_ACCOUNT: "{{ glusterfs_s3_account }}"
+      PVC: "{{ glusterfs_s3_pvc }}"
+      PVC_SIZE: "{{ glusterfs_s3_pvc_size }}"
+      META_PVC: "{{ glusterfs_s3_meta_pvc }}"
+      META_PVC_SIZE: "{{ glusterfs_s3_meta_pvc_size }}"
+      CLUSTER_NAME: "{{ glusterfs_name }}"
+
+- name: Wait for gluster-s3 PVCs
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pvc
+    state: list
+    selector: "glusterfs=s3-{{ glusterfs_name }}-{{ glusterfs_s3_account }}-storage"
+  register: gluster_s3_pvcs
+  until:
+  - "gluster_s3_pvcs.results.results[0]['items'] | count > 0"
+  # PVCs' 'Bound' status must be True
+  - "gluster_s3_pvcs.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Bound'}) | map('bool') | select | list | count == 2"
+  delay: 10
+  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+
+- name: Copy gluster-s3 template file
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+    dest: "{{ mktemp.stdout }}/{{ item }}"
+  with_items:
+  - "gluster-s3-template.yml"
+
+- name: Create gluster-s3 template
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: template
+    name: "gluster-s3"
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/gluster-s3-template.yml"
+
+- name: Deploy gluster-s3 service
+  oc_process:
+    namespace: "{{ glusterfs_namespace }}"
+    template_name: "gluster-s3"
+    create: True
+    params:
+      IMAGE_NAME: "{{ glusterfs_s3_image }}"
+      IMAGE_VERSION: "{{ glusterfs_s3_version }}"
+      S3_ACCOUNT: "{{ glusterfs_s3_account }}"
+      S3_USER: "{{ glusterfs_s3_user }}"
+      S3_PASSWORD: "{{ glusterfs_s3_password }}"
+      PVC: "{{ glusterfs_s3_pvc }}"
+      META_PVC: "{{ glusterfs_s3_meta_pvc }}"
+      CLUSTER_NAME: "{{ glusterfs_name }}"
+
+- name: Wait for gluster-s3 pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=s3-{{ glusterfs_name }}-{{ glusterfs_s3_account }}-pod"
+  register: gluster_s3_pod
+  until:
+  - "gluster_s3_pod.results.results[0]['items'] | count > 0"
+  # Pod's 'Ready' status must be True
+  - "gluster_s3_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
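
After the play completes, the result can be sanity-checked with the same labels the wait tasks poll on (cluster and account names hypothetical):

# The gluster-s3 pod should be Running and its two PVCs Bound
oc get pods -n glusterfs -l glusterfs=s3-storage-demo-pod
oc get pvc -n glusterfs -l glusterfs=s3-storage-demo-storage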

+ 66 - 0
roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml

@@ -0,0 +1,66 @@
+---
+- name: Delete pre-existing glusterblock provisioner resources
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: "{{ item.kind }}"
+    name: "{{ item.name | default(omit) }}"
+    selector: "{{ item.selector | default(omit) }}"
+    state: absent
+  with_items:
+  - kind: "all,deploy,sa,clusterrole,clusterrolebinding"
+    selector: "glusterblock"
+  failed_when: False
+  when: glusterfs_wipe
+
+- name: Wait for glusterblock provisioner pods to terminate
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=block-{{ glusterfs_name }}-provisioner-pod"
+  register: glusterblock_pod
+  until: "glusterblock_pod.results.results[0]['items'] | count == 0"
+  delay: 10
+  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
+  when: glusterfs_wipe
+
+- name: Copy initial glusterblock provisioner resource file
+  copy:
+    src: "{{ openshift.common.examples_content_version }}/{{ item }}"
+    dest: "{{ mktemp.stdout }}/{{ item }}"
+  with_items:
+  - "glusterblock-template.yml"
+
+- name: Create glusterblock provisioner template
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: template
+    name: "glusterblock"
+    state: present
+    files:
+    - "{{ mktemp.stdout }}/glusterblock-template.yml"
+
+- name: Deploy glusterblock provisioner
+  oc_process:
+    namespace: "{{ glusterfs_namespace }}"
+    template_name: "glusterblock"
+    create: True
+    params:
+      IMAGE_NAME: "{{ glusterfs_block_image }}"
+      IMAGE_VERSION: "{{ glusterfs_block_version }}"
+      NAMESPACE: "{{ glusterfs_namespace }}"
+      CLUSTER_NAME: "{{ glusterfs_name }}"
+
+- name: Wait for glusterblock provisioner pod
+  oc_obj:
+    namespace: "{{ glusterfs_namespace }}"
+    kind: pod
+    state: list
+    selector: "glusterfs=block-{{ glusterfs_name }}-provisioner-pod"
+  register: glusterblock_pod
+  until:
+  - "glusterblock_pod.results.results[0]['items'] | count > 0"
+  # Pod's 'Ready' status must be True
+  - "glusterblock_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
+  delay: 10
+  retries: "{{ (glusterfs_timeout | int / 10) | int }}"
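
A quick post-deploy check mirroring the wait task's selector (cluster name hypothetical):

oc get pods -n glusterfs -l glusterfs=block-storage-provisioner-pod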

+ 52 - 44
roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml

@@ -259,51 +259,59 @@
   - glusterfs_heketi_is_native
   - glusterfs_heketi_is_missing
 
-- name: Create heketi secret
-  oc_secret:
-    namespace: "{{ glusterfs_namespace }}"
-    state: present
-    name: "heketi-{{ glusterfs_name }}-admin-secret"
-    type: "kubernetes.io/glusterfs"
-    force: True
-    contents:
-    - path: key
-      data: "{{ glusterfs_heketi_admin_key }}"
-  when:
-  - glusterfs_storageclass
-  - glusterfs_heketi_admin_key is defined
-
-- name: Get heketi route
-  oc_obj:
-    namespace: "{{ glusterfs_namespace }}"
-    kind: route
-    state: list
-    name: "heketi-{{ glusterfs_name }}"
-  register: heketi_route
-  when:
-  - glusterfs_storageclass
-  - glusterfs_heketi_is_native
-
-- name: Determine StorageClass heketi URL
+- name: Disable gluster-s3 deployment if required S3 variables are undefined
   set_fact:
-    glusterfs_heketi_route: "{{ heketi_route.results.results[0]['spec']['host'] }}"
+    glusterfs_s3_deploy: False
   when:
-  - glusterfs_storageclass
-  - glusterfs_heketi_is_native
-
-- name: Generate GlusterFS StorageClass file
-  template:
-    src: "{{ openshift.common.examples_content_version }}/glusterfs-storageclass.yml.j2"
-    dest: "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+  - "glusterfs_s3_account is not defined or glusterfs_s3_user is not defined or glusterfs_s3_password is not defined"
+
+- block:
+  - name: Create heketi secret
+    oc_secret:
+      namespace: "{{ glusterfs_namespace }}"
+      state: present
+      name: "heketi-{{ glusterfs_name }}-admin-secret"
+      type: "kubernetes.io/glusterfs"
+      force: True
+      contents:
+      - path: key
+        data: "{{ glusterfs_heketi_admin_key }}"
+    when:
+    - glusterfs_heketi_admin_key is defined
+
+  - name: Get heketi route
+    oc_obj:
+      namespace: "{{ glusterfs_namespace }}"
+      kind: route
+      state: list
+      name: "heketi-{{ glusterfs_name }}"
+    register: heketi_route
+    when:
+    - glusterfs_heketi_is_native
+
+  - name: Determine StorageClass heketi URL
+    set_fact:
+      glusterfs_heketi_route: "{{ heketi_route.results.results[0]['spec']['host'] }}"
+    when:
+    - glusterfs_heketi_is_native
+
+  - name: Generate GlusterFS StorageClass file
+    template:
+      src: "{{ openshift.common.examples_content_version }}/glusterfs-storageclass.yml.j2"
+      dest: "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
+
+  - name: Create GlusterFS StorageClass
+    oc_obj:
+      state: present
+      kind: storageclass
+      name: "glusterfs-{{ glusterfs_name }}"
+      files:
+      - "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
   when:
-  - glusterfs_storageclass
+  - glusterfs_storageclass or glusterfs_s3_deploy
 
-- name: Create GlusterFS StorageClass
-  oc_obj:
-    state: present
-    kind: storageclass
-    name: "glusterfs-{{ glusterfs_name }}"
-    files:
-    - "{{ mktemp.stdout }}/glusterfs-storageclass.yml"
-  when:
-  - glusterfs_storageclass
+- include: glusterblock_deploy.yml
+  when: glusterfs_block_deploy
+
+- include: gluster_s3_deploy.yml
+  when: glusterfs_s3_deploy

+ 15 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml

@@ -9,6 +9,20 @@
     glusterfs_storageclass: "{{ openshift_storage_glusterfs_storageclass }}"
     glusterfs_image: "{{ openshift_storage_glusterfs_image }}"
     glusterfs_version: "{{ openshift_storage_glusterfs_version }}"
+    glusterfs_block_deploy: "{{ openshift_storage_glusterfs_block_deploy | bool }}"
+    glusterfs_block_image: "{{ openshift_storage_glusterfs_block_image }}"
+    glusterfs_block_version: "{{ openshift_storage_glusterfs_block_version }}"
+    glusterfs_block_max_host_vol: "{{ openshift_storage_glusterfs_block_max_host_vol }}"
+    glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}"
+    glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
+    glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
+    glusterfs_s3_account: "{{ openshift_storage_glusterfs_s3_account }}"
+    glusterfs_s3_user: "{{ openshift_storage_glusterfs_s3_user }}"
+    glusterfs_s3_password: "{{ openshift_storage_glusterfs_s3_password }}"
+    glusterfs_s3_pvc: "{{ openshift_storage_glusterfs_s3_pvc }}"
+    glusterfs_s3_pvc_size: "{{ openshift_storage_glusterfs_s3_pvc_size }}"
+    glusterfs_s3_meta_pvc: "{{ openshift_storage_glusterfs_s3_meta_pvc }}"
+    glusterfs_s3_meta_pvc_size: "{{ openshift_storage_glusterfs_s3_meta_pvc_size }}"
     glusterfs_wipe: "{{ openshift_storage_glusterfs_wipe | bool }}"
     glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_heketi_is_native | bool }}"
     glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_heketi_is_missing | bool }}"
@@ -27,6 +41,7 @@
     glusterfs_heketi_ssh_user: "{{ openshift_storage_glusterfs_heketi_ssh_user }}"
     glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_heketi_ssh_sudo | bool }}"
     glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_heketi_ssh_keyfile }}"
+    glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_heketi_fstab }}"
     glusterfs_nodes: "{{ groups.glusterfs }}"
 
 - include: glusterfs_common.yml

+ 1 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml

@@ -87,6 +87,7 @@
       IMAGE_VERSION: "{{ glusterfs_version }}"
       NODE_LABELS: "{{ glusterfs_nodeselector }}"
       CLUSTER_NAME: "{{ glusterfs_name }}"
+      GB_GLFS_LRU_COUNT: "{{ glusterfs_block_max_host_vol }}"
 
 - name: Wait for GlusterFS pods
   oc_obj:

+ 15 - 0
roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml

@@ -9,6 +9,20 @@
     glusterfs_storageclass: "{{ openshift_storage_glusterfs_registry_storageclass }}"
     glusterfs_image: "{{ openshift_storage_glusterfs_registry_image }}"
     glusterfs_version: "{{ openshift_storage_glusterfs_registry_version }}"
+    glusterfs_block_deploy: "{{ openshift_storage_glusterfs_registry_block_deploy | bool }}"
+    glusterfs_block_image: "{{ openshift_storage_glusterfs_registry_block_image }}"
+    glusterfs_block_version: "{{ openshift_storage_glusterfs_registry_block_version }}"
+    glusterfs_block_max_host_vol: "{{ openshift_storage_glusterfs_registry_block_max_host_vol }}"
+    glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}"
+    glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}"
+    glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}"
+    glusterfs_s3_account: "{{ openshift_storage_glusterfs_registry_s3_account }}"
+    glusterfs_s3_user: "{{ openshift_storage_glusterfs_registry_s3_user }}"
+    glusterfs_s3_password: "{{ openshift_storage_glusterfs_registry_s3_password }}"
+    glusterfs_s3_pvc: "{{ openshift_storage_glusterfs_registry_s3_pvc }}"
+    glusterfs_s3_pvc_size: "{{ openshift_storage_glusterfs_registry_s3_pvc_size }}"
+    glusterfs_s3_meta_pvc: "{{ openshift_storage_glusterfs_registry_s3_meta_pvc }}"
+    glusterfs_s3_meta_pvc_size: "{{ openshift_storage_glusterfs_registry_s3_meta_pvc_size }}"
     glusterfs_wipe: "{{ openshift_storage_glusterfs_registry_wipe | bool }}"
     glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_registry_heketi_is_native | bool }}"
     glusterfs_heketi_is_missing: "{{ openshift_storage_glusterfs_registry_heketi_is_missing | bool }}"
@@ -27,6 +41,7 @@
     glusterfs_heketi_ssh_user: "{{ openshift_storage_glusterfs_registry_heketi_ssh_user }}"
     glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_registry_heketi_ssh_sudo | bool }}"
     glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_registry_heketi_ssh_keyfile }}"
+    glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_registry_heketi_fstab }}"
     glusterfs_nodes: "{{ groups.glusterfs_registry | default(groups.glusterfs) }}"
 
 - include: glusterfs_common.yml

+ 1 - 1
roles/openshift_storage_glusterfs/tasks/heketi_deploy_part1.yml

@@ -27,7 +27,7 @@
       HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
       HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
       HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
-      HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+      HEKETI_FSTAB: "{{ glusterfs_heketi_fstab }}"
       CLUSTER_NAME: "{{ glusterfs_name }}"
 
 - name: Set heketi Deployed fact

+ 1 - 1
roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml

@@ -107,7 +107,7 @@
       HEKETI_USER_KEY: "{{ glusterfs_heketi_user_key }}"
       HEKETI_ADMIN_KEY: "{{ glusterfs_heketi_admin_key }}"
       HEKETI_EXECUTOR: "{{ glusterfs_heketi_executor }}"
-      HEKETI_KUBE_NAMESPACE: "{{ glusterfs_namespace }}"
+      HEKETI_FSTAB: "{{ glusterfs_heketi_fstab }}"
       CLUSTER_NAME: "{{ glusterfs_name }}"
 
 - name: Wait for heketi pod