Browse Source

Fix prometheus role nfs

Allow for external or internal nfs.
Use facts as used for logging and metrics.
Update prometheus-alertmanager image to v0.9.1
Zohar Galor 7 years ago
parent
commit
418b742c36

+ 137 - 187
filter_plugins/oo_filters.py

@@ -710,8 +710,8 @@ def oo_openshift_env(hostvars):
     return facts
 
 
-# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
-def oo_component_persistent_volumes(hostvars, groups, component):
+# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements, too-many-locals
+def oo_component_persistent_volumes(hostvars, groups, component, subcomponent=None):
     """ Generate list of persistent volumes based on oo_openshift_env
         storage options set in host variables for a specific component.
     """
@@ -723,84 +723,90 @@ def oo_component_persistent_volumes(hostvars, groups, component):
     persistent_volume = None
 
     if component in hostvars['openshift']:
-        if 'storage' in hostvars['openshift'][component]:
-            params = hostvars['openshift'][component]['storage']
+        if subcomponent is not None:
+            storage_component = hostvars['openshift'][component][subcomponent]
+        else:
+            storage_component = hostvars['openshift'][component]
+
+        if 'storage' in storage_component:
+            params = storage_component['storage']
             kind = params['kind']
-            create_pv = params['create_pv']
-            if kind is not None and create_pv:
-                if kind == 'nfs':
-                    host = params['host']
-                    if host is None:
-                        if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
-                            host = groups['oo_nfs_to_config'][0]
+            if 'create_pv' in params:
+                create_pv = params['create_pv']
+                if kind is not None and create_pv:
+                    if kind == 'nfs':
+                        host = params['host']
+                        if host is None:
+                            if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
+                                host = groups['oo_nfs_to_config'][0]
+                            else:
+                                raise errors.AnsibleFilterError("|failed no storage host detected")
+                        directory = params['nfs']['directory']
+                        volume = params['volume']['name']
+                        path = directory + '/' + volume
+                        size = params['volume']['size']
+                        if 'labels' in params:
+                            labels = params['labels']
                         else:
-                            raise errors.AnsibleFilterError("|failed no storage host detected")
-                    directory = params['nfs']['directory']
-                    volume = params['volume']['name']
-                    path = directory + '/' + volume
-                    size = params['volume']['size']
-                    if 'labels' in params:
-                        labels = params['labels']
-                    else:
-                        labels = dict()
-                    access_modes = params['access']['modes']
-                    persistent_volume = dict(
-                        name="{0}-volume".format(volume),
-                        capacity=size,
-                        labels=labels,
-                        access_modes=access_modes,
-                        storage=dict(
-                            nfs=dict(
-                                server=host,
-                                path=path)))
-
-                elif kind == 'openstack':
-                    volume = params['volume']['name']
-                    size = params['volume']['size']
-                    if 'labels' in params:
-                        labels = params['labels']
-                    else:
-                        labels = dict()
-                    access_modes = params['access']['modes']
-                    filesystem = params['openstack']['filesystem']
-                    volume_id = params['openstack']['volumeID']
-                    persistent_volume = dict(
-                        name="{0}-volume".format(volume),
-                        capacity=size,
-                        labels=labels,
-                        access_modes=access_modes,
-                        storage=dict(
-                            cinder=dict(
-                                fsType=filesystem,
-                                volumeID=volume_id)))
-
-                elif kind == 'glusterfs':
-                    volume = params['volume']['name']
-                    size = params['volume']['size']
-                    if 'labels' in params:
-                        labels = params['labels']
-                    else:
-                        labels = dict()
-                    access_modes = params['access']['modes']
-                    endpoints = params['glusterfs']['endpoints']
-                    path = params['glusterfs']['path']
-                    read_only = params['glusterfs']['readOnly']
-                    persistent_volume = dict(
-                        name="{0}-volume".format(volume),
-                        capacity=size,
-                        labels=labels,
-                        access_modes=access_modes,
-                        storage=dict(
-                            glusterfs=dict(
-                                endpoints=endpoints,
-                                path=path,
-                                readOnly=read_only)))
-
-                elif not (kind == 'object' or kind == 'dynamic'):
-                    msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
-                        kind,
-                        component)
-                    raise errors.AnsibleFilterError(msg)
+                            labels = dict()
+                        access_modes = params['access']['modes']
+                        persistent_volume = dict(
+                            name="{0}-volume".format(volume),
+                            capacity=size,
+                            labels=labels,
+                            access_modes=access_modes,
+                            storage=dict(
+                                nfs=dict(
+                                    server=host,
+                                    path=path)))
+
+                    elif kind == 'openstack':
+                        volume = params['volume']['name']
+                        size = params['volume']['size']
+                        if 'labels' in params:
+                            labels = params['labels']
+                        else:
+                            labels = dict()
+                        access_modes = params['access']['modes']
+                        filesystem = params['openstack']['filesystem']
+                        volume_id = params['openstack']['volumeID']
+                        persistent_volume = dict(
+                            name="{0}-volume".format(volume),
+                            capacity=size,
+                            labels=labels,
+                            access_modes=access_modes,
+                            storage=dict(
+                                cinder=dict(
+                                    fsType=filesystem,
+                                    volumeID=volume_id)))
+
+                    elif kind == 'glusterfs':
+                        volume = params['volume']['name']
+                        size = params['volume']['size']
+                        if 'labels' in params:
+                            labels = params['labels']
+                        else:
+                            labels = dict()
+                        access_modes = params['access']['modes']
+                        endpoints = params['glusterfs']['endpoints']
+                        path = params['glusterfs']['path']
+                        read_only = params['glusterfs']['readOnly']
+                        persistent_volume = dict(
+                            name="{0}-volume".format(volume),
+                            capacity=size,
+                            labels=labels,
+                            access_modes=access_modes,
+                            storage=dict(
+                                glusterfs=dict(
+                                    endpoints=endpoints,
+                                    path=path,
+                                    readOnly=read_only)))
+
+                    elif not (kind == 'object' or kind == 'dynamic'):
+                        msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+                            kind,
+                            component)
+                        raise errors.AnsibleFilterError(msg)
     return persistent_volume
 
 
@@ -820,85 +826,10 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
         persistent_volumes = []
     if 'hosted' in hostvars['openshift']:
         for component in hostvars['openshift']['hosted']:
-            if 'storage' in hostvars['openshift']['hosted'][component]:
-                params = hostvars['openshift']['hosted'][component]['storage']
-                kind = params['kind']
-                if 'create_pv' in params:
-                    create_pv = params['create_pv']
-                    if kind is not None and create_pv:
-                        if kind == 'nfs':
-                            host = params['host']
-                            if host is None:
-                                if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
-                                    host = groups['oo_nfs_to_config'][0]
-                                else:
-                                    raise errors.AnsibleFilterError("|failed no storage host detected")
-                            directory = params['nfs']['directory']
-                            volume = params['volume']['name']
-                            path = directory + '/' + volume
-                            size = params['volume']['size']
-                            if 'labels' in params:
-                                labels = params['labels']
-                            else:
-                                labels = dict()
-                            access_modes = params['access']['modes']
-                            persistent_volume = dict(
-                                name="{0}-volume".format(volume),
-                                capacity=size,
-                                labels=labels,
-                                access_modes=access_modes,
-                                storage=dict(
-                                    nfs=dict(
-                                        server=host,
-                                        path=path)))
-                            persistent_volumes.append(persistent_volume)
-                        elif kind == 'openstack':
-                            volume = params['volume']['name']
-                            size = params['volume']['size']
-                            if 'labels' in params:
-                                labels = params['labels']
-                            else:
-                                labels = dict()
-                            access_modes = params['access']['modes']
-                            filesystem = params['openstack']['filesystem']
-                            volume_id = params['openstack']['volumeID']
-                            persistent_volume = dict(
-                                name="{0}-volume".format(volume),
-                                capacity=size,
-                                labels=labels,
-                                access_modes=access_modes,
-                                storage=dict(
-                                    cinder=dict(
-                                        fsType=filesystem,
-                                        volumeID=volume_id)))
-                            persistent_volumes.append(persistent_volume)
-                        elif kind == 'glusterfs':
-                            volume = params['volume']['name']
-                            size = params['volume']['size']
-                            if 'labels' in params:
-                                labels = params['labels']
-                            else:
-                                labels = dict()
-                            access_modes = params['access']['modes']
-                            endpoints = params['glusterfs']['endpoints']
-                            path = params['glusterfs']['path']
-                            read_only = params['glusterfs']['readOnly']
-                            persistent_volume = dict(
-                                name="{0}-volume".format(volume),
-                                capacity=size,
-                                labels=labels,
-                                access_modes=access_modes,
-                                storage=dict(
-                                    glusterfs=dict(
-                                        endpoints=endpoints,
-                                        path=path,
-                                        readOnly=read_only)))
-                            persistent_volumes.append(persistent_volume)
-                        elif not (kind == 'object' or kind == 'dynamic'):
-                            msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
-                                kind,
-                                component)
-                            raise errors.AnsibleFilterError(msg)
+            persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'hosted', component)
+            if persistent_volume is not None:
+                persistent_volumes.append(persistent_volume)
+
     if 'logging' in hostvars['openshift']:
         persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging')
         if persistent_volume is not None:
@@ -911,10 +842,22 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
         persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics')
         if persistent_volume is not None:
             persistent_volumes.append(persistent_volume)
+    if 'prometheus' in hostvars['openshift']:
+        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus')
+        if persistent_volume is not None:
+            persistent_volumes.append(persistent_volume)
+    if 'alertmanager' in hostvars['openshift']['prometheus']:
+        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertmanager')
+        if persistent_volume is not None:
+            persistent_volumes.append(persistent_volume)
+    if 'alertbuffer' in hostvars['openshift']['prometheus']:
+        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertbuffer')
+        if persistent_volume is not None:
+            persistent_volumes.append(persistent_volume)
     return persistent_volumes
 
 
-def oo_component_pv_claims(hostvars, component):
+def oo_component_pv_claims(hostvars, component, subcomponent=None):
     """ Generate list of persistent volume claims based on oo_openshift_env
        storage options set in host variables for a specific component.
     """
@@ -922,20 +865,27 @@ def oo_component_pv_claims(hostvars, component):
         raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
 
     if component in hostvars['openshift']:
-        if 'storage' in hostvars['openshift'][component]:
-            params = hostvars['openshift'][component]['storage']
+        if subcomponent is not None:
+            storage_component = hostvars['openshift'][component][subcomponent]
+        else:
+            storage_component = hostvars['openshift'][component]
+
+        if 'storage' in storage_component:
+            params = storage_component['storage']
             kind = params['kind']
-            create_pv = params['create_pv']
-            create_pvc = params['create_pvc']
-            if kind not in [None, 'object'] and create_pv and create_pvc:
-                volume = params['volume']['name']
-                size = params['volume']['size']
-                access_modes = params['access']['modes']
-                persistent_volume_claim = dict(
-                    name="{0}-claim".format(volume),
-                    capacity=size,
-                    access_modes=access_modes)
-                return persistent_volume_claim
+            if 'create_pv' in params:
+                if 'create_pvc' in params:
+                    create_pv = params['create_pv']
+                    create_pvc = params['create_pvc']
+                    if kind not in [None, 'object'] and create_pv and create_pvc:
+                        volume = params['volume']['name']
+                        size = params['volume']['size']
+                        access_modes = params['access']['modes']
+                        persistent_volume_claim = dict(
+                            name="{0}-claim".format(volume),
+                            capacity=size,
+                            access_modes=access_modes)
+                        return persistent_volume_claim
     return None
 
 
@@ -952,22 +902,10 @@ def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
         persistent_volume_claims = []
     if 'hosted' in hostvars['openshift']:
         for component in hostvars['openshift']['hosted']:
-            if 'storage' in hostvars['openshift']['hosted'][component]:
-                params = hostvars['openshift']['hosted'][component]['storage']
-                kind = params['kind']
-                if 'create_pv' in params:
-                    if 'create_pvc' in params:
-                        create_pv = params['create_pv']
-                        create_pvc = params['create_pvc']
-                        if kind not in [None, 'object'] and create_pv and create_pvc:
-                            volume = params['volume']['name']
-                            size = params['volume']['size']
-                            access_modes = params['access']['modes']
-                            persistent_volume_claim = dict(
-                                name="{0}-claim".format(volume),
-                                capacity=size,
-                                access_modes=access_modes)
-                            persistent_volume_claims.append(persistent_volume_claim)
+            persistent_volume_claim = oo_component_pv_claims(hostvars, 'hosted', component)
+            if persistent_volume_claim is not None:
+                persistent_volume_claims.append(persistent_volume_claim)
+
     if 'logging' in hostvars['openshift']:
         persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging')
         if persistent_volume_claim is not None:
@@ -980,6 +918,18 @@ def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
         persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics')
         if persistent_volume_claim is not None:
             persistent_volume_claims.append(persistent_volume_claim)
+    if 'prometheus' in hostvars['openshift']:
+        persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus')
+        if persistent_volume_claim is not None:
+            persistent_volume_claims.append(persistent_volume_claim)
+    if 'alertmanager' in hostvars['openshift']['prometheus']:
+        persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertmanager')
+        if persistent_volume_claim is not None:
+            persistent_volume_claims.append(persistent_volume_claim)
+    if 'alertbuffer' in hostvars['openshift']['prometheus']:
+        persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertbuffer')
+        if persistent_volume_claim is not None:
+            persistent_volume_claims.append(persistent_volume_claim)
     return persistent_volume_claims
 
 

+ 65 - 0
inventory/byo/hosts.example

@@ -615,6 +615,71 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #openshift_logging_image_prefix=registry.access.redhat.com/openshift3/
 #openshift_logging_image_version=3.7.0
 
+# Prometheus deployment
+#
+# Currently prometheus deployment is disabled by default, enable it by setting this
+#openshift_hosted_prometheus_deploy=true
+#
+# Prometheus storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group.  For example, the volume
+# path using these options would be "/exports/prometheus"
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_storage_nfs_directory=/exports
+#openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_storage_volume_name=prometheus
+#openshift_prometheus_storage_volume_size=10Gi
+#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+# For prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_kind=nfs
+#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
+#openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_volume_size=10Gi
+#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+# For prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_kind=nfs
+#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
+#openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
+#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/prometheus"
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_storage_host=nfs.example.com
+#openshift_prometheus_storage_nfs_directory=/exports
+#openshift_prometheus_storage_volume_name=prometheus
+#openshift_prometheus_storage_volume_size=10Gi
+#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+# For prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_kind=nfs
+#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertmanager_storage_host=nfs.example.com
+#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
+#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_volume_size=10Gi
+#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+# For prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_kind=nfs
+#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertbuffer_storage_host=nfs.example.com
+#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
+#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
+#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#
+# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
+# which are destroyed when pods are deleted
+
 # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
 # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
 

File diff suppressed because it is too large
+ 900 - 0
inventory/byo/hosts.origin.example


+ 9 - 0
playbooks/common/openshift-cluster/create_persistent_volumes.yml

@@ -1,4 +1,13 @@
 ---
+- name: Create persistent volumes
+  hosts: oo_first_master
+  vars:
+    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+  tasks:
+  - debug: var=persistent_volumes
+  - debug: var=persistent_volume_claims
+
 - name: Create Hosted Resources - persistent volumes
   hosts: oo_first_master
   vars:

+ 76 - 1
roles/openshift_facts/library/openshift_facts.py

@@ -498,6 +498,20 @@ def set_selectors(facts):
         facts['hosted']['etcd'] = {}
     if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
         facts['hosted']['etcd']['selector'] = None
+    if 'prometheus' not in facts:
+        facts['prometheus'] = {}
+    if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']:
+        facts['prometheus']['selector'] = None
+    if 'alertmanager' not in facts['prometheus']:
+        facts['prometheus']['alertmanager'] = {}
+    # pylint: disable=line-too-long
+    if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']:
+        facts['prometheus']['alertmanager']['selector'] = None
+    if 'alertbuffer' not in facts['prometheus']:
+        facts['prometheus']['alertbuffer'] = {}
+    # pylint: disable=line-too-long
+    if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']:
+        facts['prometheus']['alertbuffer']['selector'] = None
 
     return facts
 
@@ -1779,7 +1793,8 @@ class OpenShiftFacts(object):
                    'node',
                    'logging',
                    'loggingops',
-                   'metrics']
+                   'metrics',
+                   'prometheus']
 
     # Disabling too-many-arguments, this should be cleaned up as a TODO item.
     # pylint: disable=too-many-arguments,no-value-for-parameter
@@ -2068,6 +2083,66 @@ class OpenShiftFacts(object):
                 )
             )
 
+            defaults['prometheus'] = dict(
+                storage=dict(
+                    kind=None,
+                    volume=dict(
+                        name='prometheus',
+                        size='10Gi'
+                    ),
+                    nfs=dict(
+                        directory='/exports',
+                        options='*(rw,root_squash)'
+                    ),
+                    host=None,
+                    access=dict(
+                        modes=['ReadWriteOnce']
+                    ),
+                    create_pv=True,
+                    create_pvc=False
+                )
+            )
+
+            defaults['prometheus']['alertmanager'] = dict(
+                storage=dict(
+                    kind=None,
+                    volume=dict(
+                        name='prometheus-alertmanager',
+                        size='10Gi'
+                    ),
+                    nfs=dict(
+                        directory='/exports',
+                        options='*(rw,root_squash)'
+                    ),
+                    host=None,
+                    access=dict(
+                        modes=['ReadWriteOnce']
+                    ),
+                    create_pv=True,
+                    create_pvc=False
+                )
+            )
+
+            defaults['prometheus']['alertbuffer'] = dict(
+                storage=dict(
+                    kind=None,
+                    volume=dict(
+                        name='prometheus-alertbuffer',
+                        size='10Gi'
+                    ),
+                    nfs=dict(
+                        directory='/exports',
+                        options='*(rw,root_squash)'
+                    ),
+                    host=None,
+                    access=dict(
+                        modes=['ReadWriteOnce']
+                    ),
+                    create_pv=True,
+                    create_pvc=False
+                )
+            )
+
         return defaults
 
     def guess_host_provider(self):

+ 1 - 1
roles/openshift_hosted_facts/tasks/main.yml

@@ -16,4 +16,4 @@
                        | oo_openshift_env }}"
     openshift_env_structures:
     - 'openshift.hosted.router.*'
-  with_items: [hosted, logging, loggingops, metrics]
+  with_items: [hosted, logging, loggingops, metrics, prometheus]

+ 7 - 27
roles/openshift_prometheus/defaults/main.yaml

@@ -10,50 +10,30 @@ openshift_prometheus_node_selector: {"region":"infra"}
 # images
 openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
 openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
-openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
+openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:v0.9.1"
 openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"
 
 # additional prometheus rules file
 openshift_prometheus_additional_rules_file: null
 
-# All the required exports
-openshift_prometheus_pv_exports:
-  - prometheus
-  - prometheus-alertmanager
-  - prometheus-alertbuffer
-# PV template files and their created object names
-openshift_prometheus_pv_data:
-  - pv_name: prometheus
-    pv_template: prom-pv-server.yml
-    pv_label: Prometheus Server PV
-  - pv_name: prometheus-alertmanager
-    pv_template: prom-pv-alertmanager.yml
-    pv_label: Prometheus Alertmanager PV
-  - pv_name: prometheus-alertbuffer
-    pv_template: prom-pv-alertbuffer.yml
-    pv_label: Prometheus Alert Buffer PV
-
-# Hostname/IP of the NFS server. Currently defaults to first master
-openshift_prometheus_nfs_server: "{{ groups.nfs.0 }}"
-
 # storage
 openshift_prometheus_storage_type: pvc
 openshift_prometheus_pvc_name: prometheus
-openshift_prometheus_pvc_size: 10G
+openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}"
 openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_pvc_pv_selector: {}
+openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default('') }}"
 
 openshift_prometheus_alertmanager_storage_type: pvc
 openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
-openshift_prometheus_alertmanager_pvc_size: 10G
+openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}"
 openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_alertmanager_pvc_pv_selector: {}
+openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default('') }}"
 
 openshift_prometheus_alertbuffer_storage_type: pvc
 openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
-openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}"
 openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_alertbuffer_pvc_pv_selector: {}
+openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default('') }}"
 
 # container resources
 openshift_prometheus_cpu_limit: null

+ 0 - 3
roles/openshift_prometheus/files/openshift_prometheus.exports

@@ -1,3 +0,0 @@
-/exports/prometheus *(rw,no_root_squash,no_wdelay)
-/exports/prometheus-alertmanager *(rw,no_root_squash,no_wdelay)
-/exports/prometheus-alertbuffer *(rw,no_root_squash,no_wdelay)

+ 0 - 36
roles/openshift_prometheus/tasks/create_pvs.yaml

@@ -1,36 +0,0 @@
----
-# Check for existence and then conditionally:
-# - evaluate templates
-# - PVs
-#
-# These tasks idempotently create required Prometheus PV objects. Do not
-# call this file directly. This file is intended to be run as an
-# include that has a 'with_items' attached to it. Hence the use below
-# of variables like "{{ item.pv_label }}"
-
-- name: "Check if the {{ item.pv_label }} template has been created already"
-  oc_obj:
-    namespace: "{{ openshift_prometheus_namespace }}"
-    state: list
-    kind: pv
-    name: "{{ item.pv_name }}"
-  register: prom_pv_check
-
-# Skip all of this if the PV already exists
-- block:
-    - name: "Ensure the {{ item.pv_label }} template is evaluated"
-      template:
-        src: "{{ item.pv_template }}.j2"
-        dest: "{{ tempdir }}/templates/{{ item.pv_template }}"
-
-    - name: "Ensure {{ item.pv_label }} is created"
-      oc_obj:
-        namespace: "{{ openshift_prometheus_namespace }}"
-        kind: pv
-        name: "{{ item.pv_name }}"
-        state: present
-        delete_after: True
-        files:
-          - "{{ tempdir }}/templates/{{ item.pv_template }}"
-  when:
-    - not prom_pv_check.results.results.0

+ 0 - 9
roles/openshift_prometheus/tasks/install_prometheus.yaml

@@ -54,15 +54,6 @@
     resource_name: cluster-reader
     user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus"
 
-
-######################################################################
-# NFS
-# In the case that we are not running on a cloud provider, volumes must be statically provisioned
-
-- include: nfs.yaml
-  when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
-
-
 # create prometheus and alerts services
 # TODO join into 1 task with loop
 - name: Create prometheus service

+ 0 - 44
roles/openshift_prometheus/tasks/nfs.yaml

@@ -1,44 +0,0 @@
----
-# Tasks to statically provision NFS volumes
-# Include if not using dynamic volume provisioning
-- name: Ensure the /exports/ directory exists
-  file:
-    path: /exports/
-    state: directory
-    mode: 0755
-    owner: root
-    group: root
-
-- name: Ensure the prom-pv0X export directories exist
-  file:
-    path: "/exports/{{ item }}"
-    state: directory
-    mode: 0777
-    owner: nfsnobody
-    group: nfsnobody
-  with_items: "{{ openshift_prometheus_pv_exports }}"
-
-- name: Ensure the NFS exports for Prometheus PVs exist
-  copy:
-    src: openshift_prometheus.exports
-    dest: /etc/exports.d/openshift_prometheus.exports
-  register: nfs_exports_updated
-
-- name: Ensure the NFS export table is refreshed if exports were added
-  command: exportfs -ar
-  when:
-    - nfs_exports_updated.changed
-
-
-######################################################################
-# Create the required Prometheus PVs. Check out these online docs if you
-# need a refresher on includes looping with items:
-# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
-# * http://stackoverflow.com/a/35128533
-#
-# TODO: Handle the case where a PV template is updated in
-# openshift-ansible and the change needs to be landed on the managed
-# cluster.
-
-- include: create_pvs.yaml
-  with_items: "{{ openshift_prometheus_pv_data }}"

+ 0 - 15
roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2

@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: prometheus-alertbuffer
-  labels:
-    storage: prometheus-alertbuffer
-spec:
-  capacity:
-    storage: 15Gi
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /exports/prometheus-alertbuffer
-    server: {{ openshift_prometheus_nfs_server }}
-  persistentVolumeReclaimPolicy: Retain

+ 0 - 15
roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2

@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: prometheus-alertmanager
-  labels:
-    storage: prometheus-alertmanager
-spec:
-  capacity:
-    storage: 15Gi
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /exports/prometheus-alertmanager
-    server: {{ openshift_prometheus_nfs_server }}
-  persistentVolumeReclaimPolicy: Retain

+ 0 - 15
roles/openshift_prometheus/templates/prom-pv-server.yml.j2

@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: prometheus
-  labels:
-    storage: prometheus
-spec:
-  capacity:
-    storage: 15Gi
-  accessModes:
-    - ReadWriteOnce
-  nfs:
-    path: /exports/prometheus
-    server: {{ openshift_prometheus_nfs_server }}
-  persistentVolumeReclaimPolicy: Retain

+ 3 - 0
roles/openshift_storage_nfs/tasks/main.yml

@@ -35,6 +35,9 @@
     - "{{ openshift.logging }}"
     - "{{ openshift.loggingops }}"
     - "{{ openshift.hosted.etcd }}"
+    - "{{ openshift.prometheus }}"
+    - "{{ openshift.prometheus.alertmanager }}"
+    - "{{ openshift.prometheus.alertbuffer }}"
 
 - name: Configure exports
   template:

+ 3 - 0
roles/openshift_storage_nfs/templates/exports.j2

@@ -3,3 +3,6 @@
 {{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
 {{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
 {{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
+{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }}
+{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }}
+{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}