Browse source

Fix hosted vars

Remove hosted vars from openshift_facts.

The current pattern is causing a bunch of undesired side effects.
Michael Gugino, 7 years ago
commit 0de559c8f2
51 files changed, 414 additions and 1035 deletions
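At a glance, the migration this commit performs (illustrative only; both forms below appear verbatim in the diffs that follow): nested `openshift.hosted.*` fact lookups are replaced with flat Ansible variables backed by role defaults.

```yaml
# Before: value resolved through the nested fact dict built by openshift_facts
namespace: "{{ openshift.hosted.registry.namespace | default('default') }}"
# After: a flat variable whose default lives in roles/openshift_facts/defaults/main.yml
# and which follows normal Ansible variable precedence
namespace: "{{ openshift_hosted_registry_namespace }}"
```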
  1. + 0 - 225    filter_plugins/oo_filters.py
  2. + 1 - 1      playbooks/adhoc/openshift_hosted_logging_efk.yaml
  3. + 1 - 30     playbooks/openshift-glusterfs/private/registry.yml
  4. + 0 - 4      playbooks/openshift-hosted/private/create_persistent_volumes.yml
  5. + 94 - 0     roles/openshift_facts/defaults/main.yml
  6. + 2 - 307    roles/openshift_facts/library/openshift_facts.py
  7. + 10 - 1     roles/openshift_hosted/defaults/main.yml
  8. + 1 - 1      roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py
  9. + 12 - 15    roles/openshift_hosted/tasks/registry.yml
  10. + 6 - 8     roles/openshift_hosted/tasks/router.yml
  11. + 1 - 7     roles/openshift_hosted/tasks/secure.yml
  12. + 5 - 5     roles/openshift_hosted/tasks/storage/glusterfs.yml
  13. + 1 - 1     roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml
  14. + 1 - 1     roles/openshift_hosted/tasks/storage/object_storage.yml
  15. + 2 - 2     roles/openshift_hosted/tasks/storage/s3.yml
  16. + 1 - 1     roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2
  17. + 1 - 1     roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2
  18. + 1 - 1     roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2
  19. + 1 - 1     roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2
  20. + 0 - 18    roles/openshift_hosted_facts/tasks/main.yml
  21. + 0 - 54    roles/openshift_hosted_metrics/README.md
  22. + 0 - 2     roles/openshift_hosted_metrics/defaults/main.yml
  23. + 0 - 31    roles/openshift_hosted_metrics/handlers/main.yml
  24. + 0 - 18    roles/openshift_hosted_metrics/meta/main.yaml
  25. + 0 - 132   roles/openshift_hosted_metrics/tasks/install.yml
  26. + 0 - 75    roles/openshift_hosted_metrics/tasks/main.yaml
  27. + 0 - 21    roles/openshift_hosted_metrics/vars/main.yaml
  28. + 5 - 5     roles/openshift_logging/defaults/main.yml
  29. + 1 - 0     roles/openshift_logging_curator/meta/main.yaml
  30. + 1 - 0     roles/openshift_logging_elasticsearch/meta/main.yaml
  31. + 1 - 0     roles/openshift_logging_fluentd/meta/main.yaml
  32. + 1 - 1     roles/openshift_logging_kibana/defaults/main.yml
  33. + 1 - 0     roles/openshift_logging_kibana/meta/main.yaml
  34. + 1 - 1     roles/openshift_logging_mux/defaults/main.yml
  35. + 1 - 0     roles/openshift_logging_mux/meta/main.yaml
  36. + 1 - 0     roles/openshift_master/meta/main.yml
  37. + 1 - 1     roles/openshift_master/templates/master.yaml.v1.j2
  38. + 0 - 1     roles/openshift_master_facts/defaults/main.yml
  39. + 2 - 8     roles/openshift_master_facts/tasks/main.yml
  40. + 159 - 0   roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py
  41. + 9 - 0     roles/openshift_persistent_volumes/defaults/main.yml
  42. + 2 - 1     roles/openshift_persistent_volumes/meta/main.yml
  43. + 27 - 30   roles/openshift_persistent_volumes/tasks/main.yml
  44. + 17 - 0    roles/openshift_persistent_volumes/tasks/pv.yml
  45. + 17 - 0    roles/openshift_persistent_volumes/tasks/pvc.yml
  46. + 1 - 1     roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2
  47. + 1 - 1     roles/openshift_storage_glusterfs/defaults/main.yml
  48. + 2 - 2     roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
  49. + 4 - 2     roles/openshift_storage_glusterfs/tasks/main.yml
  50. + 10 - 10   roles/openshift_storage_nfs/tasks/main.yml
  51. + 8 - 8     roles/openshift_storage_nfs/templates/exports.j2

+ 0 - 225
filter_plugins/oo_filters.py

@@ -710,229 +710,6 @@ def oo_openshift_env(hostvars):
     return facts
 
 
-# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements, too-many-locals
-def oo_component_persistent_volumes(hostvars, groups, component, subcomponent=None):
-    """ Generate list of persistent volumes based on oo_openshift_env
-        storage options set in host variables for a specific component.
-    """
-    if not issubclass(type(hostvars), dict):
-        raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-    if not issubclass(type(groups), dict):
-        raise errors.AnsibleFilterError("|failed expects groups is a dict")
-
-    persistent_volume = None
-
-    if component in hostvars['openshift']:
-        if subcomponent is not None:
-            storage_component = hostvars['openshift'][component][subcomponent]
-        else:
-            storage_component = hostvars['openshift'][component]
-
-        if 'storage' in storage_component:
-            params = storage_component['storage']
-            kind = params['kind']
-            if 'create_pv' in params:
-                create_pv = params['create_pv']
-                if kind is not None and create_pv:
-                    if kind == 'nfs':
-                        host = params['host']
-                        if host is None:
-                            if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
-                                host = groups['oo_nfs_to_config'][0]
-                            else:
-                                raise errors.AnsibleFilterError("|failed no storage host detected")
-                        directory = params['nfs']['directory']
-                        volume = params['volume']['name']
-                        path = directory + '/' + volume
-                        size = params['volume']['size']
-                        if 'labels' in params:
-                            labels = params['labels']
-                        else:
-                            labels = dict()
-                        access_modes = params['access']['modes']
-                        persistent_volume = dict(
-                            name="{0}-volume".format(volume),
-                            capacity=size,
-                            labels=labels,
-                            access_modes=access_modes,
-                            storage=dict(
-                                nfs=dict(
-                                    server=host,
-                                    path=path)))
-
-                    elif kind == 'openstack':
-                        volume = params['volume']['name']
-                        size = params['volume']['size']
-                        if 'labels' in params:
-                            labels = params['labels']
-                        else:
-                            labels = dict()
-                        access_modes = params['access']['modes']
-                        filesystem = params['openstack']['filesystem']
-                        volume_id = params['openstack']['volumeID']
-                        persistent_volume = dict(
-                            name="{0}-volume".format(volume),
-                            capacity=size,
-                            labels=labels,
-                            access_modes=access_modes,
-                            storage=dict(
-                                cinder=dict(
-                                    fsType=filesystem,
-                                    volumeID=volume_id)))
-
-                    elif kind == 'glusterfs':
-                        volume = params['volume']['name']
-                        size = params['volume']['size']
-                        if 'labels' in params:
-                            labels = params['labels']
-                        else:
-                            labels = dict()
-                        access_modes = params['access']['modes']
-                        endpoints = params['glusterfs']['endpoints']
-                        path = params['glusterfs']['path']
-                        read_only = params['glusterfs']['readOnly']
-                        persistent_volume = dict(
-                            name="{0}-volume".format(volume),
-                            capacity=size,
-                            labels=labels,
-                            access_modes=access_modes,
-                            storage=dict(
-                                glusterfs=dict(
-                                    endpoints=endpoints,
-                                    path=path,
-                                    readOnly=read_only)))
-
-                    elif not (kind == 'object' or kind == 'dynamic'):
-                        msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
-                            kind,
-                            component)
-                        raise errors.AnsibleFilterError(msg)
-    return persistent_volume
-
-
-# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
-def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
-    """ Generate list of persistent volumes based on oo_openshift_env
-        storage options set in host variables.
-    """
-    if not issubclass(type(hostvars), dict):
-        raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-    if not issubclass(type(groups), dict):
-        raise errors.AnsibleFilterError("|failed expects groups is a dict")
-    if persistent_volumes is not None and not issubclass(type(persistent_volumes), list):
-        raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list")
-
-    if persistent_volumes is None:
-        persistent_volumes = []
-    if 'hosted' in hostvars['openshift']:
-        for component in hostvars['openshift']['hosted']:
-            persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'hosted', component)
-            if persistent_volume is not None:
-                persistent_volumes.append(persistent_volume)
-
-    if 'logging' in hostvars['openshift']:
-        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging')
-        if persistent_volume is not None:
-            persistent_volumes.append(persistent_volume)
-    if 'loggingops' in hostvars['openshift']:
-        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'loggingops')
-        if persistent_volume is not None:
-            persistent_volumes.append(persistent_volume)
-    if 'metrics' in hostvars['openshift']:
-        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics')
-        if persistent_volume is not None:
-            persistent_volumes.append(persistent_volume)
-    if 'prometheus' in hostvars['openshift']:
-        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus')
-        if persistent_volume is not None:
-            persistent_volumes.append(persistent_volume)
-    if 'alertmanager' in hostvars['openshift']['prometheus']:
-        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertmanager')
-        if persistent_volume is not None:
-            persistent_volumes.append(persistent_volume)
-    if 'alertbuffer' in hostvars['openshift']['prometheus']:
-        persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertbuffer')
-        if persistent_volume is not None:
-            persistent_volumes.append(persistent_volume)
-    return persistent_volumes
-
-
-def oo_component_pv_claims(hostvars, component, subcomponent=None):
-    """ Generate list of persistent volume claims based on oo_openshift_env
-        storage options set in host variables for a specific component.
-    """
-    if not issubclass(type(hostvars), dict):
-        raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-
-    if component in hostvars['openshift']:
-        if subcomponent is not None:
-            storage_component = hostvars['openshift'][component][subcomponent]
-        else:
-            storage_component = hostvars['openshift'][component]
-
-        if 'storage' in storage_component:
-            params = storage_component['storage']
-            kind = params['kind']
-            if 'create_pv' in params:
-                if 'create_pvc' in params:
-                    create_pv = params['create_pv']
-                    create_pvc = params['create_pvc']
-                    if kind not in [None, 'object'] and create_pv and create_pvc:
-                        volume = params['volume']['name']
-                        size = params['volume']['size']
-                        access_modes = params['access']['modes']
-                        persistent_volume_claim = dict(
-                            name="{0}-claim".format(volume),
-                            capacity=size,
-                            access_modes=access_modes)
-                        return persistent_volume_claim
-    return None
-
-
-def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
-    """ Generate list of persistent volume claims based on oo_openshift_env
-        storage options set in host variables.
-    """
-    if not issubclass(type(hostvars), dict):
-        raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-    if persistent_volume_claims is not None and not issubclass(type(persistent_volume_claims), list):
-        raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list")
-
-    if persistent_volume_claims is None:
-        persistent_volume_claims = []
-    if 'hosted' in hostvars['openshift']:
-        for component in hostvars['openshift']['hosted']:
-            persistent_volume_claim = oo_component_pv_claims(hostvars, 'hosted', component)
-            if persistent_volume_claim is not None:
-                persistent_volume_claims.append(persistent_volume_claim)
-
-    if 'logging' in hostvars['openshift']:
-        persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging')
-        if persistent_volume_claim is not None:
-            persistent_volume_claims.append(persistent_volume_claim)
-    if 'loggingops' in hostvars['openshift']:
-        persistent_volume_claim = oo_component_pv_claims(hostvars, 'loggingops')
-        if persistent_volume_claim is not None:
-            persistent_volume_claims.append(persistent_volume_claim)
-    if 'metrics' in hostvars['openshift']:
-        persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics')
-        if persistent_volume_claim is not None:
-            persistent_volume_claims.append(persistent_volume_claim)
-    if 'prometheus' in hostvars['openshift']:
-        persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus')
-        if persistent_volume_claim is not None:
-            persistent_volume_claims.append(persistent_volume_claim)
-    if 'alertmanager' in hostvars['openshift']['prometheus']:
-        persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertmanager')
-        if persistent_volume_claim is not None:
-            persistent_volume_claims.append(persistent_volume_claim)
-    if 'alertbuffer' in hostvars['openshift']['prometheus']:
-        persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertbuffer')
-        if persistent_volume_claim is not None:
-            persistent_volume_claims.append(persistent_volume_claim)
-    return persistent_volume_claims
-
-
 def oo_31_rpm_rename_conversion(rpms, openshift_version=None):
     """ Filters a list of 3.0 rpms and return the corresponding 3.1 rpms
         names with proper version (if provided)
@@ -1220,8 +997,6 @@ class FilterModule(object):
             "oo_generate_secret": oo_generate_secret,
             "oo_generate_secret": oo_generate_secret,
             "oo_nodes_with_label": oo_nodes_with_label,
             "oo_nodes_with_label": oo_nodes_with_label,
             "oo_openshift_env": oo_openshift_env,
             "oo_openshift_env": oo_openshift_env,
-            "oo_persistent_volumes": oo_persistent_volumes,
-            "oo_persistent_volume_claims": oo_persistent_volume_claims,
             "oo_31_rpm_rename_conversion": oo_31_rpm_rename_conversion,
             "oo_31_rpm_rename_conversion": oo_31_rpm_rename_conversion,
             "oo_pods_match_component": oo_pods_match_component,
             "oo_pods_match_component": oo_pods_match_component,
             "oo_get_hosts_from_hostvars": oo_get_hosts_from_hostvars,
             "oo_get_hosts_from_hostvars": oo_get_hosts_from_hostvars,

+ 1 - 1
playbooks/adhoc/openshift_hosted_logging_efk.yaml

@@ -8,7 +8,7 @@
   hosts: masters:!masters[0]
   pre_tasks:
   - set_fact:
-      openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+      openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"
   tasks:
   - include_role:
       name: openshift_logging

+ 1 - 30
playbooks/openshift-glusterfs/private/registry.yml

@@ -1,40 +1,11 @@
 ---
 - import_playbook: config.yml
 
-- name: Initialize GlusterFS registry PV and PVC vars
-  hosts: oo_first_master
-  tags: hosted
-  tasks:
-  - set_fact:
-      glusterfs_pv: []
-      glusterfs_pvc: []
-
-  - set_fact:
-      glusterfs_pv:
-      - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
-        capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
-        access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
-        storage:
-          glusterfs:
-            endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
-            path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
-            readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
-      glusterfs_pvc:
-      - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
-        capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
-        access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
-    when: openshift.hosted.registry.storage.glusterfs.swap
-
 - name: Create persistent volumes
   hosts: oo_first_master
-  tags:
-  - hosted
-  vars:
-    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
-    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
   roles:
   - role: openshift_persistent_volumes
-    when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
+    when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
 
 
 - name: Create Hosted Resources
   hosts: oo_first_master

+ 0 - 4
playbooks/openshift-hosted/private/create_persistent_volumes.yml

@@ -1,9 +1,5 @@
 ---
 - name: Create Hosted Resources - persistent volumes
   hosts: oo_first_master
-  vars:
-    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
-    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
   roles:
   - role: openshift_persistent_volumes
-    when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0

+ 94 - 0
roles/openshift_facts/defaults/main.yml

@@ -3,4 +3,98 @@ openshift_cli_image_dict:
   origin: 'openshift/origin'
   openshift-enterprise: 'openshift3/ose'
 
+openshift_hosted_images_dict:
+  origin: 'openshift/origin-${component}:${version}'
+  openshift-enterprise: 'openshift3/ose-${component}:${version}'
+
 openshift_cli_image: "{{ osm_image | default(openshift_cli_image_dict[openshift_deployment_type]) }}"
+
+# osm_default_subdomain is an old migrated fact, can probably be removed.
+osm_default_subdomain: "router.default.svc.cluster.local"
+openshift_master_default_subdomain: "{{ osm_default_subdomain }}"
+
+openshift_hosted_etcd_storage_nfs_directory: '/exports'
+openshift_hosted_etcd_storage_nfs_options: '*(rw,root_squash)'
+openshift_hosted_etcd_storage_volume_name: 'etcd'
+openshift_hosted_etcd_storage_volume_size: '1Gi'
+openshift_hosted_etcd_storage_create_pv: True
+openshift_hosted_etcd_storage_create_pvc: False
+openshift_hosted_etcd_storage_access_modes:
+  - 'ReadWriteOnce'
+
+openshift_hosted_registry_namespace: 'default'
+openshift_hosted_registry_storage_volume_name: 'registry'
+openshift_hosted_registry_storage_volume_size: '5Gi'
+openshift_hosted_registry_storage_create_pv: True
+openshift_hosted_registry_storage_create_pvc: True
+openshift_hosted_registry_storage_nfs_directory: '/exports'
+openshift_hosted_registry_storage_nfs_options: '*(rw,root_squash)'
+openshift_hosted_registry_storage_glusterfs_endpoints: 'glusterfs-registry-endpoints'
+openshift_hosted_registry_storage_glusterfs_path: glusterfs-registry-volume
+openshift_hosted_registry_storage_glusterfs_readOnly: False
+openshift_hosted_registry_storage_glusterfs_swap: False
+openshift_hosted_registry_storage_glusterfs_swapcopy: True
+openshift_hosted_registry_storage_glusterfs_ips: []
+openshift_hosted_registry_storage_access_modes:
+  - 'ReadWriteMany'
+
+openshift_logging_storage_nfs_directory: '/exports'
+openshift_logging_storage_nfs_options: '*(rw,root_squash)'
+openshift_logging_storage_volume_name: 'logging-es'
+openshift_logging_storage_create_pv: True
+openshift_logging_storage_create_pvc: False
+openshift_logging_storage_access_modes:
+  - 'ReadWriteOnce'
+
+openshift_loggingops_storage_volume_name: 'logging-es-ops'
+openshift_loggingops_storage_volume_size: '10Gi'
+openshift_loggingops_storage_create_pv: True
+openshift_loggingops_storage_create_pvc: False
+openshift_loggingops_storage_nfs_directory: '/exports'
+openshift_loggingops_storage_nfs_options: '*(rw,root_squash)'
+openshift_loggingops_storage_access_modes:
+  - 'ReadWriteOnce'
+
+openshift_metrics_deploy: False
+openshift_metrics_duration: 7
+openshift_metrics_resolution: '10s'
+openshift_metrics_storage_volume_name: 'metrics'
+openshift_metrics_storage_volume_size: '10Gi'
+openshift_metrics_storage_create_pv: True
+openshift_metrics_storage_create_pvc: False
+openshift_metrics_storage_nfs_directory: '/exports'
+openshift_metrics_storage_nfs_options: '*(rw,root_squash)'
+openshift_metrics_storage_access_modes:
+  - 'ReadWriteOnce'
+
+openshift_prometheus_storage_volume_name: 'prometheus'
+openshift_prometheus_storage_volume_size: '10Gi'
+openshift_prometheus_storage_nfs_directory: '/exports'
+openshift_prometheus_storage_nfs_options: '*(rw,root_squash)'
+openshift_prometheus_storage_access_modes:
+  - 'ReadWriteOnce'
+openshift_prometheus_storage_create_pv: True
+openshift_prometheus_storage_create_pvc: False
+
+openshift_prometheus_alertmanager_storage_volume_name: 'prometheus-alertmanager'
+openshift_prometheus_alertmanager_storage_volume_size: '10Gi'
+openshift_prometheus_alertmanager_storage_nfs_directory: '/exports'
+openshift_prometheus_alertmanager_storage_nfs_options: '*(rw,root_squash)'
+openshift_prometheus_alertmanager_storage_access_modes:
+  - 'ReadWriteOnce'
+openshift_prometheus_alertmanager_storage_create_pv: True
+openshift_prometheus_alertmanager_storage_create_pvc: False
+
+openshift_prometheus_alertbuffer_storage_volume_name: 'prometheus-alertbuffer'
+openshift_prometheus_alertbuffer_storage_volume_size: '10Gi'
+openshift_prometheus_alertbuffer_storage_nfs_directory: '/exports'
+openshift_prometheus_alertbuffer_storage_nfs_options: '*(rw,root_squash)'
+openshift_prometheus_alertbuffer_storage_access_modes:
+  - 'ReadWriteOnce'
+openshift_prometheus_alertbuffer_storage_create_pv: True
+openshift_prometheus_alertbuffer_storage_create_pvc: False
+
+
+openshift_router_selector: "region=infra"
+openshift_hosted_router_selector: "{{ openshift_router_selector }}"
+openshift_hosted_registry_selector: "{{ openshift_router_selector }}"
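For reference: these flat defaults carry the same information the deleted oo_component_persistent_volumes filter read out of openshift.hosted.*. Assuming the new generate_pv_pvcs_list.py action plugin (added in this commit but not shown on this page) keeps the PV shape of the old NFS branch, the registry PV entry would look roughly like:

```yaml
# Sketch only; field names taken from the removed filter's nfs branch above.
- name: "{{ openshift_hosted_registry_storage_volume_name }}-volume"     # "registry-volume"
  capacity: "{{ openshift_hosted_registry_storage_volume_size }}"        # "5Gi"
  access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"   # ['ReadWriteMany']
  storage:
    nfs:
      server: "{{ groups.oo_nfs_to_config.0 }}"                          # first NFS host, as in the old filter
      path: "{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }}"
```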

+ 2 - 307
roles/openshift_facts/library/openshift_facts.py

@@ -11,14 +11,13 @@ import copy
 import errno
 import json
 import re
-import io
 import os
 import yaml
 import struct
 import socket
 from distutils.util import strtobool
 from distutils.version import LooseVersion
-from ansible.module_utils.six import string_types, text_type
+from ansible.module_utils.six import string_types
 from ansible.module_utils.six.moves import configparser
 
 # ignore pylint errors related to the module_utils import
@@ -86,24 +85,6 @@ def migrate_node_facts(facts):
     return facts
 
 
-def migrate_hosted_facts(facts):
-    """ Apply migrations for master facts """
-    if 'master' in facts:
-        if 'router_selector' in facts['master']:
-            if 'hosted' not in facts:
-                facts['hosted'] = {}
-            if 'router' not in facts['hosted']:
-                facts['hosted']['router'] = {}
-            facts['hosted']['router']['selector'] = facts['master'].pop('router_selector')
-        if 'registry_selector' in facts['master']:
-            if 'hosted' not in facts:
-                facts['hosted'] = {}
-            if 'registry' not in facts['hosted']:
-                facts['hosted']['registry'] = {}
-            facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
-    return facts
-
-
 def migrate_admission_plugin_facts(facts):
 def migrate_admission_plugin_facts(facts):
     """ Apply migrations for admission plugin facts """
     if 'master' in facts:
@@ -125,7 +106,6 @@ def migrate_local_facts(facts):
     migrated_facts = copy.deepcopy(facts)
     migrated_facts = migrate_common_facts(migrated_facts)
     migrated_facts = migrate_node_facts(migrated_facts)
-    migrated_facts = migrate_hosted_facts(migrated_facts)
     migrated_facts = migrate_admission_plugin_facts(migrated_facts)
     return migrated_facts
 
@@ -412,58 +392,6 @@ def normalize_provider_facts(provider, metadata):
     return facts
 
 
-# pylint: disable=too-many-branches
-def set_selectors(facts):
-    """ Set selectors facts if not already present in facts dict
-        Args:
-            facts (dict): existing facts
-        Returns:
-            dict: the facts dict updated with the generated selectors
-            facts if they were not already present
-
-    """
-    selector = "region=infra"
-
-    if 'hosted' not in facts:
-        facts['hosted'] = {}
-    if 'router' not in facts['hosted']:
-        facts['hosted']['router'] = {}
-    if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']:
-        facts['hosted']['router']['selector'] = selector
-    if 'registry' not in facts['hosted']:
-        facts['hosted']['registry'] = {}
-    if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']:
-        facts['hosted']['registry']['selector'] = selector
-    if 'metrics' not in facts['hosted']:
-        facts['hosted']['metrics'] = {}
-    if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
-        facts['hosted']['metrics']['selector'] = None
-    if 'logging' not in facts or not isinstance(facts['logging'], dict):
-        facts['logging'] = {}
-    if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']:
-        facts['logging']['selector'] = None
-    if 'etcd' not in facts['hosted']:
-        facts['hosted']['etcd'] = {}
-    if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
-        facts['hosted']['etcd']['selector'] = None
-    if 'prometheus' not in facts:
-        facts['prometheus'] = {}
-    if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']:
-        facts['prometheus']['selector'] = None
-    if 'alertmanager' not in facts['prometheus']:
-        facts['prometheus']['alertmanager'] = {}
-    # pylint: disable=line-too-long
-    if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']:
-        facts['prometheus']['alertmanager']['selector'] = None
-    if 'alertbuffer' not in facts['prometheus']:
-        facts['prometheus']['alertbuffer'] = {}
-    # pylint: disable=line-too-long
-    if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']:
-        facts['prometheus']['alertbuffer']['selector'] = None
-
-    return facts
-
-
 def set_identity_providers_if_unset(facts):
     """ Set identity_providers fact if not already present in facts dict
 
@@ -608,60 +536,6 @@ def set_aggregate_facts(facts):
     return facts
 
 
-def set_etcd_facts_if_unset(facts):
-    """
-    If using embedded etcd, loads the data directory from master-config.yaml.
-
-    If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
-
-    If anything goes wrong parsing these, the fact will not be set.
-    """
-    if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
-        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
-
-        if 'etcd_data_dir' not in etcd_facts:
-            try:
-                # Parse master config to find actual etcd data dir:
-                master_cfg_path = os.path.join(facts['common']['config_base'],
-                                               'master/master-config.yaml')
-                master_cfg_f = open(master_cfg_path, 'r')
-                config = yaml.safe_load(master_cfg_f.read())
-                master_cfg_f.close()
-
-                etcd_facts['etcd_data_dir'] = \
-                    config['etcdConfig']['storageDirectory']
-
-                facts['etcd'] = etcd_facts
-
-            # We don't want exceptions bubbling up here:
-            # pylint: disable=broad-except
-            except Exception:
-                pass
-    else:
-        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
-
-        # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
-        try:
-            # Add a fake section for parsing:
-            ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
-            ini_fp = io.StringIO(ini_str)
-            config = configparser.RawConfigParser()
-            config.readfp(ini_fp)
-            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
-            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
-                etcd_data_dir = etcd_data_dir[1:-1]
-
-            etcd_facts['etcd_data_dir'] = etcd_data_dir
-            facts['etcd'] = etcd_facts
-
-        # We don't want exceptions bubbling up here:
-        # pylint: disable=broad-except
-        except Exception:
-            pass
-
-    return facts
-
-
 def set_deployment_facts_if_unset(facts):
     """ Set Facts that vary based on deployment_type. This currently
         includes common.service_type, master.registry_url, node.registry_url,
@@ -1631,13 +1505,8 @@ class OpenShiftFacts(object):
                    'cloudprovider',
                    'common',
                    'etcd',
-                   'hosted',
                    'master',
-                   'node',
-                   'logging',
-                   'loggingops',
-                   'metrics',
-                   'prometheus']
+                   'node']
 
     # Disabling too-many-arguments, this should be cleaned up as a TODO item.
     # pylint: disable=too-many-arguments,no-value-for-parameter
@@ -1717,7 +1586,6 @@ class OpenShiftFacts(object):
         facts = migrate_oauth_template_facts(facts)
         facts['current_config'] = get_current_config(facts)
         facts = set_url_facts_if_unset(facts)
-        facts = set_selectors(facts)
         facts = set_identity_providers_if_unset(facts)
         facts = set_deployment_facts_if_unset(facts)
         facts = set_sdn_facts_if_unset(facts, self.system_facts)
@@ -1727,7 +1595,6 @@ class OpenShiftFacts(object):
         facts = build_api_server_args(facts)
         facts = set_version_facts_if_unset(facts)
         facts = set_aggregate_facts(facts)
-        facts = set_etcd_facts_if_unset(facts)
         facts = set_proxy_facts(facts)
         facts = set_builddefaults_facts(facts)
         facts = set_buildoverrides_facts(facts)
@@ -1793,178 +1660,6 @@ class OpenShiftFacts(object):
         if 'cloudprovider' in roles:
             defaults['cloudprovider'] = dict(kind=None)
 
-        if 'hosted' in roles or self.role == 'hosted':
-            defaults['hosted'] = dict(
-                etcd=dict(
-                    storage=dict(
-                        kind=None,
-                        volume=dict(
-                            name='etcd',
-                            size='1Gi'
-                        ),
-                        nfs=dict(
-                            directory='/exports',
-                            options='*(rw,root_squash)'
-                        ),
-                        host=None,
-                        access=dict(
-                            modes=['ReadWriteOnce']
-                        ),
-                        create_pv=True,
-                        create_pvc=False
-                    )
-                ),
-                registry=dict(
-                    storage=dict(
-                        kind=None,
-                        volume=dict(
-                            name='registry',
-                            size='5Gi'
-                        ),
-                        nfs=dict(
-                            directory='/exports',
-                            options='*(rw,root_squash)'),
-                        glusterfs=dict(
-                            endpoints='glusterfs-registry-endpoints',
-                            path='glusterfs-registry-volume',
-                            ips=[],
-                            readOnly=False,
-                            swap=False,
-                            swapcopy=True),
-                        host=None,
-                        access=dict(
-                            modes=['ReadWriteMany']
-                        ),
-                        create_pv=True,
-                        create_pvc=True
-                    )
-                ),
-                router=dict()
-            )
-
-            defaults['logging'] = dict(
-                storage=dict(
-                    kind=None,
-                    volume=dict(
-                        name='logging-es',
-                        size='10Gi'
-                    ),
-                    nfs=dict(
-                        directory='/exports',
-                        options='*(rw,root_squash)'
-                    ),
-                    host=None,
-                    access=dict(
-                        modes=['ReadWriteOnce']
-                    ),
-                    create_pv=True,
-                    create_pvc=False
-                )
-            )
-
-            defaults['loggingops'] = dict(
-                storage=dict(
-                    kind=None,
-                    volume=dict(
-                        name='logging-es-ops',
-                        size='10Gi'
-                    ),
-                    nfs=dict(
-                        directory='/exports',
-                        options='*(rw,root_squash)'
-                    ),
-                    host=None,
-                    access=dict(
-                        modes=['ReadWriteOnce']
-                    ),
-                    create_pv=True,
-                    create_pvc=False
-                )
-            )
-
-            defaults['metrics'] = dict(
-                deploy=False,
-                duration=7,
-                resolution='10s',
-                storage=dict(
-                    kind=None,
-                    volume=dict(
-                        name='metrics',
-                        size='10Gi'
-                    ),
-                    nfs=dict(
-                        directory='/exports',
-                        options='*(rw,root_squash)'
-                    ),
-                    host=None,
-                    access=dict(
-                        modes=['ReadWriteOnce']
-                    ),
-                    create_pv=True,
-                    create_pvc=False
-                )
-            )
-
-            defaults['prometheus'] = dict(
-                storage=dict(
-                    kind=None,
-                    volume=dict(
-                        name='prometheus',
-                        size='10Gi'
-                    ),
-                    nfs=dict(
-                        directory='/exports',
-                        options='*(rw,root_squash)'
-                    ),
-                    host=None,
-                    access=dict(
-                        modes=['ReadWriteOnce']
-                    ),
-                    create_pv=True,
-                    create_pvc=False
-                )
-            )
-
-            defaults['prometheus']['alertmanager'] = dict(
-                storage=dict(
-                    kind=None,
-                    volume=dict(
-                        name='prometheus-alertmanager',
-                        size='10Gi'
-                    ),
-                    nfs=dict(
-                        directory='/exports',
-                        options='*(rw,root_squash)'
-                    ),
-                    host=None,
-                    access=dict(
-                        modes=['ReadWriteOnce']
-                    ),
-                    create_pv=True,
-                    create_pvc=False
-                )
-            )
-
-            defaults['prometheus']['alertbuffer'] = dict(
-                storage=dict(
-                    kind=None,
-                    volume=dict(
-                        name='prometheus-alertbuffer',
-                        size='10Gi'
-                    ),
-                    nfs=dict(
-                        directory='/exports',
-                        options='*(rw,root_squash)'
-                    ),
-                    host=None,
-                    access=dict(
-                        modes=['ReadWriteOnce']
-                    ),
-                    create_pv=True,
-                    create_pvc=False
-                )
-            )
-
         return defaults
 
     def guess_host_provider(self):

+ 10 - 1
roles/openshift_hosted/defaults/main.yml

@@ -27,6 +27,9 @@ openshift_cluster_domain: 'cluster.local'
 r_openshift_hosted_router_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
 r_openshift_hosted_router_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 
+openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}"
+openshift_hosted_router_namespace: 'default'
+
 openshift_hosted_router_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
 
 openshift_hosted_router_edits:
@@ -40,13 +43,14 @@ openshift_hosted_router_edits:
   value: 21600
   action: put
 
+openshift_hosted_router_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
 openshift_hosted_routers:
 - name: router
   replicas: "{{ replicas | default(1) }}"
   namespace: default
   serviceaccount: router
   selector: "{{ openshift_hosted_router_selector | default(None) }}"
-  images: "{{ openshift_hosted_router_image | default(None)  }}"
+  images: "{{ openshift_hosted_router_registryurl }}"
   edits: "{{ openshift_hosted_router_edits }}"
   stats_port: 1936
   ports:
@@ -64,6 +68,11 @@ r_openshift_hosted_router_os_firewall_allow: []
 # Registry #
 ############
 
+openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
+openshift_hosted_registry_registryurl: "{{ openshift_hosted_images_dict[openshift_deployment_type] }}"
+openshift_hosted_registry_routecertificates: {}
+openshift_hosted_registry_routetermination: "passthrough"
+
 r_openshift_hosted_registry_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
 r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 

+ 1 - 1
roles/openshift_hosted/filter_plugins/openshift_hosted_filters.py

@@ -12,7 +12,7 @@ class FilterModule(object):
     def get_router_replicas(replicas=None, router_nodes=None):
         ''' This function will return the number of replicas
             based on the results from the defined
-            openshift.hosted.router.replicas OR
+            openshift_hosted_router_replicas OR
             the query from oc_obj on openshift nodes with a selector OR
             default to 1
 
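The precedence this docstring describes is visible at the call site in roles/openshift_hosted/tasks/router.yml, also changed in this commit; a minimal sketch mirroring the task shown later on this page:

```yaml
- name: set_fact replicas
  set_fact:
    # get_router_replicas resolves, in order:
    #   1. openshift_hosted_router_replicas, if set
    #   2. the number of nodes matching openshift_hosted_router_selector (router_nodes)
    #   3. a default of 1
    replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}"
```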

File diff suppressed because it is too large
+ 12 - 15
roles/openshift_hosted/tasks/registry.yml


+ 6 - 8
roles/openshift_hosted/tasks/router.yml

@@ -11,16 +11,14 @@
   oc_obj:
     state: list
     kind: node
-    namespace: "{{ openshift.hosted.router.namespace | default('default') }}"
-    selector: "{{ openshift.hosted.router.selector | default(omit) }}"
+    namespace: "{{ openshift_hosted_router_namespace }}"
+    selector: "{{ openshift_hosted_router_selector }}"
   register: router_nodes
-  when: openshift.hosted.router.replicas | default(none) is none
+  when: openshift_hosted_router_replicas | default(none) is none
 
 - name: set_fact replicas
   set_fact:
-    replicas: "{{ openshift.hosted.router.replicas|default(None) | get_router_replicas(router_nodes) }}"
-    openshift_hosted_router_selector: "{{ openshift.hosted.router.selector | default(None) }}"
-    openshift_hosted_router_image: "{{ openshift.hosted.router.registryurl }}"
+    replicas: "{{ openshift_hosted_router_replicas | default(None) | get_router_replicas(router_nodes) }}"
 
 - name: Get the certificate contents for router
   copy:
@@ -42,8 +40,8 @@
       signer_key: "{{ openshift_master_config_dir }}/ca.key"
       signer_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
       hostnames:
-      - "{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
-      - "*.{{ openshift_master_default_subdomain | default('router.default.svc.cluster.local') }}"
+      - "{{ openshift_master_default_subdomain }}"
+      - "*.{{ openshift_master_default_subdomain }}"
       cert: "{{ openshift_master_config_dir ~ '/openshift-router.crt' }}"
       key: "{{ openshift_master_config_dir ~ '/openshift-router.key' }}"
     with_items: "{{ openshift_hosted_routers }}"

+ 1 - 7
roles/openshift_hosted/tasks/secure.yml

@@ -1,10 +1,4 @@
 ---
-- name: Configure facts for docker-registry
-  set_fact:
-    openshift_hosted_registry_routecertificates: "{{ ('routecertificates' in openshift.hosted.registry.keys()) | ternary(openshift_hosted_registry_routecertificates, {}) }}"
-    openshift_hosted_registry_routehost: "{{ ('routehost' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routehost, False) }}"
-    openshift_hosted_registry_routetermination: "{{ ('routetermination' in openshift.hosted.registry.keys()) | ternary(openshift.hosted.registry.routetermination, 'passthrough') }}"
-
 - name: Include reencrypt route configuration
   include: secure/reencrypt.yml
   static: no
@@ -39,7 +33,7 @@
     - "{{ docker_registry_route.results[0].spec.host }}"
     - "{{ docker_registry_route.results[0].spec.host }}"
     - "{{ openshift_hosted_registry_name }}.default.svc"
     - "{{ openshift_hosted_registry_name }}.default.svc"
     - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}"
     - "{{ openshift_hosted_registry_name }}.default.svc.{{ openshift_cluster_domain }}"
-    - "{{ openshift_hosted_registry_routehost }}"
+    - "{{ openshift_hosted_registry_routehost | default(omit) }}"
     cert: "{{ docker_registry_cert_path }}"
     cert: "{{ docker_registry_cert_path }}"
     key: "{{ docker_registry_key_path }}"
     key: "{{ docker_registry_key_path }}"
     expire_days: "{{ openshift_hosted_registry_cert_expire_days }}"
     expire_days: "{{ openshift_hosted_registry_cert_expire_days }}"

File diff suppressed because it is too large
+ 5 - 5
roles/openshift_hosted/tasks/storage/glusterfs.yml


+ 1 - 1
roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml

@@ -10,7 +10,7 @@
     dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
 
 - name: Create GlusterFS registry service and endpoint
-  command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift.hosted.registry.namespace | default('default') }}"
+  command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
   with_items:
   - "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
   - "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"

+ 1 - 1
roles/openshift_hosted/tasks/storage/object_storage.yml

@@ -1,6 +1,6 @@
 ---
 - include: s3.yml
-  when: openshift.hosted.registry.storage.provider == 's3'
+  when: openshift_hosted_registry_storage_provider == 's3'
 
 - name: Ensure the registry secret exists
   oc_secret:

+ 2 - 2
roles/openshift_hosted/tasks/storage/s3.yml

@@ -2,8 +2,8 @@
 - name: Assert that S3 variables are provided for registry_config template
   assert:
     that:
-    - openshift.hosted.registry.storage.s3.bucket | default(none) is not none
-    - openshift.hosted.registry.storage.s3.region | default(none) is not none
+    - openshift_hosted_registry_storage_s3_bucket | default(none) is not none
+    - openshift_hosted_registry_storage_s3_region | default(none) is not none
     msg: |
       When using S3 storage, the following variables are required:
         openshift_hosted_registry_storage_s3_bucket
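A hypothetical inventory snippet that satisfies this assert (bucket and region values are placeholders; the provider check comes from object_storage.yml above):

```yaml
openshift_hosted_registry_storage_provider: s3                    # routes object_storage.yml into s3.yml
openshift_hosted_registry_storage_s3_bucket: my-registry-bucket   # placeholder value
openshift_hosted_registry_storage_s3_region: us-east-1            # placeholder value
```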
         openshift_hosted_registry_storage_s3_bucket

+ 1 - 1
roles/openshift_hosted/templates/v3.6/glusterfs-registry-endpoints.yml.j2

@@ -2,7 +2,7 @@
 apiVersion: v1
 kind: Endpoints
 metadata:
-  name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+  name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
 subsets:
 - addresses:
 {% for ip in openshift_hosted_registry_storage_glusterfs_ips %}

+ 1 - 1
roles/openshift_hosted/templates/v3.6/glusterfs-registry-service.yml.j2

@@ -2,7 +2,7 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+  name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
 spec:
   ports:
   - port: 1

+ 1 - 1
roles/openshift_hosted/templates/v3.7/glusterfs-registry-endpoints.yml.j2

@@ -2,7 +2,7 @@
 apiVersion: v1
 kind: Endpoints
 metadata:
-  name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+  name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
 subsets:
 - addresses:
 {% for ip in openshift_hosted_registry_storage_glusterfs_ips %}

+ 1 - 1
roles/openshift_hosted/templates/v3.7/glusterfs-registry-service.yml.j2

@@ -2,7 +2,7 @@
 apiVersion: v1
 kind: Service
 metadata:
-  name: {{ openshift.hosted.registry.storage.glusterfs.endpoints }}
+  name: {{ openshift_hosted_registry_storage_glusterfs_endpoints }}
 spec:
   ports:
   - port: 1

+ 0 - 18
roles/openshift_hosted_facts/tasks/main.yml

@@ -1,19 +1 @@
 ---
-# openshift_*_selector variables have been deprecated in favor of
-# openshift_hosted_*_selector variables.
-- set_fact:
-    openshift_hosted_router_selector: "{{ openshift_router_selector | default(openshift_hosted_infra_selector) }}"
-  when: openshift_hosted_router_selector is not defined and openshift_hosted_infra_selector is defined
-- set_fact:
-    openshift_hosted_registry_selector: "{{ openshift_registry_selector | default(openshift_hosted_infra_selector) }}"
-  when: openshift_hosted_registry_selector is not defined and openshift_hosted_infra_selector is defined
-
-- name: Set hosted facts
-  openshift_facts:
-    role: "{{ item }}"
-    openshift_env: "{{ hostvars
-                       | oo_merge_hostvars(vars, inventory_hostname)
-                       | oo_openshift_env }}"
-    openshift_env_structures:
-    - 'openshift.hosted.router.*'
-  with_items: [hosted, logging, loggingops, metrics, prometheus]

+ 0 - 54
roles/openshift_hosted_metrics/README.md

@@ -1,54 +0,0 @@
-OpenShift Metrics with Hawkular
-====================
-
-OpenShift Metrics Installation
-
-Requirements
-------------
-
-* Ansible 2.2
-* It requires subdomain fqdn to be set.
-* If persistence is enabled, then it also requires NFS.
-
-Role Variables
---------------
-
-From this role:
-
-| Name                                            | Default value         |                                                             |
-|-------------------------------------------------|-----------------------|-------------------------------------------------------------|
-| openshift_hosted_metrics_deploy                 | `False`               | If metrics should be deployed                               |
-| openshift_hosted_metrics_public_url             | null                  | Hawkular metrics public url                                 |
-| openshift_hosted_metrics_storage_nfs_directory  | `/exports`            | Root export directory.                                      |
-| openshift_hosted_metrics_storage_volume_name    | `metrics`             | Metrics volume within openshift_hosted_metrics_volume_dir   |
-| openshift_hosted_metrics_storage_volume_size    | `10Gi`                | Metrics volume size                                         |
-| openshift_hosted_metrics_storage_nfs_options    | `*(rw,root_squash)`   | NFS options for configured exports.                         |
-| openshift_hosted_metrics_duration               | `7`                   | Metrics query duration                                      |
-| openshift_hosted_metrics_resolution             | `10s`                 | Metrics resolution                                          |
-
-
-Dependencies
-------------
-openshift_facts
-openshift_examples
-openshift_master_facts
-
-Example Playbook
-----------------
-
-```
-- name: Configure openshift-metrics
-  hosts: oo_first_master
-  roles:
-  - role: openshift_hosted_metrics
-```
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Jose David Martín (j.david.nieto@gmail.com)

+ 0 - 2
roles/openshift_hosted_metrics/defaults/main.yml

@@ -1,2 +0,0 @@
----
-hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted"

+ 0 - 31
roles/openshift_hosted_metrics/handlers/main.yml

@@ -1,31 +0,0 @@
----
-- name: restart master api
-  systemd: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-  notify: Verify API Server
-
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
-  command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
-  retries: 3
-  delay: 5
-  register: result
-  until: result.rc == 0
-  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-
-- name: Verify API Server
-  # Using curl here since the uri module requires python-httplib2 and
-  # wait_for port doesn't provide health information.
-  command: >
-    curl --silent --tlsv1.2
-    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
-    {{ openshift.master.api_url }}/healthz/ready
-  args:
-    # Disables the following warning:
-    # Consider using get_url or uri module rather than running curl
-    warn: no
-  register: api_available_output
-  until: api_available_output.stdout == 'ok'
-  retries: 120
-  delay: 1
-  changed_when: false

+ 0 - 18
roles/openshift_hosted_metrics/meta/main.yaml

@@ -1,18 +0,0 @@
----
-galaxy_info:
-  author: David Martín
-  description:
-  company:
-  license: Apache License, Version 2.0
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  categories:
-  - cloud
-  - system
-dependencies:
-- { role: openshift_examples }
-- { role: openshift_facts }
-- { role: openshift_master_facts }

+ 0 - 132
roles/openshift_hosted_metrics/tasks/install.yml

@@ -1,132 +0,0 @@
----
-
-- name: Test if metrics-deployer service account exists
-  command: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace=openshift-infra
-    get serviceaccount metrics-deployer -o json
-  register: serviceaccount
-  changed_when: false
-  failed_when: false
-
-- name: Create metrics-deployer Service Account
-  shell: >
-    echo {{ metrics_deployer_sa | to_json | quote }} |
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    create -f -
-  when: serviceaccount.rc == 1
-
-- name: Test edit permissions
-  command: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}'
-  register: edit_rolebindings
-  changed_when: false
-
-- name: Add edit permission to the openshift-infra project to metrics-deployer SA
-  command: >
-    {{ openshift.common.client_binary }} adm
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    policy add-role-to-user edit
-    system:serviceaccount:openshift-infra:metrics-deployer
-  when: "'system:serviceaccount:openshift-infra:metrics-deployer' not in edit_rolebindings.stdout"
-
-- name: Test hawkular view permissions
-  command: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}'
-  register: view_rolebindings
-  changed_when: false
-
-- name: Add view permissions to hawkular SA
-  command: >
-      {{ openshift.common.client_binary }} adm
-      --config={{ openshift_hosted_metrics_kubeconfig }}
-      --namespace openshift-infra
-      policy add-role-to-user view
-      system:serviceaccount:openshift-infra:hawkular
-  when: "'system:serviceaccount:openshift-infra:hawkular' not in view_rolebindings"
-
-- name: Test cluster-reader permissions
-  command: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}'
-  register: cluster_reader_clusterrolebindings
-  changed_when: false
-
-- name: Add cluster-reader permission to the openshift-infra project to heapster SA
-  command: >
-    {{ openshift.common.client_binary }} adm
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    policy add-cluster-role-to-user cluster-reader
-    system:serviceaccount:openshift-infra:heapster
-  when: "'system:serviceaccount:openshift-infra:heapster' not in cluster_reader_clusterrolebindings.stdout"
-
-- name: Create metrics-deployer secret
-  command: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    secrets new metrics-deployer nothing=/dev/null
-  register: metrics_deployer_secret
-  changed_when: metrics_deployer_secret.rc == 0
-  failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr
-
-# TODO: extend this to allow user passed in certs or generating cert with
-# OpenShift CA
-- name: Build metrics deployer command
-  set_fact:
-    deployer_cmd: "{{ openshift.common.client_binary }} process -f \
-      {{ hosted_base }}/metrics-deployer.yaml -v \
-      HAWKULAR_METRICS_HOSTNAME={{ g_metrics_hostname }} \
-      -v USE_PERSISTENT_STORAGE={{metrics_persistence | string | lower }} \
-      -v DYNAMICALLY_PROVISION_STORAGE={{metrics_dynamic_vol | string | lower }} \
-      -v METRIC_DURATION={{ openshift.hosted.metrics.duration }} \
-      -v METRIC_RESOLUTION={{ openshift.hosted.metrics.resolution }}
-      {{ image_prefix }} \
-      {{ image_version }} \
-      -v MODE={{ deployment_mode }} \
-        | {{ openshift.common.client_binary }} --namespace openshift-infra \
-        --config={{ openshift_hosted_metrics_kubeconfig }} \
-        create -o name -f -"
-
-- name: Deploy Metrics
-  shell: "{{ deployer_cmd }}"
-  register: deploy_metrics
-  failed_when: "'already exists' not in deploy_metrics.stderr and deploy_metrics.rc != 0"
-  changed_when: deploy_metrics.rc == 0
-
-- set_fact:
-    deployer_pod: "{{ deploy_metrics.stdout[1:2] }}"
-
-# TODO: re-enable this once the metrics deployer validation issue is fixed
-# when using dynamically provisioned volumes
-- name: "Wait for image pull and deployer pod"
-  shell: >
-    {{ openshift.common.client_binary }}
-    --namespace openshift-infra
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    get {{ deploy_metrics.stdout }}
-  register: deploy_result
-  until: "{{ 'Completed' in deploy_result.stdout }}"
-  failed_when: False
-  retries: 60
-  delay: 10
-
-- name: Configure master for metrics
-  modify_yaml:
-    dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
-    yaml_key: assetConfig.metricsPublicURL
-    yaml_value: "{{ openshift_hosted_metrics_deploy_url }}"
-  notify: restart master

+ 0 - 75
roles/openshift_hosted_metrics/tasks/main.yaml

@@ -1,75 +0,0 @@
----
-- name: Create temp directory for kubeconfig
-  command: mktemp -d /tmp/openshift-ansible-XXXXXX
-  register: mktemp
-  changed_when: False
-
-- name: Record kubeconfig tmp dir
-  set_fact:
-    openshift_hosted_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
-
-- name: Copy the admin client config(s)
-  command: >
-    cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_metrics_kubeconfig }}
-  changed_when: False
-
-- name: Set hosted metrics facts
-  openshift_facts:
-    role: hosted
-    openshift_env: "{{ hostvars
-                       | oo_merge_hostvars(vars, inventory_hostname)
-                       | oo_openshift_env }}"
-    openshift_env_structures:
-    - 'openshift.hosted.metrics.*'
-
-- set_fact:
-    metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}"
-    metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}"
-    metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
-    image_prefix: "{{ '-v IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer.prefix if 'prefix' in openshift.hosted.metrics.deployer else '' }}"
-    image_version: "{{ '-v IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer.version if 'version' in openshift.hosted.metrics.deployer else '' }}"
-
-
-- name: Check for existing metrics pods
-  shell: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    get pods -l {{ item }} | grep -q Running
-  register: metrics_pods_status
-  with_items:
-  - metrics-infra=hawkular-metrics
-  - metrics-infra=heapster
-  - metrics-infra=hawkular-cassandra
-  failed_when: false
-  changed_when: false
-
-- name: Check for previous deployer
-  shell: >
-    {{ openshift.common.client_binary }}
-    --config={{ openshift_hosted_metrics_kubeconfig }}
-    --namespace openshift-infra
-    get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer-
-  register: metrics_deployer_status
-  failed_when: false
-  changed_when: false
-
-- name: Record current deployment status
-  set_fact:
-    greenfield: "{{ not metrics_deployer_status.rc == 0 }}"
-    failed_error: "{{ True if 'Error' in metrics_deployer_status.stdout else False }}"
-    metrics_running: "{{ metrics_pods_status.results | oo_collect(attribute='rc') == [0,0,0] }}"
-
-- name: Set deployment mode
-  set_fact:
-    deployment_mode: "{{ 'refresh' if (failed_error | bool or metrics_upgrade | bool) else 'deploy' }}"
-
-# TODO: handle non greenfield deployments in the future
-- include: install.yml
-  when: greenfield
-
-- name: Delete temp directory
-  file:
-    name: "{{ mktemp.stdout }}"
-    state: absent
-  changed_when: False

+ 0 - 21
roles/openshift_hosted_metrics/vars/main.yaml

@@ -1,21 +0,0 @@
----
-hawkular_permission_oc_commands:
-  - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
-  - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
-
-metrics_deployer_sa:
-  apiVersion: v1
-  kind: ServiceAccount
-  metadata:
-    name: metrics-deployer
-  secrets:
-    - name: metrics-deployer
-
-
-hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
-
-hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}true{% else %}false{% endif %}"
-
-hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
-
-metrics_upgrade: openshift.hosted.metrics.upgrade | default(False)

+ 5 - 5
roles/openshift_logging/defaults/main.yml

@@ -28,7 +28,7 @@ openshift_logging_curator_ops_memory_limit: 256Mi
 openshift_logging_curator_ops_cpu_request: 100m
 openshift_logging_curator_ops_nodeselector: {}

-openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_kibana_hostname: "{{ 'kibana.' ~ openshift_master_default_subdomain }}"
 openshift_logging_kibana_cpu_limit: null
 openshift_logging_kibana_memory_limit: 736Mi
 openshift_logging_kibana_cpu_request: 100m
@@ -54,7 +54,7 @@ openshift_logging_kibana_key: ""
 #for the public facing kibana certs
 openshift_logging_kibana_ca: ""

-openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ openshift_master_default_subdomain }}"
 openshift_logging_kibana_ops_cpu_limit: null
 openshift_logging_kibana_ops_memory_limit: 736Mi
 openshift_logging_kibana_ops_cpu_request: 100m
@@ -109,7 +109,7 @@ openshift_logging_es_config: {}

 # for exposing es to external (outside of the cluster) clients
 openshift_logging_es_allow_external: False
-openshift_logging_es_hostname: "{{ 'es.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_es_hostname: "{{ 'es.' ~ openshift_master_default_subdomain }}"

 #The absolute path on the control node to the cert file to use
 #for the public facing es certs
@@ -145,7 +145,7 @@ openshift_logging_es_ops_nodeselector: {}

 # for exposing es-ops to external (outside of the cluster) clients
 openshift_logging_es_ops_allow_external: False
-openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_es_ops_hostname: "{{ 'es-ops.' ~ openshift_master_default_subdomain }}"

 #The absolute path on the control node to the cert file to use
 #for the public facing es-ops certs
@@ -165,7 +165,7 @@ openshift_logging_storage_access_modes: ['ReadWriteOnce']
 # mux - secure_forward listener service
 openshift_logging_mux_allow_external: False
 openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
-openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
 openshift_logging_mux_port: 24284
 openshift_logging_mux_cpu_limit: null
 openshift_logging_mux_memory_limit: 512Mi
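
With the subdomain fallback removed, these hostnames now derive directly from openshift_master_default_subdomain, which must be set in the inventory. A minimal sketch of the rendered defaults, assuming the hypothetical subdomain apps.example.com:

```
# Illustrative rendering only -- apps.example.com is a hypothetical value.
openshift_logging_kibana_hostname: kibana.apps.example.com
openshift_logging_kibana_ops_hostname: kibana-ops.apps.example.com
openshift_logging_es_hostname: es.apps.example.com
openshift_logging_es_ops_hostname: es-ops.apps.example.com
openshift_logging_mux_hostname: mux.apps.example.com
```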

+ 1 - 0
roles/openshift_logging_curator/meta/main.yaml

@@ -13,3 +13,4 @@ galaxy_info:
   - cloud
 dependencies:
 - role: lib_openshift
+- role: openshift_facts

+ 1 - 0
roles/openshift_logging_elasticsearch/meta/main.yaml

@@ -13,3 +13,4 @@ galaxy_info:
   - cloud
 dependencies:
 - role: lib_openshift
+- role: openshift_facts

+ 1 - 0
roles/openshift_logging_fluentd/meta/main.yaml

@@ -13,3 +13,4 @@ galaxy_info:
   - cloud
 dependencies:
 - role: lib_openshift
+- role: openshift_facts

+ 1 - 1
roles/openshift_logging_kibana/defaults/main.yml

@@ -10,7 +10,7 @@ openshift_logging_kibana_cpu_limit: null
 openshift_logging_kibana_cpu_request: 100m
 openshift_logging_kibana_memory_limit: 736Mi

-openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
+openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain) }}"

 openshift_logging_kibana_es_host: "logging-es"
 openshift_logging_kibana_es_port: 9200

+ 1 - 0
roles/openshift_logging_kibana/meta/main.yaml

@@ -13,3 +13,4 @@ galaxy_info:
   - cloud
 dependencies:
 - role: lib_openshift
+- role: openshift_facts

+ 1 - 1
roles/openshift_logging_mux/defaults/main.yml

@@ -28,7 +28,7 @@ openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journ

 openshift_logging_mux_allow_external: False
 openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
-openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_hostname: "{{ 'mux.' ~ openshift_master_default_subdomain }}"
 openshift_logging_mux_port: 24284
 # the namespace to use for undefined projects should come first, followed by any
 # additional namespaces to create by default - users will typically not need to set this

+ 1 - 0
roles/openshift_logging_mux/meta/main.yaml

@@ -13,3 +13,4 @@ galaxy_info:
   - cloud
 dependencies:
 - role: lib_openshift
+- role: openshift_facts

+ 1 - 0
roles/openshift_master/meta/main.yml

@@ -15,3 +15,4 @@ dependencies:
 - role: lib_openshift
 - role: lib_utils
 - role: lib_os_firewall
+- role: openshift_facts

+ 1 - 1
roles/openshift_master/templates/master.yaml.v1.j2

@@ -204,7 +204,7 @@ projectConfig:
     mcsLabelsPerProject: {{ osm_mcs_labels_per_project }}
     uidAllocatorRange: "{{ osm_uid_allocator_range }}"
 routingConfig:
-  subdomain:  "{{ openshift_master_default_subdomain | default("") }}"
+  subdomain:  "{{ openshift_master_default_subdomain }}"
 serviceAccountConfig:
   limitSecretReferences: {{ openshift_master_saconfig_limitsecretreferences | default(false) }}
   managedNames:
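
With the default("") fallback removed, rendering this template requires openshift_master_default_subdomain to be defined up front. A minimal inventory sketch, using a hypothetical value:

```
# group_vars sketch -- apps.example.com is a hypothetical subdomain.
openshift_master_default_subdomain: apps.example.com
```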

+ 0 - 1
roles/openshift_master_facts/defaults/main.yml

@@ -1,5 +1,4 @@
 ---
-openshift_master_default_subdomain: "router.default.svc.cluster.local"
 openshift_master_admission_plugin_config:
   openshift.io/ImagePolicy:
     configuration:

+ 2 - 8
roles/openshift_master_facts/tasks/main.yml

@@ -1,14 +1,8 @@
 ---
-# Ensure the default sub-domain is set:
-- name: Migrate legacy osm_default_subdomain fact
-  set_fact:
-    openshift_master_default_subdomain: "{{ osm_default_subdomain | default(None) }}"
-  when: openshift_master_default_subdomain is not defined
-
 - name: Verify required variables are set
   fail:
     msg: openshift_master_default_subdomain must be set to deploy metrics
-  when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain | default("") == ""
+  when: openshift_hosted_metrics_deploy | default(false) | bool and openshift_master_default_subdomain == ""

 # NOTE: These metrics variables are unfortunately needed by both the master and the metrics roles
 # to properly configure the master-config.yaml file.
@@ -20,7 +14,7 @@
 - name: Set g_metrics_hostname
   set_fact:
     g_metrics_hostname: "{{ openshift_hosted_metrics_public_url
-                        | default('hawkular-metrics.' ~ (openshift_master_default_subdomain))
+                        | default('hawkular-metrics.' ~ openshift_master_default_subdomain)
                         | oo_hostname_from_url }}"

 - set_fact:

+ 159 - 0
roles/openshift_persistent_volumes/action_plugins/generate_pv_pvcs_list.py

@@ -0,0 +1,159 @@
+"""
+Ansible action plugin to generate pv and pvc dictionaries lists
+"""
+
+from ansible.plugins.action import ActionBase
+from ansible import errors
+
+
+class ActionModule(ActionBase):
+    """Action plugin to execute health checks."""
+
+    def get_templated(self, var_to_template):
+        """Return a properly templated ansible variable"""
+        return self._templar.template(self.task_vars.get(var_to_template))
+
+    def build_common(self, varname=None):
+        """Retrieve common variables for each pv and pvc type"""
+        volume = self.get_templated(str(varname) + '_volume_name')
+        size = self.get_templated(str(varname) + '_volume_size')
+        labels = self.task_vars.get(str(varname) + '_labels')
+        if labels:
+            labels = self._templar.template(labels)
+        else:
+            labels = dict()
+        access_modes = self.get_templated(str(varname) + '_access_modes')
+        return (volume, size, labels, access_modes)
+
+    def build_pv_nfs(self, varname=None):
+        """Build pv dictionary for nfs storage type"""
+        host = self.task_vars.get(str(varname) + '_host')
+        if host:
+            host = self._templar.template(host)
+        elif host is None:
+            groups = self.task_vars.get('groups')
+            default_group_name = self.get_templated('openshift_persistent_volumes_default_nfs_group')
+            if groups and default_group_name and default_group_name in groups and len(groups[default_group_name]) > 0:
+                host = groups[default_group_name][0]
+            else:
+                raise errors.AnsibleModuleError("|failed no storage host detected")
+        volume, size, labels, access_modes = self.build_common(varname=varname)
+        directory = self.get_templated(str(varname) + '_nfs_directory')
+        path = directory + '/' + volume
+        return dict(
+            name="{0}-volume".format(volume),
+            capacity=size,
+            labels=labels,
+            access_modes=access_modes,
+            storage=dict(
+                nfs=dict(
+                    server=host,
+                    path=path)))
+
+    def build_pv_openstack(self, varname=None):
+        """Build pv dictionary for openstack storage type"""
+        volume, size, labels, access_modes = self.build_common(varname=varname)
+        filesystem = self.get_templated(str(varname) + '_openstack_filesystem')
+        volume_id = self.get_templated(str(varname) + '_openstack_volumeID')
+        return dict(
+            name="{0}-volume".format(volume),
+            capacity=size,
+            labels=labels,
+            access_modes=access_modes,
+            storage=dict(
+                cinder=dict(
+                    fsType=filesystem,
+                    volumeID=volume_id)))
+
+    def build_pv_glusterfs(self, varname=None):
+        """Build pv dictionary for glusterfs storage type"""
+        volume, size, labels, access_modes = self.build_common(varname=varname)
+        endpoints = self.get_templated(str(varname) + '_glusterfs_endpoints')
+        path = self.get_templated(str(varname) + '_glusterfs_path')
+        read_only = self.get_templated(str(varname) + '_glusterfs_readOnly')
+        return dict(
+            name="{0}-volume".format(volume),
+            capacity=size,
+            labels=labels,
+            access_modes=access_modes,
+            storage=dict(
+                glusterfs=dict(
+                    endpoints=endpoints,
+                    path=path,
+                    readOnly=read_only)))
+
+    def build_pv_dict(self, varname=None):
+        """Check for the existence of PV variables"""
+        kind = self.task_vars.get(str(varname) + '_kind')
+        if kind:
+            kind = self._templar.template(kind)
+            create_pv = self.task_vars.get(str(varname) + '_create_pv')
+            if create_pv and self._templar.template(create_pv):
+                if kind == 'nfs':
+                    persistent_volume = self.build_pv_nfs(varname=varname)
+
+                elif kind == 'openstack':
+                    persistent_volume = self.build_pv_openstack(varname=varname)
+
+                elif kind == 'glusterfs':
+                    persistent_volume = self.build_pv_glusterfs(varname=varname)
+
+                elif kind == 'object' or kind == 'dynamic':
+                    # No PV object is generated for object or dynamic storage.
+                    persistent_volume = None
+                else:
+                    msg = "|failed invalid storage kind '{0}' for component '{1}'".format(kind, varname)
+                    raise errors.AnsibleModuleError(msg)
+                return persistent_volume
+        return None
+
+    def build_pvc_dict(self, varname=None):
+        """Check for the existence of PVC variables"""
+        kind = self.task_vars.get(str(varname) + '_kind')
+        if kind:
+            kind = self._templar.template(kind)
+            create_pv = self.task_vars.get(str(varname) + '_create_pv')
+            if create_pv:
+                create_pv = self._templar.template(create_pv)
+                create_pvc = self.task_vars.get(str(varname) + '_create_pvc')
+                if create_pvc:
+                    create_pvc = self._templar.template(create_pvc)
+                    if kind != 'object' and create_pv and create_pvc:
+                        volume, size, _, access_modes = self.build_common(varname=varname)
+                        return dict(
+                            name="{0}-claim".format(volume),
+                            capacity=size,
+                            access_modes=access_modes)
+        return None
+
+    def run(self, tmp=None, task_vars=None):
+        """Run generate_pv_pvcs_list action plugin"""
+        result = super(ActionModule, self).run(tmp, task_vars)
+        # Ignore setting self.task_vars outside of init.
+        # pylint: disable=W0201
+        self.task_vars = task_vars or {}
+
+        result["changed"] = False
+        result["failed"] = False
+        result["msg"] = "persistent_volumes list and persistent_volume_claims list created"
+        vars_to_check = ['openshift_hosted_registry_storage',
+                         'openshift_hosted_router_storage',
+                         'openshift_hosted_etcd_storage',
+                         'openshift_logging_storage',
+                         'openshift_loggingops_storage',
+                         'openshift_metrics_storage',
+                         'openshift_prometheus_storage',
+                         'openshift_prometheus_alertmanager_storage',
+                         'openshift_prometheus_alertbuffer_storage']
+        persistent_volumes = []
+        persistent_volume_claims = []
+        for varname in vars_to_check:
+            pv_dict = self.build_pv_dict(varname)
+            if pv_dict:
+                persistent_volumes.append(pv_dict)
+            pvc_dict = self.build_pvc_dict(varname)
+            if pvc_dict:
+                persistent_volume_claims.append(pvc_dict)
+        result["persistent_volumes"] = persistent_volumes
+        result["persistent_volume_claims"] = persistent_volume_claims
+        return result
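
The plugin resolves each component's storage settings purely by variable-name prefix. A minimal sketch of the NFS path for the registry component, with hypothetical values (the host, directory, size, and access modes below are illustrative, not defaults taken from this commit):

```
# Input inventory variables (hypothetical values):
openshift_hosted_registry_storage_kind: nfs
openshift_hosted_registry_storage_create_pv: true
openshift_hosted_registry_storage_host: nfs.example.com
openshift_hosted_registry_storage_nfs_directory: /exports
openshift_hosted_registry_storage_volume_name: registry
openshift_hosted_registry_storage_volume_size: 10Gi
openshift_hosted_registry_storage_access_modes: ['ReadWriteMany']

# Entry the plugin would append to result["persistent_volumes"]:
# - name: registry-volume
#   capacity: 10Gi
#   labels: {}
#   access_modes: ['ReadWriteMany']
#   storage:
#     nfs:
#       server: nfs.example.com
#       path: /exports/registry
```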

+ 9 - 0
roles/openshift_persistent_volumes/defaults/main.yml

@@ -0,0 +1,9 @@
+---
+
+openshift_persistent_volumes_default_nfs_group: 'oo_nfs_to_config'
+
+openshift_persistent_volume_extras: []
+openshift_persistent_volume_claims_extras: []
+
+glusterfs_pv: []
+glusterfs_pvc: []

+ 2 - 1
roles/openshift_persistent_volumes/meta/main.yml

@@ -9,4 +9,5 @@ galaxy_info:
   - name: EL
     versions:
     - 7
-dependencies: {}
+dependencies:
+- role: openshift_facts

+ 27 - 30
roles/openshift_persistent_volumes/tasks/main.yml

@@ -9,39 +9,36 @@
     cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
   changed_when: False

-- name: Deploy PersistentVolume definitions
-  template:
-    dest: "{{ mktemp.stdout }}/persistent-volumes.yml"
-    src: persistent-volume.yml.j2
-  when: persistent_volumes | length > 0
-  changed_when: False
+- set_fact:
+    glusterfs_pv:
+    - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-volume"
+      capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
+      access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
+      storage:
+        glusterfs:
+          endpoints: "{{ openshift_hosted_registry_storage_glusterfs_endpoints }}"
+          path: "{{ openshift_hosted_registry_storage_glusterfs_path }}"
+          readOnly: "{{ openshift_hosted_registry_storage_glusterfs_readOnly }}"
+    glusterfs_pvc:
+    - name: "{{ openshift_hosted_registry_storage_volume_name }}-glusterfs-claim"
+      capacity: "{{ openshift_hosted_registry_storage_volume_size }}"
+      access_modes: "{{ openshift_hosted_registry_storage_access_modes }}"
+  when: openshift_hosted_registry_storage_glusterfs_swap | default(False)
-- name: Create PersistentVolumes
-  command: >
-    {{ openshift.common.client_binary }} create
-    -f {{ mktemp.stdout }}/persistent-volumes.yml
-    --config={{ mktemp.stdout }}/admin.kubeconfig
-  register: pv_create_output
-  when: persistent_volumes | length > 0
-  failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
-  changed_when: ('created' in pv_create_output.stdout)
+- name: create standard pv and pvc lists
+  # generate_pv_pvcs_list is a custom action module defined in ../action_plugins
+  generate_pv_pvcs_list: {}
+  register: l_pv_pvcs_list
-- name: Deploy PersistentVolumeClaim definitions
-  template:
-    dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml"
-    src: persistent-volume-claim.yml.j2
-  when: persistent_volume_claims | length > 0
-  changed_when: False
+- include_tasks: pv.yml
+  vars:
+    l_extra_persistent_volumes: "{{ openshift_persistent_volume_extras | union(glusterfs_pv) }}"
+    persistent_volumes: "{{ l_pv_pvcs_list.persistent_volumes | union(l_extra_persistent_volumes) }}"
-- name: Create PersistentVolumeClaims
-  command: >
-    {{ openshift.common.client_binary }} create
-    -f {{ mktemp.stdout }}/persistent-volume-claims.yml
-    --config={{ mktemp.stdout }}/admin.kubeconfig
-  register: pvc_create_output
-  when: persistent_volume_claims | length > 0
-  failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
-  changed_when: ('created' in pvc_create_output.stdout)
+- include_tasks: pvc.yml
+  vars:
+    l_extra_persistent_volume_claims: "{{ openshift_persistent_volume_claims_extras | union(glusterfs_pvc) }}"
+    persistent_volume_claims: "{{ l_pv_pvcs_list.persistent_volume_claims | union(l_extra_persistent_volume_claims) }}"

 - name: Delete temp directory
   file:

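
Beyond the generated component lists, extra volumes can be injected through the unions above via openshift_persistent_volume_extras and openshift_persistent_volume_claims_extras. A minimal sketch with hypothetical values, shaped the way persistent-volume.yml.j2 expects:

```
openshift_persistent_volume_extras:
- name: example-volume             # hypothetical
  capacity: 5Gi
  access_modes: ['ReadWriteOnce']
  storage:
    nfs:
      server: nfs.example.com      # hypothetical
      path: /exports/example
openshift_persistent_volume_claims_extras:
- name: example-claim              # hypothetical
  capacity: 5Gi
  access_modes: ['ReadWriteOnce']
```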
+ 17 - 0
roles/openshift_persistent_volumes/tasks/pv.yml

@@ -0,0 +1,17 @@
+---
+- name: Deploy PersistentVolume definitions
+  template:
+    dest: "{{ mktemp.stdout }}/persistent-volumes.yml"
+    src: persistent-volume.yml.j2
+  when: persistent_volumes | length > 0
+  changed_when: False
+
+- name: Create PersistentVolumes
+  command: >
+    {{ openshift.common.client_binary }} create
+    -f {{ mktemp.stdout }}/persistent-volumes.yml
+    --config={{ mktemp.stdout }}/admin.kubeconfig
+  register: pv_create_output
+  when: persistent_volumes | length > 0
+  failed_when: ('already exists' not in pv_create_output.stderr) and ('created' not in pv_create_output.stdout)
+  changed_when: ('created' in pv_create_output.stdout)

+ 17 - 0
roles/openshift_persistent_volumes/tasks/pvc.yml

@@ -0,0 +1,17 @@
+---
+- name: Deploy PersistentVolumeClaim definitions
+  template:
+    dest: "{{ mktemp.stdout }}/persistent-volume-claims.yml"
+    src: persistent-volume-claim.yml.j2
+  when: persistent_volume_claims | length > 0
+  changed_when: False
+
+- name: Create PersistentVolumeClaims
+  command: >
+    {{ openshift.common.client_binary }} create
+    -f {{ mktemp.stdout }}/persistent-volume-claims.yml
+    --config={{ mktemp.stdout }}/admin.kubeconfig
+  register: pvc_create_output
+  when: persistent_volume_claims | length > 0
+  failed_when: ('already exists' not in pvc_create_output.stderr) and ('created' not in pvc_create_output.stdout)
+  changed_when: ('created' in pvc_create_output.stdout)

+ 1 - 1
roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2

@@ -17,5 +17,5 @@ items:
     capacity:
       storage: "{{ volume.capacity }}"
     accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }}
-    {{ volume.storage.keys()[0] }}: {{ volume.storage[volume.storage.keys()[0]] | to_padded_yaml(3, 2) }}
+    {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | to_padded_yaml(3, 2) }}
 {% endfor %}

+ 1 - 1
roles/openshift_storage_glusterfs/defaults/main.yml

@@ -45,7 +45,7 @@ openshift_storage_glusterfs_heketi_fstab: "{{ '/var/lib/heketi/fstab' | quote if
 openshift_storage_glusterfs_namespace: "{{ 'glusterfs' | quote if openshift_storage_glusterfs_is_native or openshift_storage_glusterfs_heketi_is_native else 'default' | quote }}"

 openshift_storage_glusterfs_registry_timeout: "{{ openshift_storage_glusterfs_timeout }}"
-openshift_storage_glusterfs_registry_namespace: "{{ openshift.hosted.registry.namespace | default(openshift_storage_glusterfs_namespace) }}"
+openshift_storage_glusterfs_registry_namespace: "{{ openshift_hosted_registry_namespace | default(openshift_storage_glusterfs_namespace) }}"
 openshift_storage_glusterfs_registry_is_native: "{{ openshift_storage_glusterfs_is_native }}"
 openshift_storage_glusterfs_registry_name: 'registry'
 openshift_storage_glusterfs_registry_nodeselector: "glusterfs={{ openshift_storage_glusterfs_registry_name }}-host"

+ 2 - 2
roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml

@@ -56,5 +56,5 @@
   register: registry_volume

 - name: Create GlusterFS registry volume
-  command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift.hosted.registry.storage.volume.size | replace('Gi','') }} --name={{ openshift.hosted.registry.storage.glusterfs.path }}"
-  when: "openshift.hosted.registry.storage.glusterfs.path not in registry_volume.stdout"
+  command: "{{ glusterfs_heketi_client }} volume create --size={{ openshift_hosted_registry_storage_volume_size | replace('Gi','') }} --name={{ openshift_hosted_registry_storage_glusterfs_path }}"
+  when: "openshift_hosted_registry_storage_glusterfs_path not in registry_volume.stdout"

+ 4 - 2
roles/openshift_storage_glusterfs/tasks/main.yml

@@ -10,8 +10,10 @@
   - groups.glusterfs | default([]) | count > 0

 - include: glusterfs_registry.yml
-  when:
-  - "groups.glusterfs_registry | default([]) | count > 0 or openshift.hosted.registry.storage.kind == 'glusterfs' or openshift.hosted.registry.storage.glusterfs.swap"
+  when: >
+    groups.glusterfs_registry | default([]) | count > 0
+    or (openshift_hosted_registry_storage_kind | default(none) == 'glusterfs')
+    or (openshift_hosted_registry_storage_glusterfs_swap | default(False))

 - name: Delete temp directory
   file:

+ 10 - 10
roles/openshift_storage_nfs/tasks/main.yml

@@ -20,25 +20,25 @@

 - name: Ensure exports directory exists
   file:
-    path: "{{ openshift.hosted.registry.storage.nfs.directory }}"
+    path: "{{ openshift_hosted_registry_storage_nfs_directory }}"
     state: directory

 - name: Ensure export directories exist
   file:
-    path: "{{ item.storage.nfs.directory }}/{{ item.storage.volume.name }}"
+    path: "{{ item }}"
     state: directory
     mode: 0777
     owner: nfsnobody
     group: nfsnobody
   with_items:
-    - "{{ openshift.hosted.registry }}"
-    - "{{ openshift.metrics }}"
-    - "{{ openshift.logging }}"
-    - "{{ openshift.loggingops }}"
-    - "{{ openshift.hosted.etcd }}"
-    - "{{ openshift.prometheus }}"
-    - "{{ openshift.prometheus.alertmanager }}"
-    - "{{ openshift.prometheus.alertbuffer }}"
+    - "{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }}"
+    - "{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }}"
+    - "{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }}"
+    - "{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }}"
+    - "{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }}"
+    - "{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }}"
+    - "{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }}"
+    - "{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }}"

 - name: Configure exports
   template:

+ 8 - 8
roles/openshift_storage_nfs/templates/exports.j2

@@ -1,8 +1,8 @@
-{{ openshift.hosted.registry.storage.nfs.directory }}/{{ openshift.hosted.registry.storage.volume.name }} {{ openshift.hosted.registry.storage.nfs.options }}
-{{ openshift.metrics.storage.nfs.directory }}/{{ openshift.metrics.storage.volume.name }} {{ openshift.metrics.storage.nfs.options }}
-{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
-{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
-{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
-{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }}
-{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }}
-{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}
+{{ openshift_hosted_registry_storage_nfs_directory }}/{{ openshift_hosted_registry_storage_volume_name }} {{ openshift_hosted_registry_storage_nfs_options }}
+{{ openshift_metrics_storage_nfs_directory }}/{{ openshift_metrics_storage_volume_name }} {{ openshift_metrics_storage_nfs_options }}
+{{ openshift_logging_storage_nfs_directory }}/{{ openshift_logging_storage_volume_name }} {{ openshift_logging_storage_nfs_options }}
+{{ openshift_loggingops_storage_nfs_directory }}/{{ openshift_loggingops_storage_volume_name }} {{ openshift_loggingops_storage_nfs_options }}
+{{ openshift_hosted_etcd_storage_nfs_directory }}/{{ openshift_hosted_etcd_storage_volume_name }} {{ openshift_hosted_etcd_storage_nfs_options }}
+{{ openshift_prometheus_storage_nfs_directory }}/{{ openshift_prometheus_storage_volume_name }} {{ openshift_prometheus_storage_nfs_options }}
+{{ openshift_prometheus_alertmanager_storage_nfs_directory }}/{{ openshift_prometheus_alertmanager_storage_volume_name }} {{ openshift_prometheus_alertmanager_storage_nfs_options }}
+{{ openshift_prometheus_alertbuffer_storage_nfs_directory }}/{{ openshift_prometheus_alertbuffer_storage_volume_name }} {{ openshift_prometheus_alertbuffer_storage_nfs_options }}
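
Assuming the stock defaults used elsewhere in these roles (an /exports root, a volume named registry, and *(rw,root_squash) options -- illustrative values, not verified here), the first rendered line of the exports file would look like:

```
/exports/registry *(rw,root_squash)
```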