
Clean up lib_utils

Removing unused code
Russell Teague 6 years ago
commit 805a141622
39 changed files with 1 addition and 6950 deletions
  1. roles/lib_utils/action_plugins/generate_pv_pvcs_list.py (+0, -230)
  2. roles/lib_utils/action_plugins/master_check_paths_in_config.py (+0, -132)
  3. roles/lib_utils/action_plugins/node_group_checks.py (+0, -136)
  4. roles/lib_utils/action_plugins/sanity_checks.py (+0, -538)
  5. roles/lib_utils/filter_plugins/oo_cert_expiry.py (+0, -66)
  6. roles/lib_utils/filter_plugins/oo_filters.py (+1, -631)
  7. roles/lib_utils/filter_plugins/openshift_aws_filters.py (+0, -107)
  8. roles/lib_utils/filter_plugins/openshift_hosted_filters.py (+0, -42)
  9. roles/lib_utils/filter_plugins/openshift_master.py (+0, -520)
  10. roles/lib_utils/library/delegated_serial_command.py (+0, -274)
  11. roles/lib_utils/library/get_current_openshift_version.py (+0, -169)
  12. roles/lib_utils/library/glusterfs_check_containerized.py (+0, -187)
  13. roles/lib_utils/library/kubeclient_ca.py (+0, -88)
  14. roles/lib_utils/library/modify_yaml.py (+0, -117)
  15. roles/lib_utils/library/oo_iam_kms.py (+0, -172)
  16. roles/lib_utils/library/openshift_cert_expiry.py (+0, -835)
  17. roles/lib_utils/library/repoquery.py (+0, -644)
  18. roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py (+0, -93)
  19. roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py (+0, -59)
  20. roles/lib_utils/src/ansible/repoquery.py (+0, -41)
  21. roles/lib_utils/src/class/repoquery.py (+0, -180)
  22. roles/lib_utils/src/doc/repoquery (+0, -275)
  23. roles/lib_utils/src/lib/repoquery.py (+0, -92)
  24. roles/lib_utils/src/sources.yml (+0, -9)
  25. roles/lib_utils/src/test/integration/repoquery.yml (+0, -136)
  26. roles/lib_utils/src/test/unit/test_repoquery.py (+0, -68)
  27. roles/lib_utils/test/conftest.py (+0, -172)
  28. roles/lib_utils/test/openshift_master_facts_bad_input_tests.py (+0, -57)
  29. roles/lib_utils/test/openshift_master_facts_conftest.py (+0, -54)
  30. roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py (+0, -114)
  31. roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py (+0, -80)
  32. roles/lib_utils/test/sanity_check_test.py (+0, -110)
  33. roles/lib_utils/test/test_fakeopensslclasses.py (+0, -90)
  34. roles/lib_utils/test/test_glusterfs_check_containerized.py (+0, -157)
  35. roles/lib_utils/test/test_load_and_handle_cert.py (+0, -67)
  36. roles/lib_utils/test/test_master_check_paths_in_config.py (+0, -82)
  37. roles/lib_utils/test/test_oo_filters.py (+0, -37)
  38. roles/lib_utils/test/test_sanity_checks.py (+0, -52)
  39. test/unit/modify_yaml_tests.py (+0, -37)

+ 0 - 230
roles/lib_utils/action_plugins/generate_pv_pvcs_list.py

@@ -1,230 +0,0 @@
-"""
-Ansible action plugin to generate pv and pvc dictionaries lists
-"""
-
-from ansible.plugins.action import ActionBase
-from ansible import errors
-
-
-class ActionModule(ActionBase):
-    """Action plugin to execute health checks."""
-
-    def get_templated(self, var_to_template):
-        """Return a properly templated ansible variable"""
-        return self._templar.template(self.task_vars.get(var_to_template))
-
-    def build_common(self, varname=None):
-        """Retrieve common variables for each pv and pvc type"""
-        volume = self.get_templated(str(varname) + '_volume_name')
-        size = self.get_templated(str(varname) + '_volume_size')
-        labels = self.task_vars.get(str(varname) + '_labels')
-        annotations = self.task_vars.get(str(varname) + '_annotations')
-        if labels:
-            labels = self._templar.template(labels)
-        else:
-            labels = dict()
-        if annotations:
-            annotations = self._templar.template(annotations)
-        else:
-            annotations = list()
-        access_modes = self.get_templated(str(varname) + '_access_modes')
-        return (volume, size, labels, annotations, access_modes)
-
-    def build_pv_nfs(self, varname=None):
-        """Build pv dictionary for nfs storage type"""
-        host = self.task_vars.get(str(varname) + '_host')
-        if host:
-            host = self._templar.template(host)
-        elif host is None:
-            groups = self.task_vars.get('groups')
-            default_group_name = self.get_templated('openshift_persistent_volumes_default_nfs_group')
-            if groups and default_group_name and default_group_name in groups and len(groups[default_group_name]) > 0:
-                host = groups[default_group_name][0]
-            else:
-                raise errors.AnsibleModuleError("|failed no storage host detected")
-        volume, size, labels, _, access_modes = self.build_common(varname=varname)
-        directory = self.get_templated(str(varname) + '_nfs_directory')
-        path = directory + '/' + volume
-        result = dict(
-            name="{0}-volume".format(volume),
-            capacity=size,
-            labels=labels,
-            access_modes=access_modes,
-            storage=dict(
-                nfs=dict(
-                    server=host,
-                    path=path)))
-        # Add claimref for NFS as default storageclass can be different
-        create_pvc = self.task_vars.get(str(varname) + '_create_pvc')
-        if create_pvc and self._templar.template(create_pvc):
-            result['storage']['claimName'] = "{0}-claim".format(volume)
-        return result
-
-    def build_pv_openstack(self, varname=None):
-        """Build pv dictionary for openstack storage type"""
-        volume, size, labels, _, access_modes = self.build_common(varname=varname)
-        filesystem = self.get_templated(str(varname) + '_openstack_filesystem')
-        volume_name = self.get_templated(str(varname) + '_volume_name')
-        volume_id = self.get_templated(str(varname) + '_openstack_volumeID')
-        if volume_name and not volume_id:
-            volume_id = _try_cinder_volume_id_from_name(volume_name)
-        return dict(
-            name="{0}-volume".format(volume),
-            capacity=size,
-            labels=labels,
-            access_modes=access_modes,
-            storage=dict(
-                cinder=dict(
-                    fsType=filesystem,
-                    volumeID=volume_id)))
-
-    def build_pv_glusterfs(self, varname=None):
-        """Build pv dictionary for glusterfs storage type"""
-        volume, size, labels, _, access_modes = self.build_common(varname=varname)
-        endpoints = self.get_templated(str(varname) + '_glusterfs_endpoints')
-        path = self.get_templated(str(varname) + '_glusterfs_path')
-        read_only = self.get_templated(str(varname) + '_glusterfs_readOnly')
-        result = dict(
-            name="{0}-volume".format(volume),
-            capacity=size,
-            labels=labels,
-            access_modes=access_modes,
-            storage=dict(
-                glusterfs=dict(
-                    endpoints=endpoints,
-                    path=path,
-                    readOnly=read_only)))
-        # Add claimref for glusterfs as default storageclass can be different
-        create_pvc = self.task_vars.get(str(varname) + '_create_pvc')
-        if create_pvc and self._templar.template(create_pvc):
-            result['storage']['claimName'] = "{0}-claim".format(volume)
-        return result
-
-    def build_pv_hostpath(self, varname=None):
-        """Build pv dictionary for hostpath storage type"""
-        volume, size, labels, _, access_modes = self.build_common(varname=varname)
-        # hostpath only supports ReadWriteOnce
-        if access_modes[0] != 'ReadWriteOnce':
-            msg = "Hostpath storage only supports 'ReadWriteOnce' Was given {}."
-            raise errors.AnsibleModuleError(msg.format(access_modes.join(', ')))
-        path = self.get_templated(str(varname) + '_hostpath_path')
-        return dict(
-            name="{0}-volume".format(volume),
-            capacity=size,
-            labels=labels,
-            access_modes=access_modes,
-            storage=dict(
-                hostPath=dict(
-                    path=path
-                )
-            )
-        )
-
-    def build_pv_dict(self, varname=None):
-        """Check for the existence of PV variables"""
-        kind = self.task_vars.get(str(varname) + '_kind')
-        if kind:
-            kind = self._templar.template(kind)
-            create_pv = self.task_vars.get(str(varname) + '_create_pv')
-            if create_pv and self._templar.template(create_pv):
-                if kind == 'nfs':
-                    return self.build_pv_nfs(varname=varname)
-
-                elif kind == 'openstack':
-                    return self.build_pv_openstack(varname=varname)
-
-                elif kind == 'glusterfs':
-                    return self.build_pv_glusterfs(varname=varname)
-
-                elif kind == 'hostpath':
-                    return self.build_pv_hostpath(varname=varname)
-
-                elif not (kind == 'object' or kind == 'dynamic' or kind == 'vsphere'):
-                    msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
-                        kind,
-                        varname)
-                    raise errors.AnsibleModuleError(msg)
-        return None
-
-    def build_pvc_dict(self, varname=None):
-        """Check for the existence of PVC variables"""
-        kind = self.task_vars.get(str(varname) + '_kind')
-        if kind:
-            kind = self._templar.template(kind)
-            create_pv = self.task_vars.get(str(varname) + '_create_pv')
-            if create_pv:
-                create_pv = self._templar.template(create_pv)
-                create_pvc = self.task_vars.get(str(varname) + '_create_pvc')
-                if create_pvc:
-                    create_pvc = self._templar.template(create_pvc)
-                    if kind != 'object' and create_pv and create_pvc:
-                        volume, size, _, annotations, access_modes = self.build_common(varname=varname)
-                        storageclass = self.task_vars.get(str(varname) + '_storageclass')
-                        # if storageclass is specified => use it
-                        # if kind is 'nfs' => set to empty
-                        # if any other kind => set to none
-                        if storageclass:
-                            storageclass = self._templar.template(storageclass)
-                        elif kind == 'nfs':
-                            storageclass = ''
-                        if kind == 'dynamic':
-                            storageclass = None
-                        return dict(
-                            name="{0}-claim".format(volume),
-                            capacity=size,
-                            annotations=annotations,
-                            access_modes=access_modes,
-                            storageclass=storageclass)
-        return None
-
-    def run(self, tmp=None, task_vars=None):
-        """Run generate_pv_pvcs_list action plugin"""
-        result = super(ActionModule, self).run(tmp, task_vars)
-        # Ignore setting self.task_vars outside of init.
-        # pylint: disable=W0201
-        self.task_vars = task_vars or {}
-
-        result["changed"] = False
-        result["failed"] = False
-        result["msg"] = "persistent_volumes list and persistent_volume_claims list created"
-        vars_to_check = ['openshift_hosted_registry_storage',
-                         'openshift_hosted_registry_glusterfs_storage',
-                         'openshift_hosted_router_storage',
-                         'openshift_hosted_etcd_storage',
-                         'openshift_logging_storage',
-                         'openshift_loggingops_storage',
-                         'openshift_metrics_storage']
-        persistent_volumes = []
-        persistent_volume_claims = []
-        for varname in vars_to_check:
-            pv_dict = self.build_pv_dict(varname)
-            if pv_dict:
-                persistent_volumes.append(pv_dict)
-            pvc_dict = self.build_pvc_dict(varname)
-            if pvc_dict:
-                persistent_volume_claims.append(pvc_dict)
-        result["persistent_volumes"] = persistent_volumes
-        result["persistent_volume_claims"] = persistent_volume_claims
-        return result
-
-
-def _try_cinder_volume_id_from_name(volume_name):
-    """Try to look up a Cinder volume UUID from its name.
-
-    Returns None on any failure (missing shade, auth, no such volume).
-    """
-    try:
-        import shade
-    except ImportError:
-        return None
-    try:
-        cloud = shade.openstack_cloud()
-    except shade.keystoneauth1.exceptions.ClientException:
-        return None
-    except shade.OpenStackCloudException:
-        return None
-    volume = cloud.get_volume(volume_name)
-    if volume:
-        return volume.id
-    else:
-        return None
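
For context, the facts this plugin registered looked roughly like the sketch below for an NFS-backed registry; the concrete values (volume name, size, server, access modes) are hypothetical, not taken from the repo:

    result = {
        "changed": False,
        "failed": False,
        "msg": "persistent_volumes list and persistent_volume_claims list created",
        "persistent_volumes": [{
            "name": "registry-volume",            # "{volume}-volume"
            "capacity": "10Gi",
            "labels": {},
            "access_modes": ["ReadWriteMany"],
            "storage": {"nfs": {"server": "nfs.example.com",
                                "path": "/exports/registry"}},
        }],
        "persistent_volume_claims": [{
            "name": "registry-claim",             # "{volume}-claim"
            "capacity": "10Gi",
            "annotations": [],
            "access_modes": ["ReadWriteMany"],
            "storageclass": "",                   # '' when kind == 'nfs'
        }],
    }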

+ 0 - 132
roles/lib_utils/action_plugins/master_check_paths_in_config.py

@@ -1,132 +0,0 @@
-"""
-Ansible action plugin to ensure inventory variables are set
-appropriately and no conflicting options have been provided.
-"""
-import collections
-import six
-
-from ansible.plugins.action import ActionBase
-from ansible import errors
-
-
-FAIL_MSG = """A string value that appears to be a file path located outside of
-{} has been found in /etc/origin/master/master-config.yaml.
-In 3.10 and newer, all files needed by the master must reside inside of
-those directories or a subdirectory or it will not be readable by the
-master process. Please migrate all files needed by the master into
-one of {} or a subdirectory and update your master configs before
-proceeding. The string found was: {}
-***********************
-NOTE: the following items do not need to be migrated, they will be migrated
-for you: {}"""
-
-
-ITEMS_TO_POP = (
-    ('oauthConfig', 'identityProviders'),
-)
-# Create csv string of dot-separated dictionary keys:
-# eg: 'oathConfig.identityProviders, something.else.here'
-MIGRATED_ITEMS = ", ".join([".".join(x) for x in ITEMS_TO_POP])
-
-ALLOWED_DIRS = (
-    '/dev/null',
-    '/etc/origin/master/',
-    '/var/lib/origin',
-    '/etc/origin/cloudprovider',
-    '/etc/origin/kubelet-plugins',
-    '/usr/libexec/kubernetes/kubelet-plugins',
-)
-
-ALLOWED_DIRS_STRING = ', '.join(ALLOWED_DIRS)
-
-
-def pop_migrated_fields(mastercfg):
-    """Some fields do not need to be searched because they will be migrated
-    for users automatically"""
-    # Walk down the tree and pop the specific item we migrate / don't care about
-    for item in ITEMS_TO_POP:
-        field = mastercfg
-        for sub_field in item:
-            parent_field = field
-            field = field[sub_field]
-        parent_field.pop(item[len(item) - 1])
-
-
-def do_item_check(val, strings_to_check):
-    """Check type of val, append to strings_to_check if string, otherwise if
-    it's a dictionary-like object call walk_mapping, if it's a list-like
-    object call walk_sequence, else ignore."""
-    if isinstance(val, six.string_types):
-        strings_to_check.append(val)
-    elif isinstance(val, collections.Sequence):
-        # A list-like object
-        walk_sequence(val, strings_to_check)
-    elif isinstance(val, collections.Mapping):
-        # A dictionary-like object
-        walk_mapping(val, strings_to_check)
-    # If it's not a string, list, or dictionary, we're not interested.
-
-
-def walk_sequence(items, strings_to_check):
-    """Walk recursively through a list, items"""
-    for item in items:
-        do_item_check(item, strings_to_check)
-
-
-def walk_mapping(map_to_walk, strings_to_check):
-    """Walk recursively through map_to_walk dictionary and add strings to
-    strings_to_check"""
-    for _, val in map_to_walk.items():
-        do_item_check(val, strings_to_check)
-
-
-def check_strings(strings_to_check):
-    """Check the strings we found to see if they look like file paths and if
-    they are, fail if not start with /etc/origin/master"""
-    for item in strings_to_check:
-        if item.startswith('/') or item.startswith('../'):
-            matches = 0
-            for allowed in ALLOWED_DIRS:
-                if item.startswith(allowed):
-                    matches += 1
-            if matches == 0:
-                raise errors.AnsibleModuleError(
-                    FAIL_MSG.format(ALLOWED_DIRS_STRING,
-                                    ALLOWED_DIRS_STRING,
-                                    item, MIGRATED_ITEMS))
-
-
-# pylint: disable=R0903
-class ActionModule(ActionBase):
-    """Action plugin to validate no files are needed by master that reside
-    outside of /etc/origin/master as masters will now run as pods and cannot
-    utilize files outside of that path as they will not be mounted inside the
-    containers."""
-    def run(self, tmp=None, task_vars=None):
-        """Run this action module"""
-        result = super(ActionModule, self).run(tmp, task_vars)
-
-        # self.task_vars holds all in-scope variables.
-        # Ignore setting self.task_vars outside of init.
-        # pylint: disable=W0201
-        self.task_vars = task_vars or {}
-
-        # mastercfg should be a dictionary from scraping an existing master's
-        # config yaml file.
-        mastercfg = self._task.args.get('mastercfg')
-
-        # We migrate some paths for users automatically, so we pop those.
-        pop_migrated_fields(mastercfg)
-
-        # Create an empty list to append strings from our config file to check
-        # later.
-        strings_to_check = []
-
-        walk_mapping(mastercfg, strings_to_check)
-
-        check_strings(strings_to_check)
-
-        result["changed"] = False
-        result["failed"] = False
-        result["msg"] = "Aight, configs looking good"
-        return result
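
To make the path check concrete, a minimal sketch of how the helpers above interact; the config fragment is hypothetical:

    mastercfg = {
        "servingInfo": {"certFile": "/etc/origin/master/master.server.crt",
                        "clientCA": "/home/admin/ca.crt"},
    }
    strings_to_check = []
    walk_mapping(mastercfg, strings_to_check)
    # strings_to_check now holds both path strings; the first starts with an
    # entry in ALLOWED_DIRS, the second does not, so this raises:
    check_strings(strings_to_check)  # AnsibleModuleError for '/home/admin/ca.crt'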

+ 0 - 136
roles/lib_utils/action_plugins/node_group_checks.py

@@ -1,136 +0,0 @@
-"""
-Ansible action plugin to ensure inventory variables are set
-appropriately related to openshift_node_group_name
-"""
-from ansible.plugins.action import ActionBase
-from ansible import errors
-
-# Runs on first master
-# Checks each openshift_node_group_name is found in openshift_node_groups
-# Checks that master label is present in one of those groups
-# Checks that node label is present in one of those groups
-
-
-def get_or_fail(group, key):
-    """Find a key in a group dictionary or fail"""
-    res = group.get(key)
-    if res is None:
-        msg = "Each group in openshift_node_groups must have {} key".format(key)
-        raise errors.AnsibleModuleError(msg)
-    return res
-
-
-def validate_labels(labels_found):
-    """Ensure mandatory_labels are found in the labels we found, labels_found"""
-    mandatory_labels = ('node-role.kubernetes.io/master=true',
-                        'node-role.kubernetes.io/infra=true')
-    for item in mandatory_labels:
-        if item not in labels_found:
-            msg = ("At least one group in openshift_node_groups requires the"
-                   " {} label").format(item)
-            raise errors.AnsibleModuleError(msg)
-
-
-def process_group(group, groups_found, labels_found):
-    """Validate format of each group in openshift_node_groups"""
-    name = get_or_fail(group, 'name')
-    if name in groups_found:
-        msg = ("Duplicate definition of group {} in"
-               " openshift_node_groups").format(name)
-        raise errors.AnsibleModuleError(msg)
-    groups_found.add(name)
-    labels = get_or_fail(group, 'labels')
-    if not issubclass(type(labels), list):
-        msg = "labels value of each group in openshift_node_groups must be a list"
-        raise errors.AnsibleModuleError(msg)
-    labels_found.update(labels)
-
-
-class ActionModule(ActionBase):
-    """Action plugin to execute node_group_checks."""
-    def template_var(self, hostvars, host, varname):
-        """Retrieve a variable from hostvars and template it.
-           If undefined, return None type."""
-        # We will set the current host and variable checked for easy debugging
-        # if there are any unhandled exceptions.
-        # pylint: disable=W0201
-        self.last_checked_var = varname
-        # pylint: disable=W0201
-        self.last_checked_host = host
-        res = hostvars[host].get(varname)
-        if res is None:
-            return None
-        return self._templar.template(res)
-
-    def get_node_group_name(self, hostvars, host):
-        """Ensure openshift_node_group_name is defined for nodes"""
-        group_name = self.template_var(hostvars, host, 'openshift_node_group_name')
-        if not group_name:
-            msg = "openshift_node_group_name must be defined for all nodes"
-            raise errors.AnsibleModuleError(msg)
-        return group_name
-
-    def run_check(self, hostvars, host, groups_found):
-        """Run the check for each host"""
-        group_name = self.get_node_group_name(hostvars, host)
-        if group_name not in groups_found:
-            msg = "Group: {} not found in openshift_node_groups".format(group_name)
-            raise errors.AnsibleModuleError(msg)
-
-    def run(self, tmp=None, task_vars=None):
-        """Run node_group_checks action plugin"""
-        result = super(ActionModule, self).run(tmp, task_vars)
-        result["changed"] = False
-        result["failed"] = False
-        result["msg"] = "Node group checks passed"
-        # self.task_vars holds all in-scope variables.
-        # Ignore setting self.task_vars outside of init.
-        # pylint: disable=W0201
-        self.task_vars = task_vars or {}
-
-        # pylint: disable=W0201
-        self.last_checked_host = "none"
-        # pylint: disable=W0201
-        self.last_checked_var = "none"
-
-        # check_hosts is hard-set to oo_nodes_to_config
-        check_hosts = self.task_vars['groups'].get('oo_nodes_to_config')
-        if not check_hosts:
-            result["msg"] = "skipping; oo_nodes_to_config is required for this check"
-            return result
-
-        # We need to access each host's variables
-        hostvars = self.task_vars.get('hostvars')
-        if not hostvars:
-            msg = "hostvars is undefined or empty; cannot run node group checks"
-            raise errors.AnsibleModuleError(msg)
-
-        openshift_node_groups = self.task_vars.get('openshift_node_groups')
-        if not openshift_node_groups:
-            msg = "openshift_node_groups undefined"
-            raise errors.AnsibleModuleError(msg)
-
-        openshift_node_groups = self._templar.template(openshift_node_groups)
-        groups_found = set()
-        labels_found = set()
-        # gather the groups and labels we believe should be present.
-        for group in openshift_node_groups:
-            process_group(group, groups_found, labels_found)
-
-        if len(groups_found) == 0:
-            msg = "No groups found in openshift_node_groups"
-            raise errors.AnsibleModuleError(msg)
-
-        validate_labels(labels_found)
-
-        # We loop through each host in the provided list check_hosts
-        for host in check_hosts:
-            try:
-                self.run_check(hostvars, host, groups_found)
-            except Exception as uncaught_e:
-                msg = "last_checked_host: {}, last_checked_var: {};"
-                msg = msg.format(self.last_checked_host, self.last_checked_var)
-                msg += str(uncaught_e)
-                raise errors.AnsibleModuleError(msg)
-
-        return result
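
A minimal openshift_node_groups value that passes these checks would look like the sketch below; the group names follow the project's node-config-* convention, but treat the exact values as illustrative:

    openshift_node_groups = [
        {"name": "node-config-master",
         "labels": ["node-role.kubernetes.io/master=true"]},
        {"name": "node-config-infra",
         "labels": ["node-role.kubernetes.io/infra=true"]},
        {"name": "node-config-compute",
         "labels": ["node-role.kubernetes.io/compute=true"]},
    ]
    # Each host in oo_nodes_to_config must then set openshift_node_group_name
    # to one of the names above, e.g. "node-config-compute".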

+ 0 - 538
roles/lib_utils/action_plugins/sanity_checks.py

@@ -1,538 +0,0 @@
-"""
-Ansible action plugin to ensure inventory variables are set
-appropriately and no conflicting options have been provided.
-"""
-import fnmatch
-import json
-import re
-
-from ansible.plugins.action import ActionBase
-from ansible import errors
-# pylint: disable=import-error,no-name-in-module
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-
-
-# Valid values for openshift_deployment_type
-VALID_DEPLOYMENT_TYPES = ('origin', 'openshift-enterprise')
-
-# Tuple of variable names and default values if undefined.
-NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True),
-                   ('openshift_use_flannel', False),
-                   ('openshift_use_nuage', False),
-                   ('openshift_use_contiv', False),
-                   ('openshift_use_calico', False),
-                   ('openshift_use_kuryr', False),
-                   ('openshift_use_nsx', False))
-
-ENTERPRISE_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
-v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
-v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6
-You specified openshift_image_tag={}"""
-
-ORIGIN_TAG_REGEX_ERROR = """openshift_image_tag must be in the format
-v#.#[.#-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
-You specified openshift_image_tag={}"""
-
-ORIGIN_TAG_REGEX = {'re': '(^v?\\d+\\.\\d+.*)',
-                    'error_msg': ORIGIN_TAG_REGEX_ERROR}
-ENTERPRISE_TAG_REGEX = {'re': '(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)',
-                        'error_msg': ENTERPRISE_TAG_REGEX_ERROR}
-IMAGE_TAG_REGEX = {'origin': ORIGIN_TAG_REGEX,
-                   'openshift-enterprise': ENTERPRISE_TAG_REGEX}
-
-PKG_VERSION_REGEX_ERROR = """openshift_pkg_version must be in the format
--[optional.release]. Examples: -3.6.0, -3.7.0-0.126.0.git.0.9351aae.el7 -3.11*
-You specified openshift_pkg_version={}"""
-PKG_VERSION_REGEX = {'re': '(^-.*)',
-                     'error_msg': PKG_VERSION_REGEX_ERROR}
-
-RELEASE_REGEX_ERROR = """openshift_release must be in the format
-v#[.#[.#]]. Examples: v3.9, v3.10.0
-You specified openshift_release={}"""
-RELEASE_REGEX = {'re': '(^v?\\d+(\\.\\d+(\\.\\d+)?)?$)',
-                 'error_msg': RELEASE_REGEX_ERROR}
-
-STORAGE_KIND_TUPLE = (
-    'openshift_loggingops_storage_kind',
-    'openshift_logging_storage_kind',
-    'openshift_metrics_storage_kind')
-
-IMAGE_POLICY_CONFIG_VAR = "openshift_master_image_policy_config"
-ALLOWED_REGISTRIES_VAR = "openshift_master_image_policy_allowed_registries_for_import"
-
-REMOVED_VARIABLES = (
-    ('openshift_hostname', 'Removed: See documentation'),
-    # TODO(michaelgugino): Remove in 3.12
-    ('oreg_auth_credentials_replace', 'Removed: Credentials are now always updated'),
-    ('oreg_url_master', 'oreg_url'),
-    ('oreg_url_node', 'oreg_url'),
-    ('openshift_cockpit_deployer_prefix', 'openshift_cockpit_deployer_image'),
-    ('openshift_cockpit_deployer_basename', 'openshift_cockpit_deployer_image'),
-    ('openshift_cockpit_deployer_version', 'openshift_cockpit_deployer_image'),
-    ('openshift_hosted_logging_elasticsearch_pvc_prefix', 'openshift_logging_es_pvc_prefix'),
-    ('logging_ops_hostname', 'openshift_logging_kibana_ops_hostname'),
-    ('openshift_hosted_logging_ops_hostname', 'openshift_logging_kibana_ops_hostname'),
-    ('openshift_hosted_logging_elasticsearch_cluster_size', 'logging_elasticsearch_cluster_size'),
-    ('openshift_hosted_logging_elasticsearch_ops_cluster_size', 'logging_elasticsearch_ops_cluster_size'),
-    ('openshift_hosted_logging_storage_kind', 'openshift_logging_storage_kind'),
-    ('openshift_hosted_logging_storage_host', 'openshift_logging_storage_host'),
-    ('openshift_hosted_logging_storage_labels', 'openshift_logging_storage_labels'),
-    ('openshift_hosted_logging_storage_volume_size', 'openshift_logging_storage_volume_size'),
-    ('openshift_hosted_loggingops_storage_kind', 'openshift_loggingops_storage_kind'),
-    ('openshift_hosted_loggingops_storage_host', 'openshift_loggingops_storage_host'),
-    ('openshift_hosted_loggingops_storage_labels', 'openshift_loggingops_storage_labels'),
-    ('openshift_hosted_loggingops_storage_volume_size', 'openshift_loggingops_storage_volume_size'),
-    ('openshift_hosted_logging_enable_ops_cluster', 'openshift_logging_use_ops'),
-    ('openshift_hosted_logging_image_pull_secret', 'openshift_logging_image_pull_secret'),
-    ('openshift_hosted_logging_hostname', 'openshift_logging_kibana_hostname'),
-    ('openshift_hosted_logging_kibana_nodeselector', 'openshift_logging_kibana_nodeselector'),
-    ('openshift_hosted_logging_kibana_ops_nodeselector', 'openshift_logging_kibana_ops_nodeselector'),
-    ('openshift_hosted_logging_journal_source', 'openshift_logging_fluentd_journal_source'),
-    ('openshift_hosted_logging_journal_read_from_head', 'openshift_logging_fluentd_journal_read_from_head'),
-    ('openshift_hosted_logging_fluentd_nodeselector_label', 'openshift_logging_fluentd_nodeselector'),
-    ('openshift_hosted_logging_elasticsearch_instance_ram', 'openshift_logging_es_memory_limit'),
-    ('openshift_hosted_logging_elasticsearch_nodeselector', 'openshift_logging_es_nodeselector'),
-    ('openshift_hosted_logging_elasticsearch_ops_nodeselector', 'openshift_logging_es_ops_nodeselector'),
-    ('openshift_hosted_logging_elasticsearch_ops_instance_ram', 'openshift_logging_es_ops_memory_limit'),
-    ('openshift_hosted_logging_storage_access_modes', 'openshift_logging_storage_access_modes'),
-    ('openshift_hosted_logging_master_public_url', 'openshift_logging_master_public_url'),
-    ('openshift_hosted_logging_deployer_prefix', 'openshift_logging_image_prefix'),
-    ('openshift_hosted_logging_deployer_version', 'openshift_logging_image_version'),
-    ('openshift_hosted_logging_deploy', 'openshift_logging_install_logging'),
-    ('openshift_hosted_logging_curator_nodeselector', 'openshift_logging_curator_nodeselector'),
-    ('openshift_hosted_logging_curator_ops_nodeselector', 'openshift_logging_curator_ops_nodeselector'),
-    ('openshift_hosted_metrics_storage_access_modes', 'openshift_metrics_storage_access_modes'),
-    ('openshift_hosted_metrics_storage_host', 'openshift_metrics_storage_host'),
-    ('openshift_hosted_metrics_storage_nfs_directory', 'openshift_metrics_storage_nfs_directory'),
-    ('openshift_hosted_metrics_storage_volume_name', 'openshift_metrics_storage_volume_name'),
-    ('openshift_hosted_metrics_storage_volume_size', 'openshift_metrics_storage_volume_size'),
-    ('openshift_hosted_metrics_storage_labels', 'openshift_metrics_storage_labels'),
-    ('openshift_hosted_metrics_deployer_prefix', 'openshift_metrics_image_prefix'),
-    ('openshift_hosted_metrics_deployer_version', 'openshift_metrics_image_version'),
-    ('openshift_hosted_metrics_deploy', 'openshift_metrics_install_metrics'),
-    ('openshift_hosted_metrics_storage_kind', 'openshift_metrics_storage_kind'),
-    ('openshift_hosted_metrics_public_url', 'openshift_metrics_hawkular_hostname'),
-    ('openshift_node_labels', 'openshift_node_groups[<item>].labels'),
-    ('openshift_node_kubelet_args', 'openshift_node_groups[<item>].edits'),
-)
-
-# JSON_FORMAT_VARIABLES is not intended to cover all json variables; it covers
-# the complicated json variables in hosts.example.
-JSON_FORMAT_VARIABLES = (
-    'openshift_builddefaults_json',
-    'openshift_buildoverrides_json',
-    'openshift_master_admission_plugin_config',
-    'openshift_master_audit_config',
-    'openshift_crio_docker_gc_node_selector',
-    'openshift_master_image_policy_allowed_registries_for_import',
-    'openshift_master_image_policy_config',
-    'openshift_master_oauth_templates',
-    'container_runtime_extra_storage',
-    'openshift_additional_repos',
-    'openshift_master_identity_providers',
-    'openshift_master_htpasswd_users',
-    'openshift_additional_projects',
-    'openshift_hosted_routers',
-    'openshift_node_open_ports',
-    'openshift_master_open_ports',
-)
-
-
-def to_bool(var_to_check):
-    """Determine a boolean value given the multiple
-       ways bools can be specified in ansible."""
-    # http://yaml.org/type/bool.html
-    yes_list = (True, 1, "True", "1", "true", "TRUE",
-                "Yes", "yes", "Y", "y", "YES",
-                "on", "ON", "On")
-    return var_to_check in yes_list
-
-
-def check_for_removed_vars(hostvars, host):
-    """Fails if removed variables are found"""
-    found_removed = []
-    for item in REMOVED_VARIABLES:
-        # item is a (variable_name, replacement) tuple; check the name.
-        if item[0] in hostvars[host]:
-            found_removed.append(item)
-
-    if found_removed:
-        msg = "Found removed variables: "
-        for item in found_removed:
-            msg += "{} is replaced by {}; ".format(item[0], item[1])
-        raise errors.AnsibleModuleError(msg)
-    return None
-
-
-class ActionModule(ActionBase):
-    """Action plugin to execute sanity checks."""
-    def template_var(self, hostvars, host, varname):
-        """Retrieve a variable from hostvars and template it.
-           If undefined, return None type."""
-        # We will set the current host and variable checked for easy debugging
-        # if there are any unhandled exceptions.
-        # pylint: disable=W0201
-        self.last_checked_var = varname
-        # pylint: disable=W0201
-        self.last_checked_host = host
-        res = hostvars[host].get(varname)
-        if res is None:
-            return None
-        return self._templar.template(res)
-
-    def check_openshift_deployment_type(self, hostvars, host):
-        """Ensure a valid openshift_deployment_type is set"""
-        openshift_deployment_type = self.template_var(hostvars, host,
-                                                      'openshift_deployment_type')
-        if openshift_deployment_type not in VALID_DEPLOYMENT_TYPES:
-            type_strings = ", ".join(VALID_DEPLOYMENT_TYPES)
-            msg = "openshift_deployment_type must be defined and one of {}".format(type_strings)
-            raise errors.AnsibleModuleError(msg)
-        return openshift_deployment_type
-
-    def get_allowed_registries(self, hostvars, host):
-        """Returns a list of configured allowedRegistriesForImport as a list of patterns"""
-        allowed_registries_for_import = self.template_var(hostvars, host, ALLOWED_REGISTRIES_VAR)
-        if allowed_registries_for_import is None:
-            image_policy_config = self.template_var(hostvars, host, IMAGE_POLICY_CONFIG_VAR)
-            if not image_policy_config:
-                return image_policy_config
-
-            if isinstance(image_policy_config, str):
-                try:
-                    image_policy_config = json.loads(image_policy_config)
-                except Exception:
-                    raise errors.AnsibleModuleError(
-                        "{} is not a valid json string".format(IMAGE_POLICY_CONFIG_VAR))
-
-            if not isinstance(image_policy_config, dict):
-                raise errors.AnsibleModuleError(
-                    "expected dictionary for {}, not {}".format(
-                        IMAGE_POLICY_CONFIG_VAR, type(image_policy_config)))
-
-            detailed = image_policy_config.get("allowedRegistriesForImport", None)
-            if not detailed:
-                return detailed
-
-            if not isinstance(detailed, list):
-                raise errors.AnsibleModuleError("expected list for {}['{}'], not {}".format(
-                    IMAGE_POLICY_CONFIG_VAR, "allowedRegistriesForImport",
-                    type(detailed)))
-
-            try:
-                return [i["domainName"] for i in detailed]
-            except Exception:
-                raise errors.AnsibleModuleError(
-                    "each item of allowedRegistriesForImport must be a dictionary with 'domainName' key")
-
-        if not isinstance(allowed_registries_for_import, list):
-            raise errors.AnsibleModuleError("expected list for {}, not {}".format(
-                ALLOWED_REGISTRIES_VAR, type(allowed_registries_for_import)))
-
-        return allowed_registries_for_import
-
-    def check_whitelisted_registries(self, hostvars, host):
-        """Ensure defined registries are whitelisted"""
-        allowed = self.get_allowed_registries(hostvars, host)
-        if allowed is None:
-            return
-
-        unmatched_registries = []
-        for regvar in (
-                "oreg_url"
-                "openshift_cockpit_deployer_prefix",
-                "openshift_metrics_image_prefix",
-                "openshift_logging_image_prefix",
-                "openshift_service_catalog_image_prefix",
-                "openshift_docker_insecure_registries"):
-            value = self.template_var(hostvars, host, regvar)
-            if not value:
-                continue
-            if isinstance(value, list):
-                registries = value
-            else:
-                registries = [value]
-
-            for reg in registries:
-                if not any(is_registry_match(reg, pat) for pat in allowed):
-                    unmatched_registries.append((regvar, reg))
-
-        if unmatched_registries:
-            registry_list = ", ".join(["{}:{}".format(n, v) for n, v in unmatched_registries])
-            raise errors.AnsibleModuleError(
-                "registry hostnames of the following image prefixes are not whitelisted by image"
-                " policy configuration: {}".format(registry_list))
-
-    def check_python_version(self, hostvars, host, distro):
-        """Ensure python version is 3 for Fedora and python 2 for others"""
-        ansible_python = self.template_var(hostvars, host, 'ansible_python')
-        if distro == "Fedora":
-            if ansible_python['version']['major'] != 3:
-                msg = "openshift-ansible requires Python 3 for {};".format(distro)
-                msg += " For information on enabling Python 3 with Ansible,"
-                msg += " see https://docs.ansible.com/ansible/python_3_support.html"
-                raise errors.AnsibleModuleError(msg)
-        else:
-            if ansible_python['version']['major'] != 2:
-                msg = "openshift-ansible requires Python 2 for {};".format(distro)
-
-    def check_image_tag_format(self, hostvars, host, openshift_deployment_type):
-        """Ensure openshift_image_tag is formatted correctly"""
-        openshift_image_tag = self.template_var(hostvars, host, 'openshift_image_tag')
-        if not openshift_image_tag or openshift_image_tag == 'latest':
-            return None
-        regex_to_match = IMAGE_TAG_REGEX[openshift_deployment_type]['re']
-        res = re.match(regex_to_match, str(openshift_image_tag))
-        if res is None:
-            msg = IMAGE_TAG_REGEX[openshift_deployment_type]['error_msg']
-            msg = msg.format(str(openshift_image_tag))
-            raise errors.AnsibleModuleError(msg)
-
-    def check_pkg_version_format(self, hostvars, host):
-        """Ensure openshift_pkg_version is formatted correctly"""
-        openshift_pkg_version = self.template_var(hostvars, host, 'openshift_pkg_version')
-        if not openshift_pkg_version:
-            return None
-        regex_to_match = PKG_VERSION_REGEX['re']
-        res = re.match(regex_to_match, str(openshift_pkg_version))
-        if res is None:
-            msg = PKG_VERSION_REGEX['error_msg']
-            msg = msg.format(str(openshift_pkg_version))
-            raise errors.AnsibleModuleError(msg)
-
-    def check_release_format(self, hostvars, host):
-        """Ensure openshift_release is formatted correctly"""
-        openshift_release = self.template_var(hostvars, host, 'openshift_release')
-        if not openshift_release:
-            return None
-        regex_to_match = RELEASE_REGEX['re']
-        res = re.match(regex_to_match, str(openshift_release))
-        if res is None:
-            msg = RELEASE_REGEX['error_msg']
-            msg = msg.format(str(openshift_release))
-            raise errors.AnsibleModuleError(msg)
-
-    def network_plugin_check(self, hostvars, host):
-        """Ensure only one type of network plugin is enabled"""
-        res = []
-        # Loop through each possible network plugin boolean, determine the
-        # actual boolean value, and append results into a list.
-        for plugin, default_val in NET_PLUGIN_LIST:
-            res_temp = self.template_var(hostvars, host, plugin)
-            if res_temp is None:
-                res_temp = default_val
-            res.append(to_bool(res_temp))
-
-        if sum(res) not in (0, 1):
-            plugin_str = list(zip([x[0] for x in NET_PLUGIN_LIST], res))
-
-            msg = "Host Checked: {} Only one of must be true. Found: {}".format(host, plugin_str)
-            raise errors.AnsibleModuleError(msg)
-
-    def check_hostname_vars(self, hostvars, host):
-        """Checks to ensure openshift_kubelet_name_override
-           and openshift_public_hostname
-           conform to the proper length of 63 characters or less"""
-        for varname in ('openshift_public_hostname', 'openshift_kubelet_name_override'):
-            var_value = self.template_var(hostvars, host, varname)
-            if var_value and len(var_value) > 63:
-                msg = '{} must be 63 characters or less'.format(varname)
-                raise errors.AnsibleModuleError(msg)
-
-    def check_session_auth_secrets(self, hostvars, host):
-        """Checks session_auth_secrets is correctly formatted"""
-        sas = self.template_var(hostvars, host,
-                                'openshift_master_session_auth_secrets')
-        ses = self.template_var(hostvars, host,
-                                'openshift_master_session_encryption_secrets')
-        # This variable isn't mandatory, only check if set.
-        if sas is None and ses is None:
-            return None
-
-        if not (
-                issubclass(type(sas), list) and issubclass(type(ses), list)
-        ) or len(sas) != len(ses):
-            raise errors.AnsibleModuleError(
-                'openshift_master_session_auth_secrets and '
-                'openshift_master_session_encryption_secrets must be equal-length lists')
-
-        for secret in sas:
-            if len(secret) < 32:
-                raise errors.AnsibleModuleError(
-                    'Invalid secret in openshift_master_session_auth_secrets. '
-                    'Secrets must be at least 32 characters in length.')
-
-        for secret in ses:
-            if len(secret) not in [16, 24, 32]:
-                raise errors.AnsibleModuleError(
-                    'Invalid secret in openshift_master_session_encryption_secrets. '
-                    'Secrets must be 16, 24, or 32 characters in length.')
-        return None
-
-    def check_unsupported_nfs_configs(self, hostvars, host):
-        """Fails if nfs storage is in use for any components. This check is
-           ignored if openshift_enable_unsupported_configurations=True"""
-
-        enable_unsupported = self.template_var(
-            hostvars, host, 'openshift_enable_unsupported_configurations')
-
-        if to_bool(enable_unsupported):
-            return None
-
-        for storage in STORAGE_KIND_TUPLE:
-            kind = self.template_var(hostvars, host, storage)
-            if kind == 'nfs':
-                raise errors.AnsibleModuleError(
-                    'nfs is an unsupported type for {}. '
-                    'openshift_enable_unsupported_configurations=True must '
-                    'be specified to continue with this configuration.'
-                    ''.format(storage))
-        return None
-
-    def check_htpasswd_provider(self, hostvars, host):
-        """Fails if openshift_master_identity_providers contains an entry of
-        kind HTPasswdPasswordIdentityProvider and
-        openshift_master_manage_htpasswd is False"""
-
-        manage_pass = self.template_var(
-            hostvars, host, 'openshift_master_manage_htpasswd')
-        if to_bool(manage_pass):
-            # If we manage the file, we can just generate in the new path.
-            return None
-        idps = self.template_var(
-            hostvars, host, 'openshift_master_identity_providers')
-        if not idps:
-            # If we don't find any identity_providers, nothing for us to do.
-            return None
-        old_keys = ('file', 'fileName', 'file_name', 'filename')
-        if not isinstance(idps, list):
-            raise errors.AnsibleModuleError("| not a list")
-        for idp in idps:
-            if idp['kind'] == 'HTPasswdPasswordIdentityProvider':
-                for old_key in old_keys:
-                    if idp.get(old_key) is not None:
-                        raise errors.AnsibleModuleError(
-                            'openshift_master_identity_providers contains a '
-                            'provider of kind==HTPasswdPasswordIdentityProvider '
-                            'and {} is set. Please migrate your htpasswd '
-                            'files to /etc/origin/master/htpasswd and update your '
-                            'existing master configs, and remove the {} key '
-                            'before proceeding.'.format(old_key, old_key))
-
-    def validate_json_format_vars(self, hostvars, host):
-        """Fails if invalid json format are found"""
-        found_invalid_json = []
-        for var in JSON_FORMAT_VARIABLES:
-            if var in hostvars[host]:
-                json_var = self.template_var(hostvars, host, var)
-                try:
-                    json.loads(json_var)
-                except ValueError as json_err:
-                    found_invalid_json.append([var, json_var, json_err])
-                except BaseException:
-                    pass
-
-        if found_invalid_json:
-            msg = "Found invalid json format variables:\n"
-            for item in found_invalid_json:
-                msg += "    {} specified in {} is invalid json format\n    {}".format(item[1], item[0], item[2])
-            raise errors.AnsibleModuleError(msg)
-        return None
-
-    def check_for_oreg_password(self, hostvars, host, odt):
-        """Ensure oreg_password is defined when using registry.redhat.io"""
-        reg_to_check = 'registry.redhat.io'
-        err_msg = ("oreg_auth_user and oreg_auth_password must be provided when "
-                   "deploying openshift-enterprise")
-        err_msg2 = ("oreg_auth_user and oreg_auth_password must be provided when using "
-                    "{}".format(reg_to_check))
-
-        oreg_password = self.template_var(hostvars, host, 'oreg_auth_password')
-        if oreg_password is not None:
-            # A password is defined, so we're good to go.
-            return None
-
-        oreg_url = self.template_var(hostvars, host, 'oreg_url')
-
-        if oreg_url is not None:
-            if reg_to_check in oreg_url:
-                raise errors.AnsibleModuleError(err_msg2)
-
-        elif odt == 'openshift-enterprise':
-            # We're not using an oreg_url, we're using default enterprise
-            # registry.  We require oreg_auth_user and oreg_auth_password
-            raise errors.AnsibleModuleError(err_msg)
-
-    def run_checks(self, hostvars, host):
-        """Execute the hostvars validations against host"""
-        distro = self.template_var(hostvars, host, 'ansible_distribution')
-        odt = self.check_openshift_deployment_type(hostvars, host)
-        self.check_whitelisted_registries(hostvars, host)
-        self.check_python_version(hostvars, host, distro)
-        self.check_image_tag_format(hostvars, host, odt)
-        self.check_pkg_version_format(hostvars, host)
-        self.check_release_format(hostvars, host)
-        self.network_plugin_check(hostvars, host)
-        self.check_hostname_vars(hostvars, host)
-        self.check_session_auth_secrets(hostvars, host)
-        self.check_unsupported_nfs_configs(hostvars, host)
-        self.check_htpasswd_provider(hostvars, host)
-        check_for_removed_vars(hostvars, host)
-        self.validate_json_format_vars(hostvars, host)
-        self.check_for_oreg_password(hostvars, host, odt)
-
-    def run(self, tmp=None, task_vars=None):
-        result = super(ActionModule, self).run(tmp, task_vars)
-
-        # self.task_vars holds all in-scope variables.
-        # Ignore setting self.task_vars outside of init.
-        # pylint: disable=W0201
-        self.task_vars = task_vars or {}
-
-        # pylint: disable=W0201
-        self.last_checked_host = "none"
-        # pylint: disable=W0201
-        self.last_checked_var = "none"
-
-        # self._task.args holds task parameters.
-        # check_hosts is a parameter to this plugin, and should provide
-        # a list of hosts.
-        check_hosts = self._task.args.get('check_hosts')
-        if not check_hosts:
-            msg = "check_hosts is required"
-            raise errors.AnsibleModuleError(msg)
-
-        # We need to access each host's variables
-        hostvars = self.task_vars.get('hostvars')
-        if not hostvars:
-            msg = "hostvars is undefined or empty; cannot run sanity checks"
-            raise errors.AnsibleModuleError(msg)
-
-        # We loop through each host in the provided list check_hosts
-        for host in check_hosts:
-            try:
-                self.run_checks(hostvars, host)
-            except Exception as uncaught_e:
-                msg = "last_checked_host: {}, last_checked_var: {};"
-                msg = msg.format(self.last_checked_host, self.last_checked_var)
-                msg += str(uncaught_e)
-                raise errors.AnsibleModuleError(msg)
-
-        result["changed"] = False
-        result["failed"] = False
-        result["msg"] = "Sanity Checks passed"
-
-        return result
-
-
-def is_registry_match(item, pattern):
-    """returns True if the registry matches the given whitelist pattern
-
-    Unlike in OpenShift, the comparison is done solely on hostname part
-    (excluding the port part) since the latter is much more difficult due to
-    vague definition of port defaulting based on insecure flag. Moreover, most
-    of the registries will be listed without the port and insecure flag.
-    """
-    item = "schema://" + item.split('://', 1)[-1]
-    pat = pattern.rsplit(':', 1)[0]
-    name = urlparse(item).hostname
-    return fnmatch.fnmatch(name, pat)
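
A few illustrative calls showing the hostname-only matching described in the docstring (registries and patterns are hypothetical):

    is_registry_match("docker.io/library/centos", "docker.io")           # True
    is_registry_match("registry.example.com/foo", "*.example.com:5000")  # True: the port part of the pattern is ignored
    is_registry_match("quay.io/coreos/etcd", "*.redhat.com")             # False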

+ 0 - 66
roles/lib_utils/filter_plugins/oo_cert_expiry.py

@@ -1,66 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-"""
-Custom filters for use in openshift-ansible
-"""
-
-
-# Disabling too-many-public-methods, since filter methods are necessarily
-# public
-# pylint: disable=too-many-public-methods
-class FilterModule(object):
-    """ Custom ansible filters """
-
-    @staticmethod
-    def oo_cert_expiry_results_to_json(hostvars, play_hosts):
-        """Takes results (`hostvars`) from the openshift_cert_expiry role
-check and serializes them into proper machine-readable JSON
-output. This filter parameter **MUST** be the playbook `hostvars`
-variable. The `play_hosts` parameter is so we know what to loop over
-when we're extracting the values.
-
-Returns:
-
-Results are collected into two top-level keys under the `json_results`
-dict:
-
-* `json_results.data` [dict] - Each individual host check result, keys are hostnames
-* `json_results.summary` [dict] - Summary of number of `warning` and `expired`
-certificates
-
-Example playbook usage:
-
-  - name: Generate expiration results JSON
-    run_once: yes
-    delegate_to: localhost
-    when: openshift_certificate_expiry_save_json_results|bool
-    copy:
-      content: "{{ hostvars|oo_cert_expiry_results_to_json() }}"
-      dest: "{{ openshift_certificate_expiry_json_results_path }}"
-
-        """
-        json_result = {
-            'data': {},
-            'summary': {},
-        }
-
-        for host in play_hosts:
-            json_result['data'][host] = hostvars[host]['check_results']['check_results']
-
-        total_warnings = sum([hostvars[h]['check_results']['summary']['warning'] for h in play_hosts])
-        total_expired = sum([hostvars[h]['check_results']['summary']['expired'] for h in play_hosts])
-        total_ok = sum([hostvars[h]['check_results']['summary']['ok'] for h in play_hosts])
-        total_total = sum([hostvars[h]['check_results']['summary']['total'] for h in play_hosts])
-
-        json_result['summary']['warning'] = total_warnings
-        json_result['summary']['expired'] = total_expired
-        json_result['summary']['ok'] = total_ok
-        json_result['summary']['total'] = total_total
-
-        return json_result
-
-    def filters(self):
-        """ returns a mapping of filters to methods """
-        return {
-            "oo_cert_expiry_results_to_json": self.oo_cert_expiry_results_to_json,
-        }
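
The serialized structure this filter produced looked roughly as follows; the per-certificate fields come from openshift_cert_expiry.py (not shown here), so their exact names in this sketch are illustrative:

    json_result = {
        "data": {
            # one key per host, holding that host's certificate check results
            "master1.example.com": [{"path": "/etc/origin/master/ca.crt",
                                     "days_remaining": 730,
                                     "health": "ok"}],
        },
        "summary": {"warning": 0, "expired": 0, "ok": 1, "total": 1},
    }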

+ 1 - 631
roles/lib_utils/filter_plugins/oo_filters.py

@@ -5,155 +5,16 @@
Custom filters for use in openshift-ansible
"""
import ast
-import json
-import os
-import pdb
-import random

-from base64 import b64encode
from collections import Mapping
-# pylint no-name-in-module and import-error disabled here because pylint
-# fails to properly detect the packages when installed in a virtualenv
-from distutils.util import strtobool  # pylint:disable=no-name-in-module,import-error
-from operator import itemgetter
-
-import yaml
-
from ansible import errors
-from ansible.parsing.yaml.dumper import AnsibleDumper

# pylint: disable=import-error,no-name-in-module
-from ansible.module_utils.six import iteritems, string_types, u
-# pylint: disable=import-error,no-name-in-module
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-
-HAS_OPENSSL = False
-try:
-    import OpenSSL.crypto
-    HAS_OPENSSL = True
-except ImportError:
-    pass
+from ansible.module_utils.six import string_types


# pylint: disable=C0103

-def lib_utils_oo_pdb(arg):
-    """ This pops you into a pdb instance where arg is the data passed in
-        from the filter.
-        Ex: "{{ hostvars | lib_utils_oo_pdb }}"
-    """
-    pdb.set_trace()
-    return arg
-
-
-def get_attr(data, attribute=None):
-    """ This looks up dictionary attributes of the form a.b.c and returns
-        the value.
-
-        If the key isn't present, None is returned.
-        Ex: data = {'a': {'b': {'c': 5}}}
-            attribute = "a.b.c"
-            returns 5
-    """
-    if not attribute:
-        raise errors.AnsibleFilterError("|failed expects attribute to be set")
-
-    ptr = data
-    for attr in attribute.split('.'):
-        if attr in ptr:
-            ptr = ptr[attr]
-        else:
-            ptr = None
-            break
-
-    return ptr
-
-
-def oo_flatten(data):
-    """ This filter plugin will flatten a list of lists
-    """
-    if not isinstance(data, list):
-        raise errors.AnsibleFilterError("|failed expects to flatten a List")
-
-    return [item for sublist in data for item in sublist]
-
-
-def lib_utils_oo_collect(data_list, attribute=None, filters=None):
-    """ This takes a list of dict and collects all attributes specified into a
-        list. If filter is specified then we will include all items that
-        match _ALL_ of filters.  If a dict entry is missing the key in a
-        filter it will be excluded from the match.
-        Ex: data_list = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
-                          {'a':2, 'z': 'z'},        # True, return
-                          {'a':3, 'z': 'z'},        # True, return
-                          {'a':4, 'z': 'b'},        # FAILED, obj['z'] != filters['z']
-                        ]
-            attribute = 'a'
-            filters   = {'z': 'z'}
-            returns [1, 2, 3]
-
-        This also deals with lists of lists with dict as elements.
-        Ex: data_list = [
-                          [ {'a':1, 'b':5, 'z': 'z'}, # True, return
-                            {'a':2, 'b':6, 'z': 'z'}  # True, return
-                          ],
-                          [ {'a':3, 'z': 'z'},        # True, return
-                            {'a':4, 'z': 'b'}         # FAILED, obj['z'] != filters['z']
-                          ],
-                          {'a':5, 'z': 'z'},          # True, return
-                        ]
-            attribute = 'a'
-            filters   = {'z': 'z'}
-            returns [1, 2, 3, 5]
-    """
-    if not isinstance(data_list, list):
-        raise errors.AnsibleFilterError("lib_utils_oo_collect expects to filter on a List")
-
-    if not attribute:
-        raise errors.AnsibleFilterError("lib_utils_oo_collect expects attribute to be set")
-
-    data = []
-    retval = []
-
-    for item in data_list:
-        if isinstance(item, list):
-            retval.extend(lib_utils_oo_collect(item, attribute, filters))
-        else:
-            data.append(item)
-
-    if filters is not None:
-        if not isinstance(filters, dict):
-            raise errors.AnsibleFilterError(
-                "lib_utils_oo_collect expects filter to be a dict")
-        retval.extend([get_attr(d, attribute) for d in data if (
-            all([get_attr(d, key) == filters[key] for key in filters]))])
-    else:
-        retval.extend([get_attr(d, attribute) for d in data])
-
-    retval = [val for val in retval if val is not None]
-
-    return retval
-
-
-def lib_utils_oo_select_keys_from_list(data, keys):
-    """ This returns a list, which contains the value portions for the keys
-        Ex: data = [{ 'a':1, 'b':2, 'c':3 }]
-            keys = ['a', 'c']
-            returns [1, 3]
-    """
-
-    if not isinstance(data, list):
-        raise errors.AnsibleFilterError("|lib_utils_oo_select_keys_from_list failed expects to filter on a list")
-
-    if not isinstance(keys, list):
-        raise errors.AnsibleFilterError("|lib_utils_oo_select_keys_from_list failed expects first param is a list")
-
-    # Gather up the values for the list of keys passed in
-    retval = [lib_utils_oo_select_keys(item, keys) for item in data]
-
-    return oo_flatten(retval)
-
-
 def lib_utils_oo_select_keys(data, keys):
     """ This returns a list, which contains the value portions for the keys
         Ex: data = { 'a':1, 'b':2, 'c':3 }
@@ -173,72 +34,6 @@ def lib_utils_oo_select_keys(data, keys):
     return retval
 
 
 
 
-def lib_utils_oo_prepend_strings_in_list(data, prepend):
-    """ This takes a list of strings and prepends a string to each item in the
-        list
-        Ex: data = ['cart', 'tree']
-            prepend = 'apple-'
-            returns ['apple-cart', 'apple-tree']
-    """
-    if not isinstance(data, list):
-        raise errors.AnsibleFilterError("|failed expects first param is a list")
-    if not all(isinstance(x, string_types) for x in data):
-        raise errors.AnsibleFilterError("|failed expects first param is a list"
-                                        " of strings")
-    retval = [prepend + s for s in data]
-    return retval
-
-
-def lib_utils_oo_dict_to_list_of_dict(data, key_title='key', value_title='value'):
-    """Take a dict and arrange them as a list of dicts
-
-       Input data:
-       {'region': 'infra', 'test_k': 'test_v'}
-
-       Return data:
-       [{'key': 'region', 'value': 'infra'}, {'key': 'test_k', 'value': 'test_v'}]
-
-       Written for use of the oc_label module
-    """
-    if not isinstance(data, dict):
-        # pylint: disable=line-too-long
-        raise errors.AnsibleFilterError("|failed expects first param is a dict. Got %s. Type: %s" % (str(data), str(type(data))))
-
-    rval = []
-    for label in data.items():
-        rval.append({key_title: label[0], value_title: label[1]})
-
-    return rval
-
-
-def oo_ami_selector(data, image_name):
-    """ This takes a list of amis and an image name and attempts to return
-        the latest ami.
-    """
-    if not isinstance(data, list):
-        raise errors.AnsibleFilterError("|failed expects first param is a list")
-
-    if not data:
-        return None
-    else:
-        if image_name is None or not image_name.endswith('_*'):
-            ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
-            return ami['ami_id']
-        else:
-            ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
-            ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
-            return ami['ami_id']
-
-
-def lib_utils_oo_split(string, separator=','):
-    """ This splits the input string into a list. If the input string is
-    already a list we will return it as is.
-    """
-    if isinstance(string, list):
-        return string
-    return string.split(separator)
-
-
 def lib_utils_oo_dict_to_keqv_list(data):
     """Take a dict and return a list of k=v pairs
 
 
@@ -260,173 +55,6 @@ def lib_utils_oo_dict_to_keqv_list(data):
     return ['='.join(str(e) for e in x) for x in data.items()]
 
 
 
 
-def lib_utils_oo_list_to_dict(lst, separator='='):
-    """ This converts a list of ["k=v"] to a dictionary {k: v}.
-    """
-    kvs = [i.split(separator) for i in lst]
-    return {k: v for k, v in kvs}
-
-
-def haproxy_backend_masters(hosts, port):
-    """ This takes an array of dicts and returns an array of dicts
-        to be used as a backend for the haproxy role
-    """
-    servers = []
-    for idx, host_info in enumerate(hosts):
-        server = dict(name="master%s" % idx)
-        server_ip = host_info['openshift']['common']['ip']
-        server['address'] = "%s:%s" % (server_ip, port)
-        server['opts'] = 'check'
-        servers.append(server)
-    return servers
-
-
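For reference while reviewing the removal, a minimal sketch of the backend list this helper produced (hypothetical host facts):

    hosts = [{'openshift': {'common': {'ip': '10.0.0.10'}}},
             {'openshift': {'common': {'ip': '10.0.0.11'}}}]
    # haproxy_backend_masters(hosts, 8443) ->
    # [{'name': 'master0', 'address': '10.0.0.10:8443', 'opts': 'check'},
    #  {'name': 'master1', 'address': '10.0.0.11:8443', 'opts': 'check'}]
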
-# pylint: disable=too-many-branches, too-many-nested-blocks
-def lib_utils_oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
-    """ Parses names from list of certificate hashes.
-
-        Ex: certificates = [{ "certfile": "/root/custom1.crt",
-                              "keyfile": "/root/custom1.key",
-                               "cafile": "/root/custom-ca1.crt" },
-                            { "certfile": "custom2.crt",
-                              "keyfile": "custom2.key",
-                              "cafile": "custom-ca2.crt" }]
-
-            returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
-                       "keyfile": "/etc/origin/master/named_certificates/custom1.key",
-                       "cafile": "/etc/origin/master/named_certificates/custom-ca1.crt",
-                       "names": [ "public-master-host.com",
-                                  "other-master-host.com" ] },
-                     { "certfile": "/etc/origin/master/named_certificates/custom2.crt",
-                       "keyfile": "/etc/origin/master/named_certificates/custom2.key",
-                       "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt",
-                       "names": [ "some-hostname.com" ] }]
-    """
-    if not isinstance(named_certs_dir, string_types):
-        raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")
-
-    if not isinstance(internal_hostnames, list):
-        raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
-
-    if not HAS_OPENSSL:
-        raise errors.AnsibleFilterError("|missing OpenSSL python bindings")
-
-    for certificate in certificates:
-        if 'names' in certificate.keys():
-            continue
-        else:
-            certificate['names'] = []
-
-        if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
-            raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
-                                            (certificate['certfile'], certificate['keyfile']))
-
-        try:
-            st_cert = open(certificate['certfile'], 'rt').read()
-            cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
-            certificate['names'].append(str(cert.get_subject().commonName.decode()))
-            for i in range(cert.get_extension_count()):
-                if cert.get_extension(i).get_short_name() == 'subjectAltName':
-                    for name in str(cert.get_extension(i)).split(', '):
-                        if 'DNS:' in name:
-                            certificate['names'].append(name.replace('DNS:', ''))
-        except Exception:
-            raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
-                                             "please specify certificate names in host inventory"))
-
-        certificate['names'] = list(set(certificate['names']))
-        if 'cafile' not in certificate:
-            certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
-            if not certificate['names']:
-                raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
-                                                 "detected a collision with internal hostname, please specify " +
-                                                 "certificate names in host inventory"))
-
-    for certificate in certificates:
-        # Update paths for configuration
-        certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
-        certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
-        if 'cafile' in certificate:
-            certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile']))
-    return certificates
-
-
-def lib_utils_oo_parse_certificate_san(certificate):
-    """ Parses SubjectAlternativeNames from a PEM certificate.
-
-        Ex: certificate = '''-----BEGIN CERTIFICATE-----
-                MIIEcjCCAlqgAwIBAgIBAzANBgkqhkiG9w0BAQsFADAhMR8wHQYDVQQDDBZldGNk
-                LXNpZ25lckAxNTE2ODIwNTg1MB4XDTE4MDEyNDE5MDMzM1oXDTIzMDEyMzE5MDMz
-                M1owHzEdMBsGA1UEAwwUbWFzdGVyMS5hYnV0Y2hlci5jb20wggEiMA0GCSqGSIb3
-                DQEBAQUAA4IBDwAwggEKAoIBAQD4wBdWXNI3TF1M0b0bEIGyJPvdqKeGwF5XlxWg
-                NoA1Ain/Xz0N1SW5pXW2CDo9HX+ay8DyhzR532yrBa+RO3ivNCmfnexTQinfSLWG
-                mBEdiu7HO3puR/GNm74JNyXoEKlMAIRiTGq9HPoTo7tNV5MLodgYirpHrkSutOww
-                DfFSrNjH/ehqxwQtrIOnTAHigdTOrKVdoYxqXblDEMONTPLI5LMvm4/BqnAVaOyb
-                9RUzND6lxU/ei3FbUS5IoeASOHx0l1ifxae3OeSNAimm/RIRo9rieFNUFh45TzID
-                elsdGrLB75LH/gnRVV1xxVbwPN6xW1mEwOceRMuhIArJQ2G5AgMBAAGjgbYwgbMw
-                UQYDVR0jBEowSIAUXTqN88vCI6E7wONls3QJ4/63unOhJaQjMCExHzAdBgNVBAMM
-                FmV0Y2Qtc2lnbmVyQDE1MTY4MjA1ODWCCQDMaopfom6OljAMBgNVHRMBAf8EAjAA
-                MBMGA1UdJQQMMAoGCCsGAQUFBwMBMAsGA1UdDwQEAwIFoDAdBgNVHQ4EFgQU7l05
-                OYeY3HppL6/0VJSirudj8t0wDwYDVR0RBAgwBocEwKh6ujANBgkqhkiG9w0BAQsF
-                AAOCAgEAFU8sicE5EeQsUPnFEqDvoJd1cVE+8aCBqkW0++4GsVw2A/JOJ3OBJL6r
-                BV3b1u8/e8xBNi8hPi42Q+LWBITZZ/COFyhwEAK94hcr7eZLCV2xfUdMJziP4Qkh
-                /WRN7vXHTtJ6NP/d6A22SPbtnMSt9Y6G8y9qa5HBrqIqmkYbLzDw/SdZbDbuGhRk
-                xUwg2ahXNblVoE5P6rxPONgXliA94telZ1/61iyrVaiGQb1/GUP/DRfvvR4dOCrA
-                lMosW6fm37Wdi/8iYW+aDPWGS+yVK/sjSnHNjxqvrzkfGk+COa5riT9hJ7wZY0Hb
-                YiJS74SZgZt/nnr5PI2zFRUiZLECqCkZnC/sz29i+irLabnq7Cif9Mv+TUcXWvry
-                TdJuaaYdTSMRSUkDd/c9Ife8tOr1i1xhFzDNKNkZjTVRk1MBquSXndVCDKucdfGi
-                YoWm+NDFrayw8yxK/KTHo3Db3lu1eIXTHxriodFx898b//hysHr4hs4/tsEFUTZi
-                705L2ScIFLfnyaPby5GK/3sBIXtuhOFM3QV3JoYKlJB5T6wJioVoUmSLc+UxZMeE
-                t9gGVQbVxtLvNHUdW7uKQ5pd76nIJqApQf8wg2Pja8oo56fRZX2XLt8nm9cswcC4
-                Y1mDMvtfxglQATwMTuoKGdREuu1mbdb8QqdyQmZuMa72q+ax2kQ=
-                -----END CERTIFICATE-----'''
-
-            returns ['192.168.122.186']
-    """
-
-    if not HAS_OPENSSL:
-        raise errors.AnsibleFilterError("|missing OpenSSL python bindings")
-
-    names = []
-
-    try:
-        lcert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate)
-        for i in range(lcert.get_extension_count()):
-            if lcert.get_extension(i).get_short_name() == 'subjectAltName':
-                sanstr = str(lcert.get_extension(i))
-                sanstr = sanstr.replace('DNS:', '')
-                sanstr = sanstr.replace('IP Address:', '')
-                names = sanstr.split(', ')
-    except Exception:
-        raise errors.AnsibleFilterError("|failed to parse certificate")
-
-    return names
-
-
-def lib_utils_oo_generate_secret(num_bytes):
-    """ generate a session secret """
-
-    if not isinstance(num_bytes, int):
-        raise errors.AnsibleFilterError("|failed expects num_bytes is int")
-
-    return b64encode(os.urandom(num_bytes)).decode('utf-8')
-
-
-def lib_utils_to_padded_yaml(data, level=0, indent=2, **kw):
-    """ returns a yaml snippet padded to match the indent level you specify """
-    if data in [None, ""]:
-        return ""
-
-    try:
-        transformed = u(yaml.dump(data, indent=indent, allow_unicode=True,
-                                  default_flow_style=False,
-                                  Dumper=AnsibleDumper, **kw))
-        padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
-        return "\n{0}".format(padded)
-    except Exception as my_e:
-        raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
-
-
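A quick illustration of the padding contract above: each line of the dumped YAML is indented by level * indent spaces and the result leads with a newline, so it can be appended directly after a key in a template. Sketch, assuming plain yaml.dump behavior:

    # lib_utils_to_padded_yaml({'foo': {'bar': 1}}, level=2, indent=2)
    # -> '\n    foo:\n      bar: 1'
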
 def lib_utils_oo_image_tag_to_rpm_version(version, include_dash=False):
     """ Convert an image tag string to an RPM version if necessary
         Empty strings and strings that are already in rpm version format
@@ -448,240 +76,6 @@ def lib_utils_oo_image_tag_to_rpm_version(version, include_dash=False):
     return version
 
 
 
 
-def lib_utils_oo_hostname_from_url(url):
-    """ Returns the hostname contained in a URL
-
-        Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
-    """
-    if not isinstance(url, string_types):
-        raise errors.AnsibleFilterError("|failed expects a string or unicode")
-    parse_result = urlparse(url)
-    if parse_result.netloc != '':
-        return parse_result.netloc
-    else:
-        # netloc wasn't parsed, assume url was missing scheme and path
-        return parse_result.path
-
-
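The fallback branch above matters because urlparse() puts a scheme-less URL entirely into .path; a short sketch:

    from ansible.module_utils.six.moves.urllib.parse import urlparse
    urlparse('https://ose3-master.example.com/v1/api').netloc  # 'ose3-master.example.com'
    urlparse('ose3-master.example.com').netloc                 # '' -> filter returns .path instead
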
-# pylint: disable=invalid-name, unused-argument
-def lib_utils_oo_loadbalancer_frontends(
-        api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
-    """TODO: Document me."""
-    loadbalancer_frontends = [{'name': 'atomic-openshift-api',
-                               'mode': 'tcp',
-                               'options': ['tcplog'],
-                               'binds': ["*:{0}".format(api_port)],
-                               'default_backend': 'atomic-openshift-api'}]
-    if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
-        loadbalancer_frontends.append({'name': 'nuage-monitor',
-                                       'mode': 'tcp',
-                                       'options': ['tcplog'],
-                                       'binds': ["*:{0}".format(nuage_rest_port)],
-                                       'default_backend': 'nuage-monitor'})
-    return loadbalancer_frontends
-
-
-# pylint: disable=invalid-name
-def lib_utils_oo_loadbalancer_backends(
-        api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
-    """TODO: Document me."""
-    loadbalancer_backends = [{'name': 'atomic-openshift-api',
-                              'mode': 'tcp',
-                              'option': 'tcplog',
-                              'balance': 'source',
-                              'servers': haproxy_backend_masters(servers_hostvars, api_port)}]
-    if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
-        # pylint: disable=line-too-long
-        loadbalancer_backends.append({'name': 'nuage-monitor',
-                                      'mode': 'tcp',
-                                      'option': 'tcplog',
-                                      'balance': 'source',
-                                      'servers': haproxy_backend_masters(servers_hostvars, nuage_rest_port)})
-    return loadbalancer_backends
-
-
-def lib_utils_oo_random_word(length, source='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
-    """Generates a random string of given length from a set of alphanumeric characters.
-       The default source uses [a-z][A-Z][0-9]
-       Ex:
-       - lib_utils_oo_random_word(3)                => aB9
-       - lib_utils_oo_random_word(4, source='012')  => 0120
-    """
-    return ''.join(random.choice(source) for i in range(length))
-
-
-def lib_utils_oo_selector_to_string_list(user_dict):
-    """Convert a dict of selectors to a key=value list of strings
-
-Given input of {'region': 'infra', 'zone': 'primary'} returns a list
-of items as ['region=infra', 'zone=primary']
-    """
-    selectors = []
-    for key in user_dict:
-        selectors.append("{}={}".format(key, user_dict[key]))
-    return selectors
-
-
-def lib_utils_oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
-    """Parse the Service Account Secrets list, `sa_secrets`, (as from
-oc_serviceaccount_secret:state=list) and return the name of the secret
-containing the `secret_hint` string. For example, by default this will
-return the name of the secret holding the SA bearer token.
-
-Only provide the 'results' object to this filter. This filter expects
-to receive a list like this:
-
-    [
-        {
-            "name": "management-admin-dockercfg-p31s2"
-        },
-        {
-            "name": "management-admin-token-bnqsh"
-        }
-    ]
-
-
-Returns:
-
-* `secret_name` [string] - The name of the secret matching the
-  `secret_hint` parameter. By default this is the secret holding the
-  SA's bearer token.
-
-Example playbook usage:
-
-Register the return value from oc_serviceaccount_secret and pass
-that result to this filter plugin.
-
-    - name: Get all SA Secrets
-      oc_serviceaccount_secret:
-        state: list
-        service_account: management-admin
-        namespace: management-infra
-      register: sa
-
-    - name: Save the SA bearer token secret name
-      set_fact:
-        management_token: "{{ sa.results | lib_utils_oo_filter_sa_secrets }}"
-
-    - name: Get the SA bearer token value
-      oc_secret:
-        state: list
-        name: "{{ management_token }}"
-        namespace: management-infra
-        decode: true
-      register: sa_secret
-
-    - name: Print the bearer token value
-      debug:
-        var: sa_secret.results.decoded.token
-
-    """
-    secret_name = None
-
-    for secret in sa_secrets:
-        # each secret is a hash
-        if secret['name'].find(secret_hint) == -1:
-            continue
-        else:
-            secret_name = secret['name']
-            break
-
-    return secret_name
-
-
-def lib_utils_oo_l_of_d_to_csv(input_list):
-    """Map a list of dictionaries, input_list, into a csv string
-    of json values.
-
-    Example input:
-    [{'var1': 'val1', 'var2': 'val2'}, {'var1': 'val3', 'var2': 'val4'}]
-    Example output:
-    u'{"var1": "val1", "var2": "val2"},{"var1": "val3", "var2": "val4"}'
-    """
-    return ','.join(json.dumps(x) for x in input_list)
-
-
-def map_from_pairs(source, delim="="):
-    ''' Returns a dict parsed from comma-separated key<delim>value pairs '''
-    if source == '':
-        return dict()
-
-    return dict(item.split(delim) for item in source.split(","))
-
-
-def map_to_pairs(source, delim="="):
-    ''' Returns a comma separated str given the source as a dict '''
-
-    # Some default selectors are empty strings.
-    if source == {} or source == '':
-        return str()
-
-    return ','.join(["{}{}{}".format(key, delim, value) for key, value in iteritems(source)])
-
-
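map_from_pairs and map_to_pairs were simple inverses for selector strings; a round-trip sketch:

    map_from_pairs('region=infra,zone=default')
    # -> {'region': 'infra', 'zone': 'default'}
    map_to_pairs({'region': 'infra', 'zone': 'default'})
    # -> 'region=infra,zone=default' (order follows dict iteration)
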
-def lib_utils_oo_etcd_host_urls(hosts, use_ssl=True, port='2379'):
-    '''Return a list of urls for etcd hosts'''
-    urls = []
-    port = str(port)
-    proto = "https://" if use_ssl else "http://"
-    for host in hosts:
-        url_string = "{}{}:{}".format(proto, host, port)
-        urls.append(url_string)
-    return urls
-
-
-def lib_utils_mutate_htpass_provider(idps):
-    '''Updates identityProviders list to mutate filename of htpasswd auth
-    to hardcode filename = /etc/origin/master/htpasswd'''
-    old_keys = ('filename', 'fileName', 'file_name')
-    for idp in idps:
-        if 'provider' in idp:
-            idp_p = idp['provider']
-            if idp_p['kind'] == 'HTPasswdPasswordIdentityProvider':
-                for old_key in old_keys:
-                    if old_key in idp_p:
-                        idp_p.pop(old_key)
-                idp_p['file'] = '/etc/origin/master/htpasswd'
-    return idps
-
-
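Sketch of the normalization this helper applied, with a hypothetical inventory-style provider entry:

    idps = [{'name': 'htpasswd_auth',
             'provider': {'kind': 'HTPasswdPasswordIdentityProvider',
                          'filename': '/home/user/htpasswd'}}]
    # lib_utils_mutate_htpass_provider(idps) pops filename/fileName/file_name
    # and sets idps[0]['provider']['file'] = '/etc/origin/master/htpasswd'
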
-def lib_utils_oo_oreg_image(image_default, oreg_url):
-    '''Converts default image string to utilize oreg_url, if defined.
-       oreg_url should be passed in as string "None" if undefined.
-
-       Example input:  "quay.io/coreos/etcd:v99",
-                       "example.com/openshift/origin-${component}:${version}"
-       Example output: "example.com/coreos/etcd:v99"'''
-    # if no oreg_url is specified, we just return the original default
-    if oreg_url == 'None':
-        return image_default
-    oreg_parts = oreg_url.rsplit('/', 2)
-    if len(oreg_parts) < 2:
-        raise errors.AnsibleFilterError("oreg_url malformed: {}".format(oreg_url))
-    if not (len(oreg_parts) >= 3 and '.' in oreg_parts[0]):
-        # oreg_url does not include host information; we'll just return the default image
-        return image_default
-
-    image_parts = image_default.split('/')
-    if len(image_parts) < 3:
-        raise errors.AnsibleFilterError("default image dictionary malformed, do not adjust this value.")
-    return '/'.join([oreg_parts[0], image_parts[1], image_parts[2]])
-
-
-def lib_utils_oo_list_of_dict_to_dict_from_key(input_list, keyname):
-    '''Converts a list of dictionaries to a dictionary with keyname: dictionary
-
-       Example input: [{'name': 'first', 'url': 'x.com'}, {'name': 'second', 'url': 'y.com'}],
-                      'name'
-       Example output: {'first': {'url': 'x.com', 'name': 'first'}, 'second': {'url': 'y.com', 'name': 'second'}}'''
-    output_dict = {}
-    for item in input_list:
-        retrieved_val = item.get(keyname)
-        if retrieved_val is not None:
-            output_dict[retrieved_val] = item
-    return output_dict
-
-
 class FilterModule(object):
     """ Custom ansible filter mapping """
 
 
@@ -690,30 +84,6 @@ class FilterModule(object):
         """ returns a mapping of filters to methods """
         """ returns a mapping of filters to methods """
         return {
         return {
             "lib_utils_oo_select_keys": lib_utils_oo_select_keys,
             "lib_utils_oo_select_keys": lib_utils_oo_select_keys,
-            "lib_utils_oo_select_keys_from_list": lib_utils_oo_select_keys_from_list,
-            "lib_utils_oo_collect": lib_utils_oo_collect,
-            "lib_utils_oo_pdb": lib_utils_oo_pdb,
-            "lib_utils_oo_prepend_strings_in_list": lib_utils_oo_prepend_strings_in_list,
-            "lib_utils_oo_dict_to_list_of_dict": lib_utils_oo_dict_to_list_of_dict,
-            "lib_utils_oo_split": lib_utils_oo_split,
             "lib_utils_oo_dict_to_keqv_list": lib_utils_oo_dict_to_keqv_list,
             "lib_utils_oo_dict_to_keqv_list": lib_utils_oo_dict_to_keqv_list,
-            "lib_utils_oo_list_to_dict": lib_utils_oo_list_to_dict,
-            "lib_utils_oo_parse_named_certificates": lib_utils_oo_parse_named_certificates,
-            "lib_utils_oo_parse_certificate_san": lib_utils_oo_parse_certificate_san,
-            "lib_utils_oo_generate_secret": lib_utils_oo_generate_secret,
             "lib_utils_oo_image_tag_to_rpm_version": lib_utils_oo_image_tag_to_rpm_version,
             "lib_utils_oo_image_tag_to_rpm_version": lib_utils_oo_image_tag_to_rpm_version,
-            "lib_utils_oo_hostname_from_url": lib_utils_oo_hostname_from_url,
-            "lib_utils_oo_loadbalancer_frontends": lib_utils_oo_loadbalancer_frontends,
-            "lib_utils_oo_loadbalancer_backends": lib_utils_oo_loadbalancer_backends,
-            "lib_utils_to_padded_yaml": lib_utils_to_padded_yaml,
-            "lib_utils_oo_random_word": lib_utils_oo_random_word,
-            "lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list,
-            "lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets,
-            "lib_utils_oo_l_of_d_to_csv": lib_utils_oo_l_of_d_to_csv,
-            "map_from_pairs": map_from_pairs,
-            "map_to_pairs": map_to_pairs,
-            "lib_utils_oo_etcd_host_urls": lib_utils_oo_etcd_host_urls,
-            "lib_utils_mutate_htpass_provider": lib_utils_mutate_htpass_provider,
-            "lib_utils_oo_oreg_image": lib_utils_oo_oreg_image,
-            "lib_utils_oo_list_of_dict_to_dict_from_key": lib_utils_oo_list_of_dict_to_dict_from_key,
         }
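
Only three filters survive this cleanup. For reference, a sketch of their behavior (the image-tag conversion assumes the usual strip-leading-'v' handling; its body is elided in this hunk):

    lib_utils_oo_select_keys({'a': 1, 'b': 2, 'c': 3}, ['a', 'c'])       # [1, 3]
    lib_utils_oo_dict_to_keqv_list({'region': 'infra'})                  # ['region=infra']
    lib_utils_oo_image_tag_to_rpm_version('v3.11.0', include_dash=True)  # '-3.11.0'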

+ 0 - 107
roles/lib_utils/filter_plugins/openshift_aws_filters.py

@@ -1,107 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use in openshift_aws
-'''
-
-from ansible import errors
-
-
-class FilterModule(object):
-    ''' Custom ansible filters for use by openshift_aws role'''
-
-    @staticmethod
-    def subnet_count_list(size, subnets):
-        """This function will modify create a list of subnets."""
-        items = {}
-        count = 0
-        for _ in range(0, int(size)):
-            if subnets[count]['subnets'][0]['subnet_id'] in items:
-                items[subnets[count]['subnets'][0]['subnet_id']] = \
-                    items[subnets[count]['subnets'][0]['subnet_id']] + 1
-            else:
-                items[subnets[count]['subnets'][0]['subnet_id']] = 1
-            if count < (len(subnets) - 1):
-                count = count + 1
-            else:
-                count = 0
-        return items
-
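The counting above is round-robin; a sketch with hypothetical subnet ids:

    subnets = [{'subnets': [{'subnet_id': 'subnet-a'}]},
               {'subnets': [{'subnet_id': 'subnet-b'}]}]
    # subnet_count_list(3, subnets) cycles a, b, a ->
    # {'subnet-a': 2, 'subnet-b': 1}
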
-    @staticmethod
-    def ec2_to_asg_tag(ec2_tag_info):
-        ''' This function converts an ec2 tag list into a list of asg tag dicts.'''
-        tags = []
-        for tag in ec2_tag_info:
-            for key in tag:
-                if 'deployment_serial' in key:
-                    l_dict = {'tags': []}
-                    l_dict['tags'].append({'key': 'deployment_serial',
-                                           'value': tag[key]})
-                    tags.append(l_dict.copy())
-
-        return tags
-
-    @staticmethod
-    def scale_groups_serial(scale_group_info, upgrade=False):
-        ''' This function will determine what the deployment serial should be and return it
-
-          Search through the tags and find the deployment_serial tag. Once found,
-          determine if an increment is needed during an upgrade.
-          if upgrade is true then increment the serial and return it
-          else return the serial
-        '''
-        if scale_group_info == []:
-            return 1
-
-        scale_group_info = scale_group_info[0]
-
-        if not isinstance(scale_group_info, dict):
-            raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict")
-
-        serial = None
-
-        for tag in scale_group_info['tags']:
-            if tag['key'] == 'deployment_serial':
-                serial = int(tag['value'])
-                if upgrade:
-                    serial += 1
-                break
-        else:
-            raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found")
-
-        return serial
-
-    @staticmethod
-    def scale_groups_match_capacity(scale_group_info):
-        ''' This function will verify that the scale group instance count matches
-            the scale group desired capacity
-
-        '''
-        for scale_group in scale_group_info:
-            if scale_group['desired_capacity'] != len(scale_group['instances']):
-                return False
-
-        return True
-
-    @staticmethod
-    def build_instance_tags(clusterid):
-        ''' This function will return a dictionary of the instance tags.
-
-            The main desire to have this inside of a filter_plugin is that we
-            need to build the following key.
-
-            {"kubernetes.io/cluster/{{ openshift_aws_clusterid }}": "{{ openshift_aws_clusterid}}"}
-
-        '''
-        tags = {'clusterid': clusterid,
-                'kubernetes.io/cluster/{}'.format(clusterid): clusterid}
-
-        return tags
-
-    def filters(self):
-        ''' returns a mapping of filters to methods '''
-        return {'build_instance_tags': self.build_instance_tags,
-                'scale_groups_match_capacity': self.scale_groups_match_capacity,
-                'scale_groups_serial': self.scale_groups_serial,
-                'ec2_to_asg_tag': self.ec2_to_asg_tag,
-                'subnet_count_list': self.subnet_count_list}

+ 0 - 42
roles/lib_utils/filter_plugins/openshift_hosted_filters.py

@@ -1,42 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use in openshift_hosted
-'''
-
-
-class FilterModule(object):
-    ''' Custom ansible filters for use by openshift_hosted role'''
-
-    @staticmethod
-    def get_router_replicas(replicas=None, router_nodes=None):
-        ''' This function will return the number of replicas
-            based on the results from the defined
-            openshift_hosted_router_replicas OR
-            the query from oc_obj on openshift nodes with a selector OR
-            default to 1
-
-        '''
-        # We always use what they've specified if they've specified a value
-        if replicas is not None:
-            return replicas
-
-        replicas = 1
-
-        # Ignore boolean expression limit of 5.
-        # pylint: disable=too-many-boolean-expressions
-        if (isinstance(router_nodes, dict) and
-                'results' in router_nodes and
-                'results' in router_nodes['results'] and
-                isinstance(router_nodes['results']['results'], list) and
-                len(router_nodes['results']['results']) > 0 and
-                'items' in router_nodes['results']['results'][0]):
-
-            if len(router_nodes['results']['results'][0]['items']) > 0:
-                replicas = len(router_nodes['results']['results'][0]['items'])
-
-        return replicas
-
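Sketch of the nested oc_obj result shape this filter walked (hypothetical node items):

    router_nodes = {'results': {'results': [{'items': [{}, {}, {}]}]}}
    # get_router_replicas(None, router_nodes) -> 3  (one replica per matched node)
    # get_router_replicas(2, router_nodes)    -> 2  (explicit value always wins)
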
-    def filters(self):
-        ''' returns a mapping of filters to methods '''
-        return {'get_router_replicas': self.get_router_replicas}

+ 0 - 520
roles/lib_utils/filter_plugins/openshift_master.py

@@ -1,520 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-'''
-Custom filters for use in openshift-master
-'''
-import copy
-import sys
-
-from ansible import errors
-from ansible.parsing.yaml.dumper import AnsibleDumper
-from ansible.plugins.filter.core import to_bool as ansible_bool
-
-from ansible.module_utils.six import string_types, u
-
-import yaml
-
-
-class IdentityProviderBase(object):
-    """ IdentityProviderBase
-
-        Attributes:
-            name (str): Identity provider Name
-            login (bool): Is this identity provider a login provider?
-            challenge (bool): Is this identity provider a challenge provider?
-            provider (dict): Provider specific config
-            _idp (dict): internal copy of the IDP dict passed in
-            _required (list): List of lists of strings for required attributes
-            _optional (list): List of lists of strings for optional attributes
-            _allow_additional (bool): Does this provider support attributes
-                not in _required and _optional
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    # disabling this check since all of these instance attributes are
-    # necessary for this class
-    # pylint: disable=too-many-instance-attributes
-    def __init__(self, api_version, idp):
-        if api_version not in ['v1']:
-            raise errors.AnsibleFilterError("|failed api version {0} unknown".format(api_version))
-
-        self._idp = copy.deepcopy(idp)
-
-        if 'name' not in self._idp:
-            raise errors.AnsibleFilterError("|failed identity provider missing a name")
-
-        if 'kind' not in self._idp:
-            raise errors.AnsibleFilterError("|failed identity provider missing a kind")
-
-        self.name = self._idp.pop('name')
-        self.login = ansible_bool(self._idp.pop('login', False))
-        self.challenge = ansible_bool(self._idp.pop('challenge', False))
-        self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind'))
-
-        mm_keys = ('mappingMethod', 'mapping_method')
-        mapping_method = None
-        for key in mm_keys:
-            if key in self._idp:
-                mapping_method = self._idp.pop(key)
-        if mapping_method is None:
-            mapping_method = self.get_default('mappingMethod')
-        self.mapping_method = mapping_method
-
-        valid_mapping_methods = ['add', 'claim', 'generate', 'lookup']
-        if self.mapping_method not in valid_mapping_methods:
-            raise errors.AnsibleFilterError("|failed unknown mapping method "
-                                            "for provider {0}".format(self.__class__.__name__))
-        self._required = []
-        self._optional = []
-        self._allow_additional = True
-
-    @staticmethod
-    def validate_idp_list(idp_list):
-        ''' validates a list of idps '''
-        names = [x.name for x in idp_list]
-        if len(set(names)) != len(names):
-            raise errors.AnsibleFilterError("|failed more than one provider configured with the same name")
-
-        for idp in idp_list:
-            idp.validate()
-
-    def validate(self):
-        ''' validate an instance of this idp class '''
-        pass
-
-    @staticmethod
-    def get_default(key):
-        ''' get a default value for a given key '''
-        if key == 'mappingMethod':
-            return 'claim'
-        else:
-            return None
-
-    def set_provider_item(self, items, required=False):
-        ''' set a provider item based on the list of item names provided. '''
-        for item in items:
-            provider_key = items[0]
-            if item in self._idp:
-                self.provider[provider_key] = self._idp.pop(item)
-                break
-        else:
-            default = self.get_default(provider_key)
-            if default is not None:
-                self.provider[provider_key] = default
-            elif required:
-                raise errors.AnsibleFilterError("|failed provider {0} missing "
-                                                "required key {1}".format(self.__class__.__name__, provider_key))
-
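A note on the alias handling above: each entry in _required/_optional is a list of accepted spellings whose first element is the canonical provider key. Sketch:

    # given self._idp == {'bind_dn': 'cn=admin'} (snake_case inventory spelling),
    # set_provider_item(['bindDN', 'bind_dn']) pops it and stores
    # self.provider['bindDN'] = 'cn=admin'
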
-    def set_provider_items(self):
-        ''' set the provider items for this idp '''
-        for items in self._required:
-            self.set_provider_item(items, True)
-        for items in self._optional:
-            self.set_provider_item(items)
-        if self._allow_additional:
-            for key in self._idp.keys():
-                self.set_provider_item([key])
-        else:
-            if len(self._idp) > 0:
-                raise errors.AnsibleFilterError("|failed provider {0} "
-                                                "contains unknown keys "
-                                                "{1}".format(self.__class__.__name__, ', '.join(self._idp.keys())))
-
-    def to_dict(self):
-        ''' translate this idp to a dictionary '''
-        return dict(name=self.name, challenge=self.challenge,
-                    login=self.login, mappingMethod=self.mapping_method,
-                    provider=self.provider)
-
-
-class LDAPPasswordIdentityProvider(IdentityProviderBase):
-    """ LDAPPasswordIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp)
-        self._allow_additional = False
-        self._required += [['attributes'], ['url'], ['insecure']]
-        self._optional += [['ca'],
-                           ['bindDN', 'bind_dn'],
-                           ['bindPassword', 'bind_password']]
-
-        self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False))
-
-        if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']:
-            pref_user = self._idp['attributes'].pop('preferred_username')
-            self._idp['attributes']['preferredUsername'] = pref_user
-
-        if not self._idp['insecure']:
-            self._idp['ca'] = '/etc/origin/master/{}_ldap_ca.crt'.format(self.name)
-
-    def validate(self):
-        ''' validate this idp instance '''
-        if not isinstance(self.provider['attributes'], dict):
-            raise errors.AnsibleFilterError("|failed attributes for provider "
-                                            "{0} must be a dictionary".format(self.__class__.__name__))
-
-        attrs = ['id', 'email', 'name', 'preferredUsername']
-        for attr in attrs:
-            if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list):
-                raise errors.AnsibleFilterError("|failed {0} attribute for "
-                                                "provider {1} must be a list".format(attr, self.__class__.__name__))
-
-        unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs)
-        if len(unknown_attrs) > 0:
-            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
-                                            "attributes: {1}".format(self.__class__.__name__, ', '.join(unknown_attrs)))
-
-
-class KeystonePasswordIdentityProvider(IdentityProviderBase):
-    """ KeystoneIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp)
-        self._allow_additional = False
-        self._required += [['url'], ['domainName', 'domain_name']]
-        self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
-
-
-class RequestHeaderIdentityProvider(IdentityProviderBase):
-    """ RequestHeaderIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        super(RequestHeaderIdentityProvider, self).__init__(api_version, idp)
-        self._allow_additional = False
-        self._required += [['headers']]
-        self._optional += [['challengeURL', 'challenge_url'],
-                           ['loginURL', 'login_url'],
-                           ['clientCA', 'client_ca'],
-                           ['clientCommonNames', 'client_common_names'],
-                           ['emailHeaders', 'email_headers'],
-                           ['nameHeaders', 'name_headers'],
-                           ['preferredUsernameHeaders', 'preferred_username_headers']]
-        self._idp['clientCA'] = \
-            '/etc/origin/master/{}_request_header_ca.crt'.format(self.name)
-
-    def validate(self):
-        ''' validate this idp instance '''
-        if not isinstance(self.provider['headers'], list):
-            raise errors.AnsibleFilterError("|failed headers for provider {0} "
-                                            "must be a list".format(self.__class__.__name__))
-
-
-class AllowAllPasswordIdentityProvider(IdentityProviderBase):
-    """ AllowAllPasswordIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp)
-        self._allow_additional = False
-
-
-class DenyAllPasswordIdentityProvider(IdentityProviderBase):
-    """ DenyAllPasswordIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp)
-        self._allow_additional = False
-
-
-class HTPasswdPasswordIdentityProvider(IdentityProviderBase):
-    """ HTPasswdPasswordIdentity
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        # Workaround: We used to let users specify an arbitrary location for the
-        # htpasswd file, but now it needs to be in a specific spot.
-        idp['file'] = '/etc/origin/master/htpasswd'
-        super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp)
-        self._allow_additional = False
-        self._required += [['file']]
-
-    @staticmethod
-    def get_default(key):
-        if key == 'file':
-            return '/etc/origin/htpasswd'
-        else:
-            return IdentityProviderBase.get_default(key)
-
-
-class BasicAuthPasswordIdentityProvider(IdentityProviderBase):
-    """ BasicAuthPasswordIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, idp)
-        self._allow_additional = False
-        self._required += [['url']]
-        self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
-
-
-class IdentityProviderOauthBase(IdentityProviderBase):
-    """ IdentityProviderOauthBase
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        super(IdentityProviderOauthBase, self).__init__(api_version, idp)
-        self._allow_additional = False
-        self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]
-
-    def validate(self):
-        ''' validate an instance of this idp class '''
-        pass
-
-
-class OpenIDIdentityProvider(IdentityProviderOauthBase):
-    """ OpenIDIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        IdentityProviderOauthBase.__init__(self, api_version, idp)
-        self._required += [['claims'], ['urls']]
-        self._optional += [['ca'],
-                           ['extraScopes'],
-                           ['extraAuthorizeParameters']]
-        if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:
-            pref_user = self._idp['claims'].pop('preferred_username')
-            self._idp['claims']['preferredUsername'] = pref_user
-        if 'urls' in self._idp and 'user_info' in self._idp['urls']:
-            user_info = self._idp['urls'].pop('user_info')
-            self._idp['urls']['userInfo'] = user_info
-        if 'extra_scopes' in self._idp:
-            self._idp['extraScopes'] = self._idp.pop('extra_scopes')
-        if 'extra_authorize_parameters' in self._idp:
-            self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
-
-        self._idp['ca'] = '/etc/origin/master/{}_openid_ca.crt'.format(self.name)
-
-    def validate(self):
-        ''' validate this idp instance '''
-        if not isinstance(self.provider['claims'], dict):
-            raise errors.AnsibleFilterError("|failed claims for provider {0} "
-                                            "must be a dictionary".format(self.__class__.__name__))
-
-        for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):
-            if var in self.provider and not isinstance(self.provider[var], var_type):
-                raise errors.AnsibleFilterError("|failed {1} for provider "
-                                                "{0} must be a {2}".format(self.__class__.__name__,
-                                                                           var,
-                                                                           var_type.__name__))
-
-        required_claims = ['id']
-        optional_claims = ['email', 'name', 'preferredUsername']
-        all_claims = required_claims + optional_claims
-
-        for claim in required_claims:
-            if claim not in self.provider['claims']:
-                raise errors.AnsibleFilterError("|failed {0} claim missing "
-                                                "for provider {1}".format(claim, self.__class__.__name__))
-
-        for claim in all_claims:
-            if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):
-                raise errors.AnsibleFilterError("|failed {0} claims for "
-                                                "provider {1} must be a list".format(claim, self.__class__.__name__))
-
-        unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)
-        if len(unknown_claims) > 0:
-            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
-                                            "claims: {1}".format(self.__class__.__name__, ', '.join(unknown_claims)))
-
-        if not isinstance(self.provider['urls'], dict):
-            raise errors.AnsibleFilterError("|failed urls for provider {0} "
-                                            "must be a dictionary".format(self.__class__.__name__))
-
-        required_urls = ['authorize', 'token']
-        optional_urls = ['userInfo']
-        all_urls = required_urls + optional_urls
-
-        for url in required_urls:
-            if url not in self.provider['urls']:
-                raise errors.AnsibleFilterError("|failed {0} url missing for "
-                                                "provider {1}".format(url, self.__class__.__name__))
-
-        unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)
-        if len(unknown_urls) > 0:
-            raise errors.AnsibleFilterError("|failed provider {0} has unknown "
-                                            "urls: {1}".format(self.__class__.__name__, ', '.join(unknown_urls)))
-
-
-class GoogleIdentityProvider(IdentityProviderOauthBase):
-    """ GoogleIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        IdentityProviderOauthBase.__init__(self, api_version, idp)
-        self._optional += [['hostedDomain', 'hosted_domain']]
-
-    def validate(self):
-        ''' validate this idp instance '''
-        if self.challenge:
-            raise errors.AnsibleFilterError("|failed provider {0} does not "
-                                            "allow challenge authentication".format(self.__class__.__name__))
-
-
-class GitHubIdentityProvider(IdentityProviderOauthBase):
-    """ GitHubIdentityProvider
-
-        Attributes:
-
-        Args:
-            api_version(str): OpenShift config version
-            idp (dict): idp config dict
-
-        Raises:
-            AnsibleFilterError:
-    """
-    def __init__(self, api_version, idp):
-        IdentityProviderOauthBase.__init__(self, api_version, idp)
-        self._optional += [['organizations'],
-                           ['teams'],
-                           ['ca'],
-                           ['hostname']]
-
-    def validate(self):
-        ''' validate this idp instance '''
-        if self.challenge:
-            raise errors.AnsibleFilterError("|failed provider {0} does not "
-                                            "allow challenge authentication".format(self.__class__.__name__))
-
-        self._idp['ca'] = '/etc/origin/master/{}_github_ca.crt'.format(self.name)
-
-
-class FilterModule(object):
-    ''' Custom ansible filters for use by the openshift_control_plane role'''
-
-    @staticmethod
-    def translate_idps(idps, api_version):
-        ''' Translates a list of dictionaries into a valid identityProviders config '''
-        idp_list = []
-
-        if not isinstance(idps, list):
-            raise errors.AnsibleFilterError("|failed expects to filter on a list of identity providers")
-        for idp in idps:
-            if not isinstance(idp, dict):
-                raise errors.AnsibleFilterError("|failed identity providers must be a list of dictionaries")
-
-            cur_module = sys.modules[__name__]
-            idp_class = getattr(cur_module, idp['kind'], None)
-            idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)
-            idp_inst.set_provider_items()
-            idp_list.append(idp_inst)
-
-        IdentityProviderBase.validate_idp_list(idp_list)
-        return u(yaml.dump([idp.to_dict() for idp in idp_list],
-                           allow_unicode=True,
-                           default_flow_style=False,
-                           width=float("inf"),
-                           Dumper=AnsibleDumper))
-
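Sketch of the input/output contract (hypothetical provider values; key order follows yaml.dump's alphabetical sorting):

    idps = [{'name': 'htpasswd_auth', 'kind': 'HTPasswdPasswordIdentityProvider',
             'login': True, 'challenge': True}]
    # translate_idps(idps, 'v1') returns YAML like:
    # - challenge: true
    #   login: true
    #   mappingMethod: claim
    #   name: htpasswd_auth
    #   provider:
    #     apiVersion: v1
    #     file: /etc/origin/master/htpasswd
    #     kind: HTPasswdPasswordIdentityProvider
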
-    @staticmethod
-    def oo_htpasswd_users_from_file(file_contents):
-        ''' return a dictionary of htpasswd users from htpasswd file contents '''
-        htpasswd_entries = {}
-        if not isinstance(file_contents, string_types):
-            raise errors.AnsibleFilterError("failed, expects to filter on a string")
-        for line in file_contents.splitlines():
-            user = None
-            passwd = None
-            if len(line) == 0:
-                continue
-            if ':' in line:
-                user, passwd = line.split(':', 1)
-
-            if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:
-                error_msg = "failed, expects each line to be a colon separated string representing the user and passwd"
-                raise errors.AnsibleFilterError(error_msg)
-            htpasswd_entries[user] = passwd
-        return htpasswd_entries
-
-    def filters(self):
-        ''' returns a mapping of filters to methods '''
-        return {"translate_idps": self.translate_idps,
-                "oo_htpasswd_users_from_file": self.oo_htpasswd_users_from_file}

+ 0 - 274
roles/lib_utils/library/delegated_serial_command.py

@@ -1,274 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>, and others
-# (c) 2016, Andrew Butcher <abutcher@redhat.com>
-#
-# This module is derived from the Ansible command module.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
-
-
-# pylint: disable=unused-wildcard-import,wildcard-import,unused-import,redefined-builtin
-
-''' delegated_serial_command '''
-
-import datetime
-import errno
-import glob
-import shlex
-import os
-import fcntl
-import time
-
-DOCUMENTATION = '''
----
-module: delegated_serial_command
-short_description: Executes a command on a remote node
-version_added: historical
-description:
-     - The M(delegated_serial_command) module takes the command name followed by a list
-       of space-delimited arguments.
-     - The given command will be executed on all selected nodes. It
-       will not be processed through the shell, so variables like
-       C($HOME) and operations like C("<"), C(">"), C("|"), and C("&")
-       will not work (use the M(shell) module if you need these
-       features).
-     - Creates and maintains a lockfile such that this module will
-       wait for other invocations to proceed.
-options:
-  command:
-    description:
-      - the command to run
-    required: true
-    default: null
-  creates:
-    description:
-      - a filename or (since 2.0) glob pattern, when it already
-        exists, this step will B(not) be run.
-    required: no
-    default: null
-  removes:
-    description:
-      - a filename or (since 2.0) glob pattern, when it does not
-        exist, this step will B(not) be run.
-    version_added: "0.8"
-    required: no
-    default: null
-  chdir:
-    description:
-      - cd into this directory before running the command
-    version_added: "0.6"
-    required: false
-    default: null
-  executable:
-    description:
-      - change the shell used to execute the command. Should be an
-        absolute path to the executable.
-    required: false
-    default: null
-    version_added: "0.9"
-  warn:
-    version_added: "1.8"
-    default: yes
-    description:
-      - if command warnings are on in ansible.cfg, do not warn about
-        this particular line if set to no/false.
-    required: false
-  lockfile:
-    default: /tmp/delegated_serial_command.lock
-    description:
-      - the lockfile that will be created
-  timeout:
-    default: 30
-    description:
-      - number of 100ms attempts to make while waiting to obtain the lock
-notes:
-    -  If you want to run a command through the shell (say you are using C(<),
-       C(>), C(|), etc), you actually want the M(shell) module instead. The
-       M(command) module is much more secure as it's not affected by the user's
-       environment.
-    - " C(creates), C(removes), and C(chdir) can be specified after
-       the command. For instance, if you only want to run a command if
-       a certain file does not exist, use this."
-author:
-    - Ansible Core Team
-    - Michael DeHaan
-    - Andrew Butcher
-'''
-
-EXAMPLES = '''
-# Example from Ansible Playbooks.
-- delegated_serial_command:
-    command: /sbin/shutdown -t now
-
-# Run the command if the specified file does not exist.
-- delegated_serial_command:
-    command: /usr/bin/make_database.sh arg1 arg2
-    creates: /path/to/database
-'''
-
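The serialization this module advertises comes from the non-blocking flock loop in main() below; a minimal standalone sketch of the same pattern (POSIX fcntl semantics assumed):

    import errno, fcntl, time

    lockfd = open('/tmp/delegated_serial_command.lock', 'w+')
    for _ in range(30):                     # default timeout: 30 x 100ms attempts
        try:
            fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            break                           # lock acquired; run the command
        except IOError as exc:
            if exc.errno != errno.EAGAIN:
                raise                       # real I/O error, not contention
            time.sleep(0.1)                 # another invocation holds the lock
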
-# Dict of options and their defaults
-OPTIONS = {'chdir': None,
-           'creates': None,
-           'command': None,
-           'executable': None,
-           'NO_LOG': None,
-           'removes': None,
-           'warn': True,
-           'lockfile': None,
-           'timeout': None}
-
-
-def check_command(commandline):
-    ''' Check provided command '''
-    arguments = {'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
-                 'ln': 'state=link', 'mkdir': 'state=directory',
-                 'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch'}
-    commands = {'git': 'git', 'hg': 'hg', 'curl': 'get_url or uri', 'wget': 'get_url or uri',
-                'svn': 'subversion', 'service': 'service',
-                'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt',
-                'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile',
-                'rsync': 'synchronize', 'dnf': 'dnf', 'zypper': 'zypper'}
-    become = ['sudo', 'su', 'pbrun', 'pfexec', 'runas']
-    warnings = list()
-    command = os.path.basename(commandline.split()[0])
-    # pylint: disable=line-too-long
-    if command in arguments:
-        warnings.append("Consider using file module with {0} rather than running {1}".format(arguments[command], command))
-    if command in commands:
-        warnings.append("Consider using {0} module rather than running {1}".format(commands[command], command))
-    if command in become:
-        warnings.append(
-            "Consider using 'become', 'become_method', and 'become_user' rather than running {0}".format(command,))
-    return warnings
-
-
-# pylint: disable=too-many-statements,too-many-branches,too-many-locals
-def main():
-    ''' Main module function '''
-    module = AnsibleModule(  # noqa: F405
-        argument_spec=dict(
-            _uses_shell=dict(type='bool', default=False),
-            command=dict(required=True),
-            chdir=dict(),
-            executable=dict(),
-            creates=dict(),
-            removes=dict(),
-            warn=dict(type='bool', default=True),
-            lockfile=dict(default='/tmp/delegated_serial_command.lock'),
-            timeout=dict(type='int', default=30)
-        )
-    )
-
-    shell = module.params['_uses_shell']
-    chdir = module.params['chdir']
-    executable = module.params['executable']
-    command = module.params['command']
-    creates = module.params['creates']
-    removes = module.params['removes']
-    warn = module.params['warn']
-    lockfile = module.params['lockfile']
-    timeout = module.params['timeout']
-
-    if command.strip() == '':
-        module.fail_json(rc=256, msg="no command given")
-
-    iterated = 0
-    lockfd = open(lockfile, 'w+')
-    while iterated < timeout:
-        try:
-            fcntl.flock(lockfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
-            break
-        # pylint: disable=invalid-name
-        except IOError as e:
-            if e.errno != errno.EAGAIN:
-                module.fail_json(msg="I/O Error {0}: {1}".format(e.errno, e.strerror))
-            else:
-                iterated += 1
-                time.sleep(0.1)
-    else:
-        # Exhausting the loop without a break means the lock was never
-        # acquired; fail rather than run the command unserialized.
-        module.fail_json(msg="Timed out waiting for lock on {0}".format(lockfile))
-
-    if chdir:
-        chdir = os.path.abspath(os.path.expanduser(chdir))
-        os.chdir(chdir)
-
-    if creates:
-        # do not run the command if the line contains creates=filename
-        # and the filename already exists.  This allows idempotence
-        # of command executions.
-        path = os.path.expanduser(creates)
-        if glob.glob(path):
-            module.exit_json(
-                cmd=command,
-                stdout="skipped, since %s exists" % path,
-                changed=False,
-                stderr=False,
-                rc=0
-            )
-
-    if removes:
-        # do not run the command if the line contains removes=filename
-        # and the filename does not exist.  This allows idempotence
-        # of command executions.
-        path = os.path.expanduser(removes)
-        if not glob.glob(path):
-            module.exit_json(
-                cmd=command,
-                stdout="skipped, since %s does not exist" % path,
-                changed=False,
-                stderr=False,
-                rc=0
-            )
-
-    warnings = list()
-    if warn:
-        warnings = check_command(command)
-
-    if not shell:
-        command = shlex.split(command)
-    startd = datetime.datetime.now()
-
-    # pylint: disable=invalid-name
-    rc, out, err = module.run_command(command, executable=executable, use_unsafe_shell=shell)
-
-    fcntl.flock(lockfd, fcntl.LOCK_UN)
-    lockfd.close()
-
-    endd = datetime.datetime.now()
-    delta = endd - startd
-
-    if out is None:
-        out = ''
-    if err is None:
-        err = ''
-
-    module.exit_json(
-        cmd=command,
-        stdout=out.rstrip("\r\n"),
-        stderr=err.rstrip("\r\n"),
-        rc=rc,
-        start=str(startd),
-        end=str(endd),
-        delta=str(delta),
-        changed=True,
-        warnings=warnings,
-        iterated=iterated
-    )
-
-
-# import module snippets
-# pylint: disable=wrong-import-position
-from ansible.module_utils.basic import *  # noqa: F402,F403
-
-if __name__ == '__main__':
-    main()

+ 0 - 169
roles/lib_utils/library/get_current_openshift_version.py

@@ -1,169 +0,0 @@
-#!/usr/bin/env python
-# pylint: disable=missing-docstring
-#
-# Copyright 2017 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-DOCUMENTATION = '''
----
-module: get_current_openshift_version
-
-short_description: Discovers installed openshift version on masters and nodes
-
-version_added: "2.4"
-
-description:
-    - This module checks various files and program outputs to get the
-      currently installed openshift version
-
-options:
-    deployment_type:
-        description:
-            - openshift_deployment_type
-        required: true
-
-
-author:
-    - "Michael Gugino <mgugino@redhat.com>"
-'''
-
-EXAMPLES = '''
-- name: Set openshift_current_version
-  get_current_openshift_version:
-    deployment_type: "{{ openshift_deployment_type }}"
-'''
-
-
-def chomp_commit_offset(version):
-    """Chomp any "+git.foo" commit offset string from the given `version`
-    and return the modified version string.
-
-Ex:
-- chomp_commit_offset(None)                 => None
-- chomp_commit_offset(1337)                 => "1337"
-- chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
-- chomp_commit_offset("v3.4.0.15")          => "v3.4.0.15"
-- chomp_commit_offset("v1.3.0+52492b4")     => "v1.3.0"
-    """
-    if version is None:
-        return version
-    else:
-        # Stringify, just in case it's a Number type. Split by '+' and
-        # return the first element. Strings without a '+' are fine:
-        # .split() returns a single-element list holding the original string.
-        return str(version).split('+')[0]
-
-
-def get_container_openshift_version(deployment_type):
-    """
-    If containerized, see if we can determine the installed version via the
-    systemd environment files.
-    """
-    service_type_dict = {'origin': 'origin',
-                         'openshift-enterprise': 'atomic-openshift'}
-    service_type = service_type_dict[deployment_type]
-
-    for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']:
-        env_path = filename % service_type
-        if not os.path.exists(env_path):
-            continue
-
-        with open(env_path) as env_file:
-            for line in env_file:
-                if line.startswith("IMAGE_VERSION="):
-                    tag = line[len("IMAGE_VERSION="):].strip()
-                    # Remove leading "v" and any trailing release info, we just want
-                    # a version number here:
-                    no_v_version = tag[1:] if tag[0] == 'v' else tag
-                    version = no_v_version.split("-")[0]
-                    return version
-    return None
-
-
-def parse_openshift_version(output):
-    """ Parse the version number out of 'openshift version' output
-
-        Args:
-            output (string): output of 'openshift version',
-                e.g. 'openshift v3.9.0+ba7faec-1'
-        Returns:
-            string: the version number, e.g. '3.9.0+ba7faec' (any '+commit'
-                offset is removed later by chomp_commit_offset)
-    """
-    # maxsplit=1 guards against lines that contain ' v' more than once
-    versions = dict(e.split(' v', 1) for e in output.splitlines() if ' v' in e)
-    ver = versions.get('openshift', '')
-    # Remove trailing build number and commit hash from older versions, we need to return a straight
-    # w.x.y.z version here for use as openshift_version throughout the playbooks/roles. (i.e. 3.1.1.6-64-g80b61da)
-    ver = ver.split('-')[0]
-    return ver
-
-
-def get_openshift_version(module, deployment_type):
-    """ Get current version of openshift on the host.
-
-        Checks a variety of ways ranging from fastest to slowest.
-
-        Args:
-            module (AnsibleModule): the running Ansible module
-            deployment_type (string): the openshift deployment type
-
-        Returns:
-            version: the current openshift version
-    """
-    version = None
-
-    if os.path.isfile('/usr/bin/openshift'):
-        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])  # noqa: F405
-        version = parse_openshift_version(output)
-    else:
-        version = get_container_openshift_version(deployment_type)
-
-    return chomp_commit_offset(version)
-
-
-def run_module():
-    '''Run this module'''
-    module_args = dict(
-        deployment_type=dict(type='str', required=True)
-    )
-
-    module = AnsibleModule(
-        argument_spec=module_args,
-        supports_check_mode=False
-    )
-
-    deployment_type = module.params['deployment_type']
-    changed = False
-    ansible_facts = {}
-
-    current_version = get_openshift_version(module, deployment_type)
-    if current_version is not None:
-        ansible_facts = {'openshift_current_version': current_version}
-
-    # Passing back ansible_facts will set_fact the values.
-    result = {'changed': changed, 'ansible_facts': ansible_facts}
-
-    module.exit_json(**result)
-
-
-def main():
-    run_module()
-
-
-if __name__ == '__main__':
-    main()

+ 0 - 187
roles/lib_utils/library/glusterfs_check_containerized.py

@@ -1,187 +0,0 @@
-#!/usr/bin/env python
-"""glusterfs_check_containerized module"""
-# Copyright 2018 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import subprocess
-
-from ansible.module_utils.basic import AnsibleModule
-
-
-DOCUMENTATION = '''
----
-module: glusterfs_check_containerized
-
-short_description: Check health of each volume in glusterfs on openshift.
-
-version_added: "2.6"
-
-description:
-    - This module attempts to ensure all volumes in a glusterfs cluster
-      are in a healthy state.  The module is meant to fail fast; retries
-      should be executed at the ansible level, they are not implemented in
-      this module.
-      This module works by executing (roughly) the following:
-      oc exec --namespace=<namespace> <podname> -- gluster volume list
-      for volume in <volume list>:
-        gluster volume heal <volume> info
-
-author:
-    - "Michael Gugino <mgugino@redhat.com>"
-'''
-
-EXAMPLES = '''
-- name: glusterfs volumes check
-  glusterfs_check_containerized:
-    oc_bin: "/usr/bin/oc"
-    oc_conf: "/etc/origin/master/admin.kubeconfig"
-    oc_namespace: "glusterfs"
-    cluster_name: "glusterfs"
-    exclude_node: "{{ inventory_hostname }}"
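-
-# Since retries are not implemented in the module, run them at the
-# ansible level as recommended above (illustrative values):
-- name: glusterfs volumes check with retries
-  glusterfs_check_containerized:
-    oc_bin: "/usr/bin/oc"
-    oc_conf: "/etc/origin/master/admin.kubeconfig"
-    oc_namespace: "glusterfs"
-    cluster_name: "glusterfs"
-    exclude_node: "{{ inventory_hostname }}"
-  register: glusterfs_check
-  until: glusterfs_check is succeeded
-  retries: 3
-  delay: 10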
-'''
-
-
-def fail(module, err):
-    """Fail on error"""
-    result = {'failed': True,
-              'changed': False,
-              'msg': err,
-              'state': 'unknown'}
-    module.fail_json(**result)
-
-
-def call_or_fail(module, call_args):
-    """Call subprocess.check_output and return utf-8 decoded stdout or fail"""
-    try:
-        # Must decode as utf-8 for python3 compatibility
-        res = subprocess.check_output(call_args).decode('utf-8')
-    except subprocess.CalledProcessError as err:
-        fail(module, str(err))
-    return res
-
-
-def get_valid_nodes(module, oc_exec, exclude_node):
-    """Return a list of nodes that will be used to filter running pods"""
-    call_args = oc_exec + ['get', 'nodes']
-    res = call_or_fail(module, call_args)
-    valid_nodes = []
-    for line in res.split('\n'):
-        fields = line.split()
-        if not fields:
-            continue
-        if fields[0] != exclude_node and fields[1] == "Ready":
-            valid_nodes.append(fields[0])
-    if not valid_nodes:
-        fail(module,
-             'Unable to find suitable node in get nodes output: {}'.format(res))
-    return valid_nodes
-
-
-def select_pod(module, oc_exec, cluster_name, valid_nodes):
-    """Select a pod to attempt to run gluster commands on"""
-    call_args = oc_exec + ['get', 'pods', '-owide']
-    res = call_or_fail(module, call_args)
-    # res is returned as a tab/space-separated list with headers.
-    pod_name = None
-    name_search = 'glusterfs-{}'.format(cluster_name)
-    res_lines = list(filter(None, res.split('\n')))
-
-    for line in res_lines[1:]:
-        fields = line.split()
-        if not fields:
-            continue
-        if name_search in fields[0]:
-            if fields[2] == "Running" and fields[6] in valid_nodes:
-                pod_name = fields[0]
-                break
-
-    if pod_name is None:
-        fail(module,
-             "Unable to find suitable pod in get pods output: {}".format(res))
-    else:
-        return pod_name
-
-
-def get_volume_list(module, oc_exec, pod_name):
-    """Retrieve list of active volumes from gluster cluster"""
-    call_args = oc_exec + ['exec', pod_name, '--', 'gluster', 'volume', 'list']
-    res = call_or_fail(module, call_args)
-    # This should always at least return heketidbstorage, so no need to check
-    # for empty string.
-    return list(filter(None, res.split('\n')))
-
-
-def check_volume_health_info(module, oc_exec, pod_name, volume):
-    """Check health info of gluster volume"""
-    call_args = oc_exec + ['exec', pod_name, '--', 'gluster', 'volume', 'heal',
-                           volume, 'info']
-    res = call_or_fail(module, call_args)
-    # Output is not easily parsed
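-    # Typical 'gluster volume heal <volume> info' output ends each brick
-    # section with a line such as "Number of entries: 0" (illustrative);
-    # any non-zero count means the volume still has entries to heal.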
-    for line in res.split('\n'):
-        if line.startswith('Number of entries:'):
-            cols = line.split(':')
-            if cols[1].strip() != '0':
-                fail(module, 'volume {} is not ready'.format(volume))
-
-
-def check_volumes(module, oc_exec, pod_name):
-    """Check status of all volumes on cluster"""
-    volume_list = get_volume_list(module, oc_exec, pod_name)
-    for volume in volume_list:
-        check_volume_health_info(module, oc_exec, pod_name, volume)
-
-
-def run_module():
-    '''Run this module'''
-    module_args = dict(
-        oc_bin=dict(type='path', required=True),
-        oc_conf=dict(type='path', required=True),
-        oc_namespace=dict(type='str', required=True),
-        cluster_name=dict(type='str', required=True),
-        exclude_node=dict(type='str', required=True),
-    )
-    module = AnsibleModule(
-        supports_check_mode=False,
-        argument_spec=module_args
-    )
-    oc_bin = module.params['oc_bin']
-    oc_conf = '--config={}'.format(module.params['oc_conf'])
-    oc_namespace = '--namespace={}'.format(module.params['oc_namespace'])
-    cluster_name = module.params['cluster_name']
-    exclude_node = module.params['exclude_node']
-
-    oc_exec = [oc_bin, oc_conf, oc_namespace]
-
-    # Create a list of nodes to find a pod on; we don't want to try to
-    # execute on a pod running on a "NotReady" node or on the
-    # inventory_hostname node because the pods might not actually be alive.
-    valid_nodes = get_valid_nodes(module, [oc_bin, oc_conf], exclude_node)
-
-    # Need to find an alive pod to run gluster commands in.
-    pod_name = select_pod(module, oc_exec, cluster_name, valid_nodes)
-
-    check_volumes(module, oc_exec, pod_name)
-
-    result = {'changed': False}
-    module.exit_json(**result)
-
-
-def main():
-    """main"""
-    run_module()
-
-
-if __name__ == '__main__':
-    main()

+ 0 - 88
roles/lib_utils/library/kubeclient_ca.py

@@ -1,88 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-''' kubeclient_ca ansible module '''
-
-import base64
-import yaml
-from ansible.module_utils.basic import AnsibleModule
-
-
-DOCUMENTATION = '''
----
-module: kubeclient_ca
-short_description: Modify kubeclient certificate-authority-data
-author: Andrew Butcher
-requirements: [ ]
-'''
-EXAMPLES = '''
-- kubeclient_ca:
-    client_path: /etc/origin/master/admin.kubeconfig
-    ca_path: /etc/origin/master/ca-bundle.crt
-
-- slurp:
-    src: /etc/origin/master/ca-bundle.crt
-  register: ca_data
-- kubeclient_ca:
-    client_path: /etc/origin/master/admin.kubeconfig
-    ca_data: "{{ ca_data.content }}"
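-
-# Skip the automatic backup of the kubeconfig (illustrative; the module
-# defaults to backup: true):
-- kubeclient_ca:
-    client_path: /etc/origin/master/admin.kubeconfig
-    ca_path: /etc/origin/master/ca-bundle.crt
-    backup: false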
-'''
-
-
-def main():
-    ''' Modify kubeconfig located at `client_path`, setting the
-        certificate authority data to specified `ca_data` or contents of
-        `ca_path`.
-    '''
-
-    module = AnsibleModule(  # noqa: F405
-        argument_spec=dict(
-            client_path=dict(required=True),
-            ca_data=dict(required=False, default=None),
-            ca_path=dict(required=False, default=None),
-            backup=dict(required=False, default=True, type='bool'),
-        ),
-        supports_check_mode=True,
-        mutually_exclusive=[['ca_data', 'ca_path']],
-        required_one_of=[['ca_data', 'ca_path']]
-    )
-
-    client_path = module.params['client_path']
-    ca_data = module.params['ca_data']
-    ca_path = module.params['ca_path']
-    backup = module.params['backup']
-
-    try:
-        with open(client_path) as client_config_file:
-            client_config_data = yaml.safe_load(client_config_file.read())
-
-        if ca_data is None:
-            # Read as bytes: b64encode requires bytes on python3. Decode the
-            # result so ca_data stays a str, comparable with the string
-            # values already present in the kubeconfig.
-            with open(ca_path, 'rb') as ca_file:
-                ca_data = base64.standard_b64encode(ca_file.read()).decode('utf-8')
-
-        changes = []
-        # Naively update the CA information for each cluster in the
-        # kubeconfig.
-        for cluster in client_config_data['clusters']:
-            if cluster['cluster']['certificate-authority-data'] != ca_data:
-                cluster['cluster']['certificate-authority-data'] = ca_data
-                changes.append(cluster['name'])
-
-        if not module.check_mode:
-            if len(changes) > 0 and backup:
-                module.backup_local(client_path)
-
-            with open(client_path, 'w') as client_config_file:
-                client_config_string = yaml.dump(client_config_data, default_flow_style=False)
-                client_config_string = client_config_string.replace('\'\'', '""')
-                client_config_file.write(client_config_string)
-
-        return module.exit_json(changed=(len(changes) > 0))
-
-    # ignore broad-except error to avoid stack trace to ansible user
-    # pylint: disable=broad-except
-    except Exception as error:
-        return module.fail_json(msg=str(error))
-
-
-if __name__ == '__main__':
-    main()

+ 0 - 117
roles/lib_utils/library/modify_yaml.py

@@ -1,117 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-''' modify_yaml ansible module '''
-
-import yaml
-
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
-from ansible.module_utils.basic import *  # noqa: F402,F403
-
-
-DOCUMENTATION = '''
----
-module: modify_yaml
-short_description: Modify yaml key value pairs
-author: Andrew Butcher
-requirements: [ ]
-'''
-EXAMPLES = '''
-- modify_yaml:
-    dest: /etc/origin/master/master-config.yaml
-    yaml_key: 'kubernetesMasterConfig.masterCount'
-    yaml_value: 2
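-
-# Missing intermediate keys are created as empty dictionaries before the
-# final key is set (key and value here are illustrative):
-- modify_yaml:
-    dest: /etc/origin/master/master-config.yaml
-    yaml_key: 'imagePolicyConfig.maxImagesBulkImportedPerRepository'
-    yaml_value: 10
-    backup: false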
-'''
-
-
-def set_key(yaml_data, yaml_key, yaml_value):
-    ''' Updates a parsed yaml structure setting a key to a value.
-
-        :param yaml_data: yaml structure to modify.
-        :type yaml_data: dict
-        :param yaml_key: Key to modify.
-        :type yaml_key: mixed
-        :param yaml_value: Value to use for yaml_key.
-        :type yaml_value: mixed
-        :returns: Changes to the yaml_data structure
-        :rtype: list(tuple())
-    '''
-    changes = []
-    ptr = yaml_data
-    keys = yaml_key.split('.')
-    for i, key in enumerate(keys):
-        # Track position instead of comparing names so an intermediate key
-        # that shares the final key's name is not mistaken for it.
-        on_final_key = (i == len(keys) - 1)
-        # Key isn't present and we're not on the final key. Set to empty dictionary.
-        if key not in ptr and not on_final_key:
-            ptr[key] = {}
-            ptr = ptr[key]
-        # Current key is the final key. Update value.
-        elif on_final_key:
-            if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr):  # noqa: F405
-                ptr[key] = yaml_value
-                changes.append((yaml_key, yaml_value))
-        else:
-            # Next value is None. Turn it into an empty dictionary
-            # before descending.
-            if ptr[key] is None:
-                ptr[key] = {}
-            ptr = ptr[key]
-    return changes
-
-
-def main():
-    ''' Modify key (supplied in jinja2 dot notation) in yaml file, setting
-        the key to the desired value.
-    '''
-
-    # disabling pylint errors for global-variable-undefined and invalid-name
-    # for 'global module' usage, since it is required to use ansible_facts
-    # pylint: disable=global-variable-undefined, invalid-name, redefined-outer-name
-    global module
-
-    module = AnsibleModule(  # noqa: F405
-        argument_spec=dict(
-            dest=dict(required=True),
-            yaml_key=dict(required=True),
-            yaml_value=dict(required=True),
-            backup=dict(required=False, default=True, type='bool'),
-        ),
-        supports_check_mode=True,
-    )
-
-    dest = module.params['dest']
-    yaml_key = module.params['yaml_key']
-    yaml_value = module.safe_eval(module.params['yaml_value'])
-    backup = module.params['backup']
-
-    # Represent null values as an empty string.
-    # pylint: disable=missing-docstring, unused-argument
-    def none_representer(dumper, data):
-        return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'')
-
-    yaml.add_representer(type(None), none_representer)
-
-    try:
-        with open(dest) as yaml_file:
-            yaml_data = yaml.safe_load(yaml_file.read())
-
-        changes = set_key(yaml_data, yaml_key, yaml_value)
-
-        if len(changes) > 0:
-            if backup:
-                module.backup_local(dest)
-            with open(dest, 'w') as yaml_file:
-                yaml_string = yaml.dump(yaml_data, default_flow_style=False)
-                yaml_string = yaml_string.replace('\'\'', '""')
-                yaml_file.write(yaml_string)
-
-        return module.exit_json(changed=(len(changes) > 0), changes=changes)
-
-    # ignore broad-except error to avoid stack trace to ansible user
-    # pylint: disable=broad-except
-    except Exception as error:
-        return module.fail_json(msg=str(error))
-
-
-if __name__ == '__main__':
-    main()

+ 0 - 172
roles/lib_utils/library/oo_iam_kms.py

@@ -1,172 +0,0 @@
-#!/usr/bin/env python
-'''
-ansible module for creating AWS IAM KMS keys
-'''
-# vim: expandtab:tabstop=4:shiftwidth=4
-#
-#   AWS IAM KMS ansible module
-#
-#
-#   Copyright 2016 Red Hat Inc.
-#
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-#
-#       http://www.apache.org/licenses/LICENSE-2.0
-#
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
-#
-# Jenkins environment doesn't have all the required libraries
-# pylint: disable=import-error
-import time
-import boto3
-# Ansible modules need this wildcard import
-# pylint: disable=unused-wildcard-import, wildcard-import, redefined-builtin
-from ansible.module_utils.basic import AnsibleModule
-
-AWS_ALIAS_URL = "http://docs.aws.amazon.com/kms/latest/developerguide/programming-aliases.html"
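-
-EXAMPLES = '''
-# Illustrative usage only; these values are assumptions, not taken from
-# a real playbook.
-
-# List every KMS key/alias pair in a region
-- oo_iam_kms:
-    state: list
-    region: us-east-1
-
-# Create a key behind an alias if it does not already exist
-- oo_iam_kms:
-    state: present
-    region: us-east-1
-    alias: alias/my-kms-key
-    description: 'KMS key for encrypting registry storage'
-'''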
-
-
-class AwsIamKms(object):
-    '''
-    ansible module for AWS IAM KMS
-    '''
-
-    def __init__(self):
-        ''' constructor '''
-        self.module = None
-        self.kms_client = None
-        self.aliases = None
-
-    @staticmethod
-    def valid_alias_name(user_alias):
-        ''' AWS KMS aliases must start with 'alias/' '''
-        valid_start = 'alias/'
-        if user_alias.startswith(valid_start):
-            return True
-
-        return False
-
-    def get_all_kms_info(self):
-        '''fetch all kms info and return them
-
-        list_keys doesn't have information regarding aliases
-        list_aliases doesn't have the full kms arn
-
-        fetch both and join them on the targetKeyId
-        '''
-        aliases = self.kms_client.list_aliases()['Aliases']
-        keys = self.kms_client.list_keys()['Keys']
-
-        for alias in aliases:
-            for key in keys:
-                if 'TargetKeyId' in alias and 'KeyId' in key:
-                    if alias['TargetKeyId'] == key['KeyId']:
-                        alias.update(key)
-
-        return aliases
-
-    def get_kms_entry(self, user_alias, alias_list):
-        ''' return single alias details from list of aliases '''
-        for alias in alias_list:
-            if user_alias == alias.get('AliasName', False):
-                return alias
-
-        msg = "Did not find alias {}".format(user_alias)
-        self.module.exit_json(failed=True, results=msg)
-
-    @staticmethod
-    def exists(user_alias, alias_list):
-        ''' Check if KMS alias already exists '''
-        for alias in alias_list:
-            if user_alias == alias.get('AliasName'):
-                return True
-
-        return False
-
-    def main(self):
-        ''' entry point for module '''
-
-        self.module = AnsibleModule(
-            argument_spec=dict(
-                state=dict(default='list', choices=['list', 'present'], type='str'),
-                region=dict(default=None, required=True, type='str'),
-                alias=dict(default=None, type='str'),
-                # description default cannot be None
-                description=dict(default='', type='str'),
-                aws_access_key=dict(default=None, type='str'),
-                aws_secret_key=dict(default=None, type='str'),
-            ),
-        )
-
-        state = self.module.params['state']
-        aws_access_key = self.module.params['aws_access_key']
-        aws_secret_key = self.module.params['aws_secret_key']
-        if aws_access_key and aws_secret_key:
-            boto3.setup_default_session(aws_access_key_id=aws_access_key,
-                                        aws_secret_access_key=aws_secret_key,
-                                        region_name=self.module.params['region'])
-        else:
-            boto3.setup_default_session(region_name=self.module.params['region'])
-
-        self.kms_client = boto3.client('kms')
-
-        aliases = self.get_all_kms_info()
-
-        if state == 'list':
-            if self.module.params['alias'] is not None:
-                user_kms = self.get_kms_entry(self.module.params['alias'],
-                                              aliases)
-                self.module.exit_json(changed=False, results=user_kms,
-                                      state="list")
-            else:
-                self.module.exit_json(changed=False, results=aliases,
-                                      state="list")
-
-        if state == 'present':
-
-            # early sanity check to make sure the alias name conforms with
-            # AWS alias name requirements
-            if not self.valid_alias_name(self.module.params['alias']):
-                self.module.exit_json(failed=True, changed=False,
-                                      results="Alias must start with the prefix " +
-                                      "'alias/'. Please see " + AWS_ALIAS_URL,
-                                      state='present')
-
-            if not self.exists(self.module.params['alias'], aliases):
-                # if we didn't find it, create it
-                response = self.kms_client.create_key(KeyUsage='ENCRYPT_DECRYPT',
-                                                      Description=self.module.params['description'])
-                kid = response['KeyMetadata']['KeyId']
-                response = self.kms_client.create_alias(AliasName=self.module.params['alias'],
-                                                        TargetKeyId=kid)
-                # sleep for a bit so that the KMS data can be queried
-                time.sleep(10)
-                # get details for newly created KMS entry
-                new_alias_list = self.kms_client.list_aliases()['Aliases']
-                user_kms = self.get_kms_entry(self.module.params['alias'],
-                                              new_alias_list)
-
-                self.module.exit_json(changed=True, results=user_kms,
-                                      state='present')
-
-            # already exists, normally we would check whether we need to update it
-            # but this module isn't written to allow changing the alias name
-            # or changing whether the key is enabled/disabled
-            user_kms = self.get_kms_entry(self.module.params['alias'], aliases)
-            self.module.exit_json(changed=False, results=user_kms,
-                                  state="present")
-
-        self.module.exit_json(failed=True,
-                              changed=False,
-                              results='Unknown state passed. %s' % state,
-                              state="unknown")
-
-
-if __name__ == '__main__':
-    AwsIamKms().main()

+ 0 - 835
roles/lib_utils/library/openshift_cert_expiry.py

@@ -1,835 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# pylint: disable=line-too-long,invalid-name
-
-"""For details on this module see DOCUMENTATION (below)"""
-
-import base64
-import datetime
-import io
-import os
-import subprocess
-import yaml
-import dateutil.parser
-
-# pylint import-error disabled because pylint cannot find the package
-# when installed in a virtualenv
-from ansible.module_utils.six.moves import configparser  # pylint: disable=import-error
-from ansible.module_utils.basic import AnsibleModule
-
-try:
-    # You can comment this import out and include a 'pass' in this
-    # block if you're manually testing this module on a NON-ATOMIC
-    # HOST (or any host that just doesn't have PyOpenSSL
-    # available). That will force the `load_and_handle_cert` function
-    # to use the Fake OpenSSL classes.
-    import OpenSSL.crypto
-    HAS_OPENSSL = True
-except ImportError:
-    # Some platforms (such as RHEL Atomic) may not have the Python
-    # OpenSSL library installed. In this case we will use a manual
-    # work-around to parse each certificate.
-    #
-    # Check for 'OpenSSL.crypto' in `sys.modules` later.
-    HAS_OPENSSL = False
-
-DOCUMENTATION = '''
----
-module: openshift_cert_expiry
-short_description: Check OpenShift Container Platform (OCP) and Kube certificate expirations on a cluster
-description:
-  - The M(openshift_cert_expiry) module has two basic functions: to flag certificates which will expire in a set window of time from now, and to notify you about certificates which have already expired.
-  - When the module finishes, a summary of the examination is returned. Each certificate in the summary has a C(health) key with a value of one of the following:
-  - C(ok) - not expired, and outside of the expiration C(warning_days) window.
-  - C(warning) - not expired, but will expire between now and the C(warning_days) window.
-  - C(expired) - an expired certificate.
-  - Certificate flagging follows this logic:
-  - If the expiration date is before now then the certificate is classified as C(expired).
-  - The certificate's time to live (expiration date - now) is calculated; if that time window is less than C(warning_days) the certificate is classified as C(warning).
-  - All other conditions are classified as C(ok).
-  - The following keys are ALSO present in the certificate summary:
-  - C(cert_cn) - The common name of the certificate (additional CNs present in SAN extensions are omitted)
-  - C(days_remaining) - The number of days until the certificate expires.
-  - C(expiry) - The date the certificate expires on.
-  - C(path) - The full path to the certificate on the examined host.
-version_added: "1.0"
-options:
-  config_base:
-    description:
-      - Base path to OCP system settings.
-    required: false
-    default: /etc/origin
-  warning_days:
-    description:
-      - Flag certificates which will expire in C(warning_days) days from now.
-    required: false
-    default: 30
-  show_all:
-    description:
-      - Enable this option to show analysis of ALL certificates examined by this module.
-      - By default only certificates which have expired, or will expire within the C(warning_days) window will be reported.
-    required: false
-    default: false
-
-author: "Tim Bielawa (@tbielawa) <tbielawa@redhat.com>"
-'''
-
-EXAMPLES = '''
-# Default invocation, only notify about expired certificates or certificates which will expire within 30 days from now
-- openshift_cert_expiry:
-
-# Expand the warning window to show certificates expiring within a year from now
-- openshift_cert_expiry: warning_days=365
-
-# Show expired, soon to expire (now + 30 days), and all other certificates examined
-- openshift_cert_expiry: show_all=true
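-
-# Register the returned summary for use in later tasks (illustrative;
-# the registered variable name is an assumption)
-- openshift_cert_expiry:
-    warning_days: 90
-    show_all: true
-  register: cert_report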
-'''
-
-
-class FakeOpenSSLCertificate(object):
-    """This provides a rough mock of what you get from
-`OpenSSL.crypto.load_certificate()`. This is a work-around for
-platforms missing the Python OpenSSL library.
-    """
-    def __init__(self, cert_string):
-        """`cert_string` is a certificate in the form you get from running a
-.crt through 'openssl x509 -in CERT.cert -text'"""
-        self.cert_string = cert_string
-        self.serial = None
-        self.subject = None
-        self.extensions = []
-        self.not_after = None
-        self._parse_cert()
-
-    def _parse_cert(self):
-        """Manually parse the certificate line by line"""
-        self.extensions = []
-
-        PARSING_ALT_NAMES = False
-        PARSING_HEX_SERIAL = False
-        for line in self.cert_string.split('\n'):
-            l = line.strip()
-            if PARSING_ALT_NAMES:
-                # We're parsing a 'Subject Alternative Name' line
-                self.extensions.append(
-                    FakeOpenSSLCertificateSANExtension(l))
-
-                PARSING_ALT_NAMES = False
-                continue
-
-            if PARSING_HEX_SERIAL:
-                # Hex serials arrive colon-delimited
-                serial_raw = l.replace(':', '')
-                # Convert to decimal
-                self.serial = int('0x' + serial_raw, base=16)
-                PARSING_HEX_SERIAL = False
-                continue
-
-            # parse out the bits that we can
-            if l.startswith('Serial Number:'):
-                # Decimal format:
-                #   Serial Number: 11 (0xb)
-                #   => 11
-                # Hex Format (large serials):
-                #   Serial Number:
-                #       0a:de:eb:24:04:75:ab:56:39:14:e9:5a:22:e2:85:bf
-                #   => 14449739080294792594019643629255165375
-                if l.endswith(':'):
-                    PARSING_HEX_SERIAL = True
-                    continue
-                self.serial = int(l.split()[-2])
-
-            elif l.startswith('Not After :'):
-                # Not After : Feb  7 18:19:35 2019 GMT
-                # => strptime(str, '%b %d %H:%M:%S %Y %Z')
-                # => strftime('%Y%m%d%H%M%SZ')
-                # => 20190207181935Z
-                not_after_raw = l.partition(' : ')[-1]
-                # Last item: ('Not After', ' : ', 'Feb  7 18:19:35 2019 GMT')
-                not_after_parsed = dateutil.parser.parse(not_after_raw)
-                self.not_after = not_after_parsed.strftime('%Y%m%d%H%M%SZ')
-
-            elif l.startswith('X509v3 Subject Alternative Name:'):
-                PARSING_ALT_NAMES = True
-                continue
-
-            elif l.startswith('Subject:'):
-                # O = system:nodes, CN = system:node:m01.example.com
-                self.subject = FakeOpenSSLCertificateSubjects(l.partition(': ')[-1])
-
-    def get_serial_number(self):
-        """Return the serial number of the cert"""
-        return self.serial
-
-    def get_subject(self):
-        """Subjects must implement get_components() and return a list of
-tuples. An 'openssl x509 -in CERT.cert -text' with 'Subject':
-
-    Subject: O = system:nodes, CN = system:node:m01.example.com
-
-would return: [('O', 'system:nodes'), ('CN', 'system:node:m01.example.com')]
-        """
-        return self.subject
-
-    def get_extension(self, i):
-        """Extensions must implement get_short_name() and return the string
-'subjectAltName'"""
-        return self.extensions[i]
-
-    def get_extension_count(self):
-        """ get_extension_count """
-        return len(self.extensions)
-
-    def get_notAfter(self):
-        """Returns a date stamp as a string in the form
-'20180922170439Z'. strptime the result with format param:
-'%Y%m%d%H%M%SZ'."""
-        return self.not_after
-
-
-class FakeOpenSSLCertificateSANExtension(object):  # pylint: disable=too-few-public-methods
-    """Mocks what happens when `get_extension` is called on a certificate
-object"""
-
-    def __init__(self, san_string):
-        """With `san_string` as you get from:
-
-    $ openssl x509 -in certificate.crt -text
-        """
-        self.san_string = san_string
-        self.short_name = 'subjectAltName'
-
-    def get_short_name(self):
-        """Return the 'type' of this extension. It's always the same though
-because we only care about subjectAltName's"""
-        return self.short_name
-
-    def __str__(self):
-        """Return this extension and the value as a simple string"""
-        return self.san_string
-
-
-# pylint: disable=too-few-public-methods
-class FakeOpenSSLCertificateSubjects(object):
-    """Mocks what happens when `get_subject` is called on a certificate
-object"""
-
-    def __init__(self, subject_string):
-        """With `subject_string` as you get from:
-
-    $ openssl x509 -in certificate.crt -text
-        """
-        self.subjects = []
-        for s in subject_string.split(', '):
-            name, _, value = s.partition(' = ')
-            self.subjects.append((name, value))
-
-    def get_components(self):
-        """Returns a list of tuples"""
-        return self.subjects
-
-
-######################################################################
-def filter_paths(path_list):
-    """`path_list` - A list of file paths to check. Only files which exist
-will be returned
-    """
-    return [p for p in path_list if os.path.exists(os.path.realpath(p))]
-
-
-# pylint: disable=too-many-locals,too-many-branches
-#
-# TODO: Break this function down into smaller chunks
-def load_and_handle_cert(cert_string, now, base64decode=False, ans_module=None):
-    """Load a certificate, split off the good parts, and return some
-useful data
-
-Params:
-
-- `cert_string` (string) - a certificate loaded into a string object
-- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against
-- `base64decode` (bool) - run base64.b64decode() on the input
-- `ans_module` (AnsibleModule) - The AnsibleModule object for this module (so we can raise errors)
-
-Returns:
-A tuple of the form:
-    (cert_subject, cert_expiry_date, time_remaining, cert_serial_number)
-    """
-    if base64decode:
-        _cert_string = base64.b64decode(cert_string).decode('utf-8')
-    else:
-        _cert_string = cert_string
-
-    # Disable this. We 'redefine' the type because we are working
-    # around a missing library on the target host.
-    #
-    # pylint: disable=redefined-variable-type
-    if HAS_OPENSSL:
-        # No work-around required
-        cert_loaded = OpenSSL.crypto.load_certificate(
-            OpenSSL.crypto.FILETYPE_PEM, _cert_string)
-    else:
-        # Missing library, work-around required. Run the 'openssl'
-        # command on it to decode it
-        cmd = 'openssl x509 -text'
-        try:
-            openssl_proc = subprocess.Popen(cmd.split(),
-                                            stdout=subprocess.PIPE,
-                                            stdin=subprocess.PIPE)
-        except OSError:
-            ans_module.fail_json(msg="Error: The 'OpenSSL' python library and CLI command were not found on the target host. Unable to parse any certificates. This host will not be included in generated reports.")
-        else:
-            openssl_decoded = openssl_proc.communicate(_cert_string.encode('utf-8'))[0].decode('utf-8')
-            cert_loaded = FakeOpenSSLCertificate(openssl_decoded)
-
-    ######################################################################
-    # Read all possible names from the cert
-    cert_subjects = []
-    for name, value in cert_loaded.get_subject().get_components():
-        if isinstance(name, bytes) or isinstance(value, bytes):
-            name = name.decode('utf-8')
-            value = value.decode('utf-8')
-        cert_subjects.append('{}:{}'.format(name, value))
-
-    # To read SANs from a cert we must read the subjectAltName
-    # extension from the X509 Object. What makes this more difficult
-    # is that pyOpenSSL does not give extensions as an iterable
-    san = None
-    for i in range(cert_loaded.get_extension_count()):
-        ext = cert_loaded.get_extension(i)
-        if ext.get_short_name() == 'subjectAltName':
-            san = ext
-
-    if san is not None:
-        # The X509Extension object for subjectAltName prints as a
-        # string with the alt names separated by a comma and a
-        # space. Split the string by ', ' and then add our new names
-        # to the list of existing names
-        cert_subjects.extend(str(san).split(', '))
-
-    cert_subject = ', '.join(cert_subjects)
-    ######################################################################
-
-    # Grab the expiration date
-    not_after = cert_loaded.get_notAfter()
-    # example get_notAfter() => 20180922170439Z
-    if isinstance(not_after, bytes):
-        not_after = not_after.decode('utf-8')
-
-    cert_expiry_date = datetime.datetime.strptime(
-        not_after,
-        '%Y%m%d%H%M%SZ')
-
-    time_remaining = cert_expiry_date - now
-
-    return (cert_subject, cert_expiry_date, time_remaining, cert_loaded.get_serial_number())
-
-
-def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list):
-    """Given metadata about a certificate under examination, classify it
-    into one of three categories, 'ok', 'warning', and 'expired'.
-
-Params:
-
-- `cert_meta` dict - A dict with certificate metadata. Required fields
-  include: 'cert_cn', 'path', 'expiry', 'days_remaining', 'health'.
-- `now` (datetime) - a datetime object of the time to calculate the certificate 'time_remaining' against
-- `time_remaining` (datetime.timedelta) - a timedelta for how long until the cert expires
-- `expire_window` (datetime.timedelta) - a timedelta for how long the warning window is
-- `cert_list` list - A list to shove the classified cert into
-
-Return:
-- `cert_list` - The updated list of classified certificates
-    """
-    expiry_str = str(cert_meta['expiry'])
-    # Categorization
-    if cert_meta['expiry'] < now:
-        # This already expired, must NOTIFY
-        cert_meta['health'] = 'expired'
-    elif time_remaining < expire_window:
-        # WARN about this upcoming expiration
-        cert_meta['health'] = 'warning'
-    else:
-        # Not expired or about to expire
-        cert_meta['health'] = 'ok'
-
-    cert_meta['expiry'] = expiry_str
-    cert_meta['serial_hex'] = hex(int(cert_meta['serial']))
-    cert_list.append(cert_meta)
-    return cert_list
-
-
-def tabulate_summary(certificates, kubeconfigs, etcd_certs, router_certs, registry_certs):
-    """Calculate the summary text for when the module finishes
-running. This includes counts of each classification and what have
-you.
-
-Params:
-
-- `certificates` (list of dicts) - Processed `expire_check_result`
-  dicts with filled in `health` keys for system certificates.
-- `kubeconfigs` - as above for kubeconfigs
-- `etcd_certs` - as above for etcd certs
-- `router_certs` - as above for router certs
-- `registry_certs` - as above for registry certs
-
-Return:
-
-- `summary_results` (dict) - Counts of each cert type classification
-  and total items examined.
-    """
-    items = certificates + kubeconfigs + etcd_certs + router_certs + registry_certs
-
-    summary_results = {
-        'system_certificates': len(certificates),
-        'kubeconfig_certificates': len(kubeconfigs),
-        'etcd_certificates': len(etcd_certs),
-        'router_certs': len(router_certs),
-        'registry_certs': len(registry_certs),
-        'total': len(items),
-        'ok': 0,
-        'warning': 0,
-        'expired': 0
-    }
-
-    summary_results['expired'] = len([c for c in items if c['health'] == 'expired'])
-    summary_results['warning'] = len([c for c in items if c['health'] == 'warning'])
-    summary_results['ok'] = len([c for c in items if c['health'] == 'ok'])
-
-    return summary_results
-
-
-######################################################################
-# This is our module MAIN function after all, so there's bound to be a
-# lot of code bundled up into one block
-#
-# Reason: These checks are disabled because the issue was introduced
-# during a period where the pylint checks weren't enabled for this file
-# Status: temporarily disabled pending future refactoring
-# pylint: disable=too-many-locals,too-many-statements,too-many-branches
-def main():
-    """This module examines certificates (in various forms) which compose
-an OpenShift Container Platform cluster
-    """
-
-    module = AnsibleModule(
-        argument_spec=dict(
-            config_base=dict(
-                required=False,
-                default="/etc/origin",
-                type='str'),
-            warning_days=dict(
-                required=False,
-                default=30,
-                type='int'),
-            show_all=dict(
-                required=False,
-                default=False,
-                type='bool')
-        ),
-        supports_check_mode=True,
-    )
-
-    # Basic scaffolding for OpenShift specific certs
-    openshift_base_config_path = os.path.realpath(module.params['config_base'])
-    openshift_master_config_path = os.path.join(openshift_base_config_path,
-                                                "master", "master-config.yaml")
-    openshift_node_config_path = os.path.join(openshift_base_config_path,
-                                              "node", "node-config.yaml")
-    openshift_node_bootstrap_config_path = os.path.join(openshift_base_config_path,
-                                                        "node", "bootstrap-node-config.yaml")
-    openshift_cert_check_paths = [
-        openshift_master_config_path,
-        openshift_node_config_path,
-        openshift_node_bootstrap_config_path,
-    ]
-
-    # Paths for Kubeconfigs. Additional kubeconfigs are conditionally
-    # checked later in the code
-    master_kube_configs = ['admin', 'openshift-master',
-                           'openshift-node', 'openshift-router',
-                           'openshift-registry']
-
-    kubeconfig_paths = []
-    for m_kube_config in master_kube_configs:
-        kubeconfig_paths.append(
-            os.path.join(openshift_base_config_path, "master", m_kube_config + ".kubeconfig")
-        )
-
-    # Validate some paths we have the ability to do ahead of time
-    openshift_cert_check_paths = filter_paths(openshift_cert_check_paths)
-    kubeconfig_paths = filter_paths(kubeconfig_paths)
-
-    # etcd, where do you hide your certs? Used when parsing etcd.conf
-    etcd_cert_params = [
-        "ETCD_TRUSTED_CA_FILE",
-        "ETCD_CERT_FILE",
-        "ETCD_PEER_TRUSTED_CA_FILE",
-        "ETCD_PEER_CERT_FILE",
-    ]
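-
-    # An /etc/etcd/etcd.conf might contain lines like (illustrative):
-    #   ETCD_TRUSTED_CA_FILE=/etc/etcd/ca.crt
-    #   ETCD_CERT_FILE=/etc/etcd/server.crt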
-
-    # Expiry checking stuff
-    now = datetime.datetime.now()
-    # todo, catch exception for invalid input and return a fail_json
-    warning_days = int(module.params['warning_days'])
-    expire_window = datetime.timedelta(days=warning_days)
-
-    # Module stuff
-    #
-    # The results of our cert checking to return from the task call
-    check_results = {}
-    check_results['meta'] = {}
-    check_results['meta']['warning_days'] = warning_days
-    check_results['meta']['checked_at_time'] = str(now)
-    check_results['meta']['warn_before_date'] = str(now + expire_window)
-    check_results['meta']['show_all'] = str(module.params['show_all'])
-    # All the analyzed certs accumulate here
-    ocp_certs = []
-
-    ######################################################################
-    # Sure, why not? Let's enable check mode.
-    if module.check_mode:
-        check_results['ocp_certs'] = []
-        module.exit_json(
-            check_results=check_results,
-            msg="Checked 0 total certificates. Expired/Warning/OK: 0/0/0. Warning window: %s days" % module.params['warning_days'],
-            rc=0,
-            changed=False
-        )
-
-    ######################################################################
-    # Check for OpenShift Container Platform specific certs
-    ######################################################################
-    for os_cert in filter_paths(openshift_cert_check_paths):
-        # Open up that config file and locate the cert and CA
-        with io.open(os_cert, 'r', encoding='utf-8') as fp:
-            cert_meta = {}
-            cfg = yaml.safe_load(fp)
-            # cert files are specified in parsed `fp` as relative to the path
-            # of the original config file. 'master-config.yaml' with certFile
-            # = 'foo.crt' implies that 'foo.crt' is in the same
-            # directory. certFile = '../foo.crt' is in the parent directory.
-            cfg_path = os.path.dirname(fp.name)
-
-            servingInfoFile = cfg.get('servingInfo', {}).get('certFile')
-            if servingInfoFile:
-                cert_meta['certFile'] = os.path.join(cfg_path, servingInfoFile)
-
-            servingInfoCA = cfg.get('servingInfo', {}).get('clientCA')
-            if servingInfoCA:
-                cert_meta['clientCA'] = os.path.join(cfg_path, servingInfoCA)
-
-            serviceSigner = cfg.get('controllerConfig', {}).get('serviceServingCert', {}).get('signer', {}).get('certFile')
-            if serviceSigner:
-                cert_meta['serviceSigner'] = os.path.join(cfg_path, serviceSigner)
-
-            etcdClientCA = cfg.get('etcdClientInfo', {}).get('ca')
-            if etcdClientCA:
-                cert_meta['etcdClientCA'] = os.path.join(cfg_path, etcdClientCA)
-
-            etcdClientCert = cfg.get('etcdClientInfo', {}).get('certFile')
-            if etcdClientCert:
-                cert_meta['etcdClientCert'] = os.path.join(cfg_path, etcdClientCert)
-
-            kubeletCert = cfg.get('kubeletClientInfo', {}).get('certFile')
-            if kubeletCert:
-                cert_meta['kubeletCert'] = os.path.join(cfg_path, kubeletCert)
-
-            proxyClient = cfg.get('kubernetesMasterConfig', {}).get('proxyClientInfo', {}).get('certFile')
-            if proxyClient:
-                cert_meta['proxyClient'] = os.path.join(cfg_path, proxyClient)
-
-        ######################################################################
-        # Load the certificate and the CA, parse their expiration dates into
-        # datetime objects so we can manipulate them later
-        for v in cert_meta.values():
-            with io.open(v, 'r', encoding='utf-8') as fp:
-                cert = fp.read()
-                (cert_subject,
-                 cert_expiry_date,
-                 time_remaining,
-                 cert_serial) = load_and_handle_cert(cert, now, ans_module=module)
-
-                expire_check_result = {
-                    'cert_cn': cert_subject,
-                    'path': fp.name,
-                    'expiry': cert_expiry_date,
-                    'days_remaining': time_remaining.days,
-                    'health': None,
-                    'serial': cert_serial
-                }
-
-                classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs)
-
-    ######################################################################
-    # /Check for OpenShift Container Platform specific certs
-    ######################################################################
-
-    ######################################################################
-    # Check service Kubeconfigs
-    ######################################################################
-    kubeconfigs = []
-
-    # There may be additional kubeconfigs to check, but their naming
-    # is less predictable than the ones we've already assembled.
-
-    for node_config in [openshift_node_config_path, openshift_node_bootstrap_config_path]:
-        try:
-            # Try to read the standard 'node-config.yaml' file to check if
-            # this host is a node.
-            with io.open(node_config, 'r', encoding='utf-8') as fp:
-                cfg = yaml.safe_load(fp)
-
-            # OK, the config file exists, therefore this is a
-            # node. Nodes have their own kubeconfig files to
-            # communicate with the master API. Let's read the relative
-            # path to that file from the node config.
-            node_masterKubeConfig = cfg['masterKubeConfig']
-            # As before, the path to the 'masterKubeConfig' file is
-            # relative to `fp`
-            cfg_path = os.path.dirname(fp.name)
-            node_kubeconfig = os.path.join(cfg_path, node_masterKubeConfig)
-
-            with io.open(node_kubeconfig, 'r', encoding='utf8') as fp:
-                # Read in the nodes kubeconfig file and grab the good stuff
-                cfg = yaml.safe_load(fp)
-
-            c = cfg['users'][0]['user'].get('client-certificate-data')
-            if not c:
-                # This is not a node
-                raise IOError
-            (cert_subject,
-             cert_expiry_date,
-             time_remaining,
-             cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module)
-
-            expire_check_result = {
-                'cert_cn': cert_subject,
-                'path': fp.name,
-                'expiry': cert_expiry_date,
-                'days_remaining': time_remaining.days,
-                'health': None,
-                'serial': cert_serial
-            }
-
-            classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
-        except IOError:
-            # This is not a node
-            pass
-
-    for kube in filter_paths(kubeconfig_paths):
-        with io.open(kube, 'r', encoding='utf-8') as fp:
-            # TODO: Maybe consider catching exceptions here?
-            cfg = yaml.safe_load(fp)
-
-        # Per conversation, "the kubeconfigs you care about:
-        # admin, router, registry should all be single
-        # value". Following that advice we only grab the data for
-        # the user at index 0 in the 'users' list. There should
-        # not be more than one user.
-        c = cfg['users'][0]['user']['client-certificate-data']
-        (cert_subject,
-         cert_expiry_date,
-         time_remaining,
-         cert_serial) = load_and_handle_cert(c, now, base64decode=True, ans_module=module)
-
-        expire_check_result = {
-            'cert_cn': cert_subject,
-            'path': fp.name,
-            'expiry': cert_expiry_date,
-            'days_remaining': time_remaining.days,
-            'health': None,
-            'serial': cert_serial
-        }
-
-        classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
-
-    ######################################################################
-    # /Check service Kubeconfigs
-    ######################################################################
-
-    ######################################################################
-    # Check etcd certs
-    #
-    # Two things to check: 'external' etcd, and embedded etcd.
-    ######################################################################
-    # FIRST: The 'external' etcd
-    #
-    # Some values may be duplicated, make this a set for now so we
-    # unique them all
-    etcd_certs_to_check = set([])
-    etcd_certs = []
-    etcd_cert_params.append('dne')
-    try:
-        with io.open('/etc/etcd/etcd.conf', 'r', encoding='utf-8') as fp:
-            # Add dummy header section.
-            config = io.StringIO()
-            config.write(u'[ETCD]\n')
-            config.write(fp.read().replace('%', '%%'))
-            config.seek(0, os.SEEK_SET)
-
-            etcd_config = configparser.ConfigParser()
-            etcd_config.readfp(config)
-
-        for param in etcd_cert_params:
-            try:
-                etcd_certs_to_check.add(etcd_config.get('ETCD', param))
-            except configparser.NoOptionError:
-                # That parameter does not exist, oh well...
-                pass
-    except IOError:
-        # No etcd to see here, move along
-        pass
-
-    for etcd_cert in filter_paths(etcd_certs_to_check):
-        with io.open(etcd_cert, 'r', encoding='utf-8') as fp:
-            c = fp.read()
-            (cert_subject,
-             cert_expiry_date,
-             time_remaining,
-             cert_serial) = load_and_handle_cert(c, now, ans_module=module)
-
-            expire_check_result = {
-                'cert_cn': cert_subject,
-                'path': fp.name,
-                'expiry': cert_expiry_date,
-                'days_remaining': time_remaining.days,
-                'health': None,
-                'serial': cert_serial
-            }
-
-            classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
-
-    ######################################################################
-    # /Check etcd certs
-    ######################################################################
-
-    ######################################################################
-    # Check router/registry certs
-    #
-    # These are saved as secrets in etcd, so we cannot simply read a
-    # file to grab the data. Instead we subprocess out to the 'oc get'
-    # command. On non-masters this command will fail; that is expected,
-    # so we catch the exception.
-    ######################################################################
-    router_certs = []
-    registry_certs = []
-
-    ######################################################################
-    # First the router certs
-    try:
-        router_secrets_raw = subprocess.Popen('oc get -n default secret router-certs -o yaml'.split(),
-                                              stdout=subprocess.PIPE)
-        router_ds = yaml.load(router_secrets_raw.communicate()[0])
-        router_c = router_ds['data']['tls.crt']
-        router_path = router_ds['metadata']['selfLink']
-    except TypeError:
-        # YAML couldn't load the result, this is not a master
-        pass
-    except OSError:
-        # The OC command doesn't exist here. Move along.
-        pass
-    else:
-        (cert_subject,
-         cert_expiry_date,
-         time_remaining,
-         cert_serial) = load_and_handle_cert(router_c, now, base64decode=True, ans_module=module)
-
-        expire_check_result = {
-            'cert_cn': cert_subject,
-            'path': router_path,
-            'expiry': cert_expiry_date,
-            'days_remaining': time_remaining.days,
-            'health': None,
-            'serial': cert_serial
-        }
-
-        classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs)
-
-    ######################################################################
-    # Now for registry
-    try:
-        registry_secrets_raw = subprocess.Popen('oc get -n default secret registry-certificates -o yaml'.split(),
-                                                stdout=subprocess.PIPE)
-        registry_ds = yaml.load(registry_secrets_raw.communicate()[0])
-        registry_c = registry_ds['data']['registry.crt']
-        registry_path = registry_ds['metadata']['selfLink']
-    except TypeError:
-        # YAML couldn't load the result, this is not a master
-        pass
-    except OSError:
-        # The OC command doesn't exist here. Move along.
-        pass
-    else:
-        (cert_subject,
-         cert_expiry_date,
-         time_remaining,
-         cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True, ans_module=module)
-
-        expire_check_result = {
-            'cert_cn': cert_subject,
-            'path': registry_path,
-            'expiry': cert_expiry_date,
-            'days_remaining': time_remaining.days,
-            'health': None,
-            'serial': cert_serial
-        }
-
-        classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs)
-
-    ######################################################################
-    # /Check router/registry certs
-    ######################################################################
-
-    res = tabulate_summary(ocp_certs, kubeconfigs, etcd_certs, router_certs, registry_certs)
-    warn_certs = bool(res['expired'] + res['warning'])
-    msg = "Checked {count} total certificates. Expired/Warning/OK: {exp}/{warn}/{ok}. Warning window: {window} days".format(
-        count=res['total'],
-        exp=res['expired'],
-        warn=res['warning'],
-        ok=res['ok'],
-        window=int(module.params['warning_days']),
-    )
-
-    # By default we only return detailed information about expired or
-    # warning certificates. If show_all is true then we will print all
-    # the certificates examined.
-    if not module.params['show_all']:
-        check_results['ocp_certs'] = [crt for crt in ocp_certs if crt['health'] in ['expired', 'warning']]
-        check_results['kubeconfigs'] = [crt for crt in kubeconfigs if crt['health'] in ['expired', 'warning']]
-        check_results['etcd'] = [crt for crt in etcd_certs if crt['health'] in ['expired', 'warning']]
-        check_results['registry'] = [crt for crt in registry_certs if crt['health'] in ['expired', 'warning']]
-        check_results['router'] = [crt for crt in router_certs if crt['health'] in ['expired', 'warning']]
-    else:
-        check_results['ocp_certs'] = ocp_certs
-        check_results['kubeconfigs'] = kubeconfigs
-        check_results['etcd'] = etcd_certs
-        check_results['registry'] = registry_certs
-        check_results['router'] = router_certs
-
-    # Sort the final results to report in order of ascending safety
-    # time. That is to say, the certificates which will expire sooner
-    # will be at the front of the list and certificates which will
-    # expire later are at the end. Router and registry certs should be
-    # limited to just 1 result, so don't bother sorting those.
-    def cert_key(item):
-        ''' return the days_remaining key '''
-        return item['days_remaining']
-
-    check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key)
-    check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key)
-    check_results['etcd'] = sorted(check_results['etcd'], key=cert_key)
-
-    # This module will never change anything, but we might want to
-    # change the return code parameter if there is some catastrophic
-    # error we noticed earlier
-    module.exit_json(
-        check_results=check_results,
-        warn_certs=warn_certs,
-        summary=res,
-        msg=msg,
-        rc=0,
-        changed=False
-    )
-
-
-if __name__ == '__main__':
-    main()
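
The etcd.conf handling above relies on a classic trick: configparser cannot parse a section-less key=value file, so the module prepends a dummy [ETCD] header and doubles % characters to defuse interpolation. A minimal standalone sketch of just that technique (the path and option name are illustrative):

import configparser
import io

def read_sectionless_ini(path, section=u'ETCD'):
    '''Parse a key=value file with no section headers by prepending
    a dummy section, as the deleted module does for /etc/etcd/etcd.conf.'''
    with io.open(path, 'r', encoding='utf-8') as fp:
        buf = io.StringIO()
        buf.write(u'[%s]\n' % section)
        buf.write(fp.read().replace('%', '%%'))  # defuse interpolation
        buf.seek(0)
    parser = configparser.ConfigParser()
    parser.read_file(buf)  # readfp() above is the older Python 2 spelling
    return parser

# read_sectionless_ini('/etc/etcd/etcd.conf').get('ETCD', 'ETCD_CA_FILE')
# raises configparser.NoOptionError when the option is absent, which the
# module above swallows because every parameter is optional.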

+ 0 - 644
roles/lib_utils/library/repoquery.py

@@ -1,644 +0,0 @@
-#!/usr/bin/env python
-# pylint: disable=missing-docstring
-#     ___ ___ _  _ ___ ___    _ _____ ___ ___
-#    / __| __| \| | __| _ \  /_\_   _| __|   \
-#   | (_ | _|| .` | _||   / / _ \| | | _|| |) |
-#    \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
-#   |   \ / _ \  | \| |/ _ \_   _| | __|   \_ _|_   _|
-#   | |) | (_) | | .` | (_) || |   | _|| |) | |  | |
-#   |___/ \___/  |_|\_|\___/ |_|   |___|___/___| |_|
-#
-# Copyright 2016 Red Hat, Inc. and/or its affiliates
-# and other contributors as indicated by the @author tags.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
-
-# pylint: disable=wrong-import-order,wrong-import-position,unused-import
-
-from __future__ import print_function  # noqa: F401
-import copy  # noqa: F401
-import fcntl  # noqa: F401
-import json   # noqa: F401
-import os  # noqa: F401
-import re  # noqa: F401
-import shutil  # noqa: F401
-import tempfile  # noqa: F401
-import time  # noqa: F401
-
-try:
-    import ruamel.yaml as yaml  # noqa: F401
-except ImportError:
-    import yaml  # noqa: F401
-
-from ansible.module_utils.basic import AnsibleModule
-
-# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
-
-# -*- -*- -*- Begin included fragment: doc/repoquery -*- -*- -*-
-
-DOCUMENTATION = '''
----
-module: repoquery
-short_description: Query package information from Yum repositories
-description:
-  - Query package information from Yum repositories.
-options:
-  state:
-    description:
-    - The expected state. Currently only supports list.
-    required: false
-    default: list
-    choices: ["list"]
-    aliases: []
-  name:
-    description:
-    - The name of the package to query
-    required: true
-    default: None
-    aliases: []
-  query_type:
-    description:
-    - Narrows the packages queried based on this value.
-    - If repos, it narrows the query to repositories defined on the machine.
-    - If installed, it narrows the query to only packages installed on the machine.
-    - If available, it narrows the query to packages that are available to be installed.
-    - If recent, it narrows the query to only recently added packages.
-    - If updates, it narrows the query to only packages that are updates to existing installed packages.
-    - If extras, it narrows the query to packages that are not present in any of the available repositories.
-    - If all, it queries all of the above.
-    required: false
-    default: repos
-    aliases: []
-  verbose:
-    description:
-    - Shows more detail for the requested query.
-    required: false
-    default: false
-    aliases: []
-  show_duplicates:
-    description:
-    - Shows multiple versions of a package.
-    required: false
-    default: false
-    aliases: []
-  match_version:
-    description:
-    - Match the specific version given to the package.
-    required: false
-    default: None
-    aliases: []
-author:
-- "Matt Woodson <mwoodson@redhat.com>"
-extends_documentation_fragment: []
-'''
-
-EXAMPLES = '''
-# Example 1: Get bash versions
-  - name: Get bash version
-    repoquery:
-      name: bash
-      show_duplicates: True
-    register: bash_out
-
-# Results:
-#    ok: [localhost] => {
-#        "bash_out": {
-#            "changed": false,
-#            "results": {
-#                "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates bash",
-#                "package_found": true,
-#                "package_name": "bash",
-#                "returncode": 0,
-#                "versions": {
-#                    "available_versions": [
-#                        "4.2.45",
-#                        "4.2.45",
-#                        "4.2.45",
-#                        "4.2.46",
-#                        "4.2.46",
-#                        "4.2.46",
-#                        "4.2.46"
-#                    ],
-#                    "available_versions_full": [
-#                        "4.2.45-5.el7",
-#                        "4.2.45-5.el7_0.2",
-#                        "4.2.45-5.el7_0.4",
-#                        "4.2.46-12.el7",
-#                        "4.2.46-19.el7",
-#                        "4.2.46-20.el7_2",
-#                        "4.2.46-21.el7_3"
-#                    ],
-#                    "latest": "4.2.46",
-#                    "latest_full": "4.2.46-21.el7_3"
-#                }
-#            },
-#            "state": "present"
-#        }
-#    }
-
-
-
-# Example 2: Get bash versions verbosely
-  - name: Get bash versions verbosely
-    repoquery:
-      name: bash
-      show_duplicates: True
-      verbose: True
-    register: bash_out
-
-# Results:
-#    ok: [localhost] => {
-#        "bash_out": {
-#            "changed": false,
-#            "results": {
-#                "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates bash",
-#                "package_found": true,
-#                "package_name": "bash",
-#                "raw_versions": {
-#                    "4.2.45-5.el7": {
-#                        "arch": "x86_64",
-#                        "release": "5.el7",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.45",
-#                        "version_release": "4.2.45-5.el7"
-#                    },
-#                    "4.2.45-5.el7_0.2": {
-#                        "arch": "x86_64",
-#                        "release": "5.el7_0.2",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.45",
-#                        "version_release": "4.2.45-5.el7_0.2"
-#                    },
-#                    "4.2.45-5.el7_0.4": {
-#                        "arch": "x86_64",
-#                        "release": "5.el7_0.4",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.45",
-#                        "version_release": "4.2.45-5.el7_0.4"
-#                    },
-#                    "4.2.46-12.el7": {
-#                        "arch": "x86_64",
-#                        "release": "12.el7",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.46",
-#                        "version_release": "4.2.46-12.el7"
-#                    },
-#                    "4.2.46-19.el7": {
-#                        "arch": "x86_64",
-#                        "release": "19.el7",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.46",
-#                        "version_release": "4.2.46-19.el7"
-#                    },
-#                    "4.2.46-20.el7_2": {
-#                        "arch": "x86_64",
-#                        "release": "20.el7_2",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.46",
-#                        "version_release": "4.2.46-20.el7_2"
-#                    },
-#                    "4.2.46-21.el7_3": {
-#                        "arch": "x86_64",
-#                        "release": "21.el7_3",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.46",
-#                        "version_release": "4.2.46-21.el7_3"
-#                    }
-#                },
-#                "results": "4.2.45|5.el7|x86_64|rhel-7-server-rpms|4.2.45-5.el7\n4.2.45|5.el7_0.2|x86_64|rhel-7-server-rpms|4.2.45-5.el7_0.2\n4.2.45|5.el7_0.4|x86_64|rhel-7-server-rpms|4.2.45-5.el7_0.4\n4.2.46|12.el7|x86_64|rhel-7-server-rpms|4.2.46-12.el7\n4.2.46|19.el7|x86_64|rhel-7-server-rpms|4.2.46-19.el7\n4.2.46|20.el7_2|x86_64|rhel-7-server-rpms|4.2.46-20.el7_2\n4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3\n",
-#                "returncode": 0,
-#                "versions": {
-#                    "available_versions": [
-#                        "4.2.45",
-#                        "4.2.45",
-#                        "4.2.45",
-#                        "4.2.46",
-#                        "4.2.46",
-#                        "4.2.46",
-#                        "4.2.46"
-#                    ],
-#                    "available_versions_full": [
-#                        "4.2.45-5.el7",
-#                        "4.2.45-5.el7_0.2",
-#                        "4.2.45-5.el7_0.4",
-#                        "4.2.46-12.el7",
-#                        "4.2.46-19.el7",
-#                        "4.2.46-20.el7_2",
-#                        "4.2.46-21.el7_3"
-#                    ],
-#                    "latest": "4.2.46",
-#                    "latest_full": "4.2.46-21.el7_3"
-#                }
-#            },
-#            "state": "present"
-#        }
-#    }
-
-# Example 3: Match a specific version
-  - name: matched versions repoquery test
-    repoquery:
-      name: atomic-openshift
-      show_duplicates: True
-      match_version: 3.3
-    register: openshift_out
-
-# Result:
-
-#    ok: [localhost] => {
-#        "openshift_out": {
-#            "changed": false,
-#            "results": {
-#                "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates atomic-openshift",
-#                "package_found": true,
-#                "package_name": "atomic-openshift",
-#                "returncode": 0,
-#                "versions": {
-#                    "available_versions": [
-#                        "3.2.0.43",
-#                        "3.2.1.23",
-#                        "3.3.0.32",
-#                        "3.3.0.34",
-#                        "3.3.0.35",
-#                        "3.3.1.3",
-#                        "3.3.1.4",
-#                        "3.3.1.5",
-#                        "3.3.1.7",
-#                        "3.4.0.39"
-#                    ],
-#                    "available_versions_full": [
-#                        "3.2.0.43-1.git.0.672599f.el7",
-#                        "3.2.1.23-1.git.0.88a7a1d.el7",
-#                        "3.3.0.32-1.git.0.37bd7ea.el7",
-#                        "3.3.0.34-1.git.0.83f306f.el7",
-#                        "3.3.0.35-1.git.0.d7bd9b6.el7",
-#                        "3.3.1.3-1.git.0.86dc49a.el7",
-#                        "3.3.1.4-1.git.0.7c8657c.el7",
-#                        "3.3.1.5-1.git.0.62700af.el7",
-#                        "3.3.1.7-1.git.0.0988966.el7",
-#                        "3.4.0.39-1.git.0.5f32f06.el7"
-#                    ],
-#                    "latest": "3.4.0.39",
-#                    "latest_full": "3.4.0.39-1.git.0.5f32f06.el7",
-#                    "matched_version_found": true,
-#                    "matched_version_full_latest": "3.3.1.7-1.git.0.0988966.el7",
-#                    "matched_version_latest": "3.3.1.7",
-#                    "matched_versions": [
-#                        "3.3.0.32",
-#                        "3.3.0.34",
-#                        "3.3.0.35",
-#                        "3.3.1.3",
-#                        "3.3.1.4",
-#                        "3.3.1.5",
-#                        "3.3.1.7"
-#                    ],
-#                    "matched_versions_full": [
-#                        "3.3.0.32-1.git.0.37bd7ea.el7",
-#                        "3.3.0.34-1.git.0.83f306f.el7",
-#                        "3.3.0.35-1.git.0.d7bd9b6.el7",
-#                        "3.3.1.3-1.git.0.86dc49a.el7",
-#                        "3.3.1.4-1.git.0.7c8657c.el7",
-#                        "3.3.1.5-1.git.0.62700af.el7",
-#                        "3.3.1.7-1.git.0.0988966.el7"
-#                    ],
-#                    "requested_match_version": "3.3"
-#                }
-#            },
-#            "state": "present"
-#        }
-#    }
-
-'''
-
-# -*- -*- -*- End included fragment: doc/repoquery -*- -*- -*-
-
-# -*- -*- -*- Begin included fragment: lib/repoquery.py -*- -*- -*-
-
-'''
-   class that wraps the repoquery commands in a subprocess
-'''
-
-# pylint: disable=too-many-lines,wrong-import-position,wrong-import-order
-
-from collections import defaultdict  # noqa: E402
-
-
-# pylint: disable=no-name-in-module,import-error
-# Reason: pylint errors with "No name 'version' in module 'distutils'".
-#         This is a bug: https://github.com/PyCQA/pylint/issues/73
-from distutils.version import LooseVersion  # noqa: E402
-
-import subprocess  # noqa: E402
-
-
-class RepoqueryCLIError(Exception):
-    '''Exception class for repoquerycli'''
-    pass
-
-
-def _run(cmds):
-    ''' Actually executes the command. This makes mocking easier. '''
-    proc = subprocess.Popen(cmds,
-                            stdin=subprocess.PIPE,
-                            stdout=subprocess.PIPE,
-                            stderr=subprocess.PIPE)
-
-    stdout, stderr = proc.communicate()
-
-    return proc.returncode, stdout, stderr
-
-
-# pylint: disable=too-few-public-methods
-class RepoqueryCLI(object):
-    ''' Class to wrap the command line tools '''
-    def __init__(self,
-                 verbose=False):
-        ''' Constructor for RepoqueryCLI '''
-        self.verbose = verbose
-        self.verbose = True
-
-    def _repoquery_cmd(self, cmd, output=False, output_type='json'):
-        '''Base command for repoquery '''
-        cmds = ['/usr/bin/repoquery', '--plugins', '--quiet']
-
-        cmds.extend(cmd)
-
-        rval = {}
-        results = ''
-        err = None
-
-        if self.verbose:
-            print(' '.join(cmds))
-
-        returncode, stdout, stderr = _run(cmds)
-
-        rval = {
-            "returncode": returncode,
-            "results": results,
-            "cmd": ' '.join(cmds),
-        }
-
-        if returncode == 0:
-            if output:
-                if output_type == 'raw':
-                    rval['results'] = stdout
-
-            if self.verbose:
-                print(stdout)
-                print(stderr)
-
-            if err:
-                rval.update({
-                    "err": err,
-                    "stderr": stderr,
-                    "stdout": stdout,
-                    "cmd": cmds
-                })
-
-        else:
-            rval.update({
-                "stderr": stderr,
-                "stdout": stdout,
-                "results": {},
-            })
-
-        return rval
-
-# -*- -*- -*- End included fragment: lib/repoquery.py -*- -*- -*-
-
-# -*- -*- -*- Begin included fragment: class/repoquery.py -*- -*- -*-
-
-
-class Repoquery(RepoqueryCLI):
-    ''' Class to wrap the repoquery
-    '''
-    # pylint: disable=too-many-arguments,too-many-instance-attributes
-    def __init__(self, name, query_type, show_duplicates,
-                 match_version, ignore_excluders, verbose):
-        ''' Constructor for Repoquery '''
-        super(Repoquery, self).__init__(None)
-        self.name = name
-        self.query_type = query_type
-        self.show_duplicates = show_duplicates
-        self.match_version = match_version
-        self.ignore_excluders = ignore_excluders
-        self.verbose = verbose
-
-        if self.match_version:
-            self.show_duplicates = True
-
-        self.query_format = "%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}"
-
-        self.tmp_file = None
-
-    def build_cmd(self):
-        ''' build the repoquery cmd options '''
-
-        repo_cmd = []
-
-        repo_cmd.append("--pkgnarrow=" + self.query_type)
-        repo_cmd.append("--queryformat=" + self.query_format)
-
-        if self.show_duplicates:
-            repo_cmd.append('--show-duplicates')
-
-        if self.ignore_excluders:
-            repo_cmd.append('--config=' + self.tmp_file.name)
-
-        repo_cmd.append(self.name)
-
-        return repo_cmd
-
-    @staticmethod
-    def process_versions(query_output):
-        ''' format the package data into something that can be presented '''
-
-        version_dict = defaultdict(dict)
-
-        for version in query_output.decode().split('\n'):
-            pkg_info = version.split("|")
-
-            pkg_version = {}
-            pkg_version['version'] = pkg_info[0]
-            pkg_version['release'] = pkg_info[1]
-            pkg_version['arch'] = pkg_info[2]
-            pkg_version['repo'] = pkg_info[3]
-            pkg_version['version_release'] = pkg_info[4]
-
-            version_dict[pkg_info[4]] = pkg_version
-
-        return version_dict
-
-    def format_versions(self, formatted_versions):
-        ''' Gather and present the versions of each package '''
-
-        versions_dict = {}
-        versions_dict['available_versions_full'] = list(formatted_versions.keys())
-
-        # set the match version, if called
-        if self.match_version:
-            versions_dict['matched_versions_full'] = []
-            versions_dict['requested_match_version'] = self.match_version
-            versions_dict['matched_versions'] = []
-
-        # get the "full version (version - release)
-        versions_dict['available_versions_full'].sort(key=LooseVersion)
-        versions_dict['latest_full'] = versions_dict['available_versions_full'][-1]
-
-        # get the "short version (version)
-        versions_dict['available_versions'] = []
-        for version in versions_dict['available_versions_full']:
-            versions_dict['available_versions'].append(formatted_versions[version]['version'])
-
-            if self.match_version:
-                if version.startswith(self.match_version):
-                    versions_dict['matched_versions_full'].append(version)
-                    versions_dict['matched_versions'].append(formatted_versions[version]['version'])
-
-        versions_dict['available_versions'].sort(key=LooseVersion)
-        versions_dict['latest'] = versions_dict['available_versions'][-1]
-
-        # finish up the matched version
-        if self.match_version:
-            if versions_dict['matched_versions_full']:
-                versions_dict['matched_version_found'] = True
-                versions_dict['matched_versions'].sort(key=LooseVersion)
-                versions_dict['matched_version_latest'] = versions_dict['matched_versions'][-1]
-                versions_dict['matched_version_full_latest'] = versions_dict['matched_versions_full'][-1]
-            else:
-                versions_dict['matched_version_found'] = False
-                versions_dict['matched_versions'] = []
-                versions_dict['matched_version_latest'] = ""
-                versions_dict['matched_version_full_latest'] = ""
-
-        return versions_dict
-
-    def repoquery(self):
-        '''perform a repoquery '''
-
-        if self.ignore_excluders:
-            # Copy yum.conf to a temp file with every exclude= line
-            # dropped so that no packages are excluded from the query
-            self.tmp_file = tempfile.NamedTemporaryFile()
-
-            with open("/etc/yum.conf", "r") as file_handler:
-                yum_conf_lines = file_handler.readlines()
-
-            yum_conf_lines = [l for l in yum_conf_lines if not l.startswith("exclude=")]
-
-            with open(self.tmp_file.name, "w") as file_handler:
-                file_handler.writelines(yum_conf_lines)
-                file_handler.flush()
-
-        repoquery_cmd = self.build_cmd()
-
-        rval = self._repoquery_cmd(repoquery_cmd, True, 'raw')
-
-        # check to see if there are actual results
-        rval['package_name'] = self.name
-        if rval['results']:
-            processed_versions = Repoquery.process_versions(rval['results'].strip())
-            formatted_versions = self.format_versions(processed_versions)
-
-            rval['package_found'] = True
-            rval['versions'] = formatted_versions
-
-            if self.verbose:
-                rval['raw_versions'] = processed_versions
-            else:
-                del rval['results']
-
-        # No packages found
-        else:
-            rval['package_found'] = False
-
-        if self.ignore_excluders:
-            self.tmp_file.close()
-
-        return rval
-
-    @staticmethod
-    def run_ansible(params, check_mode):
-        '''run the ansible idempotent code'''
-
-        repoquery = Repoquery(
-            params['name'],
-            params['query_type'],
-            params['show_duplicates'],
-            params['match_version'],
-            params['ignore_excluders'],
-            params['verbose'],
-        )
-
-        state = params['state']
-
-        if state == 'list':
-            results = repoquery.repoquery()
-
-            if results['returncode'] != 0:
-                return {'failed': True,
-                        'msg': results}
-
-            return {'changed': False, 'results': results, 'state': 'list', 'check_mode': check_mode}
-
-        return {'failed': True,
-                'changed': False,
-                'msg': 'Unknown state passed. %s' % state,
-                'state': 'unknown'}
-
-# -*- -*- -*- End included fragment: class/repoquery.py -*- -*- -*-
-
-# -*- -*- -*- Begin included fragment: ansible/repoquery.py -*- -*- -*-
-
-
-def main():
-    '''
-    ansible repoquery module
-    '''
-    module = AnsibleModule(
-        argument_spec=dict(
-            state=dict(default='list', type='str', choices=['list']),
-            name=dict(default=None, required=True, type='str'),
-            query_type=dict(default='repos', required=False, type='str',
-                            choices=[
-                                'installed', 'available', 'recent',
-                                'updates', 'extras', 'all', 'repos'
-                            ]),
-            verbose=dict(default=False, required=False, type='bool'),
-            show_duplicates=dict(default=False, required=False, type='bool'),
-            match_version=dict(default=None, required=False, type='str'),
-            ignore_excluders=dict(default=False, required=False, type='bool'),
-            retries=dict(default=4, required=False, type='int'),
-            retry_interval=dict(default=5, required=False, type='int'),
-        ),
-        supports_check_mode=False,
-        required_if=[('show_duplicates', True, ['name'])],
-    )
-
-    tries = 1
-    while True:
-        rval = Repoquery.run_ansible(module.params, module.check_mode)
-        if 'failed' not in rval:
-            module.exit_json(**rval)
-        elif tries > module.params['retries']:
-            module.fail_json(**rval)
-        tries += 1
-        time.sleep(module.params['retry_interval'])
-
-
-if __name__ == "__main__":
-    main()
-
-# -*- -*- -*- End included fragment: ansible/repoquery.py -*- -*- -*-
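
For reference, the core of the version handling above is just pipe-delimited parsing plus a LooseVersion sort; a condensed sketch, with sample data taken from the EXAMPLES block:

from distutils.version import LooseVersion

def latest_full_version(raw_output):
    '''Return the newest version-release from repoquery output produced
    with the query format string used above.'''
    rows = [line.split('|') for line in raw_output.strip().split('\n')]
    # field 4 is "%{version}-%{release}"
    return sorted((row[4] for row in rows), key=LooseVersion)[-1]

sample = ('4.2.45|5.el7|x86_64|rhel-7-server-rpms|4.2.45-5.el7\n'
          '4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3\n')
print(latest_full_version(sample))  # -> 4.2.46-21.el7_3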

+ 0 - 93
roles/lib_utils/lookup_plugins/openshift_master_facts_default_predicates.py

@@ -1,93 +0,0 @@
-# pylint: disable=missing-docstring
-
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-
-
-class LookupModule(LookupBase):
-    # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
-
-    def run(self, terms, variables=None, regions_enabled=True, short_version=None,
-            **kwargs):
-
-        predicates = []
-
-        if short_version is None:
-            if 'openshift_release' in variables:
-                release = variables['openshift_release']
-                if release.startswith('v'):
-                    short_version = release[1:]
-                else:
-                    short_version = release
-                short_version = '.'.join(short_version.split('.')[0:2])
-            elif 'openshift_version' in variables:
-                version = variables['openshift_version']
-                short_version = '.'.join(version.split('.')[0:2])
-            else:
-                # pylint: disable=line-too-long
-                raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
-
-        if short_version not in ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11', '4.0', '4.1', '4.2', 'latest']:
-            raise AnsibleError("Unknown short_version %s" % short_version)
-
-        if short_version == 'latest':
-            short_version = '4.0'
-
-        # Predicates ordered according to OpenShift Origin source:
-        # origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
-
-        if short_version in ['3.6']:
-            predicates.extend([
-                {'name': 'NoVolumeZoneConflict'},
-                {'name': 'MaxEBSVolumeCount'},
-                {'name': 'MaxGCEPDVolumeCount'},
-                {'name': 'MatchInterPodAffinity'},
-                {'name': 'NoDiskConflict'},
-                {'name': 'GeneralPredicates'},
-                {'name': 'PodToleratesNodeTaints'},
-                {'name': 'CheckNodeMemoryPressure'},
-                {'name': 'CheckNodeDiskPressure'},
-            ])
-
-        if short_version in ['3.7', '3.8']:
-            predicates.extend([
-                {'name': 'NoVolumeZoneConflict'},
-                {'name': 'MaxEBSVolumeCount'},
-                {'name': 'MaxGCEPDVolumeCount'},
-                {'name': 'MaxAzureDiskVolumeCount'},
-                {'name': 'MatchInterPodAffinity'},
-                {'name': 'NoDiskConflict'},
-                {'name': 'GeneralPredicates'},
-                {'name': 'PodToleratesNodeTaints'},
-                {'name': 'CheckNodeMemoryPressure'},
-                {'name': 'CheckNodeDiskPressure'},
-                {'name': 'NoVolumeNodeConflict'},
-            ])
-
-        if short_version in ['3.9', '3.10', '3.11', '4.0', '4.1', '4.2']:
-            predicates.extend([
-                {'name': 'NoVolumeZoneConflict'},
-                {'name': 'MaxEBSVolumeCount'},
-                {'name': 'MaxGCEPDVolumeCount'},
-                {'name': 'MaxAzureDiskVolumeCount'},
-                {'name': 'MatchInterPodAffinity'},
-                {'name': 'NoDiskConflict'},
-                {'name': 'GeneralPredicates'},
-                {'name': 'PodToleratesNodeTaints'},
-                {'name': 'CheckNodeMemoryPressure'},
-                {'name': 'CheckNodeDiskPressure'},
-                {'name': 'CheckVolumeBinding'},
-            ])
-
-        if regions_enabled:
-            region_predicate = {
-                'name': 'Region',
-                'argument': {
-                    'serviceAffinity': {
-                        'labels': ['region']
-                    }
-                }
-            }
-            predicates.append(region_predicate)
-
-        return predicates
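
The short_version derivation at the top of run() is shared with the priorities lookup below; isolated, it is just:

def derive_short_version(variables):
    '''Reduce openshift_release/openshift_version to an X.Y string,
    mirroring the logic above (sketch only).'''
    if 'openshift_release' in variables:
        release = variables['openshift_release']
        release = release[1:] if release.startswith('v') else release
        return '.'.join(release.split('.')[:2])
    if 'openshift_version' in variables:
        return '.'.join(variables['openshift_version'].split('.')[:2])
    raise ValueError('openshift_release or openshift_version is required')

print(derive_short_version({'openshift_release': 'v3.11.43'}))  # -> 3.11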

+ 0 - 59
roles/lib_utils/lookup_plugins/openshift_master_facts_default_priorities.py

@@ -1,59 +0,0 @@
-# pylint: disable=missing-docstring
-
-from ansible.errors import AnsibleError
-from ansible.plugins.lookup import LookupBase
-
-
-class LookupModule(LookupBase):
-    # pylint: disable=too-many-branches,too-many-statements,too-many-arguments
-
-    def run(self, terms, variables=None, zones_enabled=True, short_version=None,
-            **kwargs):
-
-        priorities = []
-
-        if short_version is None:
-            if 'openshift_release' in variables:
-                release = variables['openshift_release']
-                if release.startswith('v'):
-                    short_version = release[1:]
-                else:
-                    short_version = release
-                short_version = '.'.join(short_version.split('.')[0:2])
-            elif 'openshift_version' in variables:
-                version = variables['openshift_version']
-                short_version = '.'.join(version.split('.')[0:2])
-            else:
-                # pylint: disable=line-too-long
-                raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
-
-        if short_version not in ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11', '4.0', '4.1', '4.2', 'latest']:
-            raise AnsibleError("Unknown short_version %s" % short_version)
-
-        if short_version == 'latest':
-            short_version = '4.0'
-
-        if short_version in ['3.6', '3.7', '3.8', '3.9', '3.10', '3.11', '4.0', '4.1', '4.2']:
-            priorities.extend([
-                {'name': 'SelectorSpreadPriority', 'weight': 1},
-                {'name': 'InterPodAffinityPriority', 'weight': 1},
-                {'name': 'LeastRequestedPriority', 'weight': 1},
-                {'name': 'BalancedResourceAllocation', 'weight': 1},
-                {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
-                {'name': 'NodeAffinityPriority', 'weight': 1},
-                {'name': 'TaintTolerationPriority', 'weight': 1}
-            ])
-
-        if zones_enabled:
-            zone_priority = {
-                'name': 'Zone',
-                'argument': {
-                    'serviceAntiAffinity': {
-                        'label': 'zone'
-                    }
-                },
-                'weight': 2
-            }
-            priorities.append(zone_priority)
-
-        return priorities
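
Because LookupBase accepts no-argument construction, plugins like this one can be exercised directly, which is how the deleted unit tests drove them. A hedged sketch, assuming Ansible is installed and the plugin file is importable via sys.path:

import sys
sys.path.insert(0, 'roles/lib_utils/lookup_plugins')
from openshift_master_facts_default_priorities import LookupModule

priorities = LookupModule().run([], variables={'openshift_release': 'v3.11'},
                                zones_enabled=True)
for priority in priorities:
    print(priority['name'], priority.get('weight'))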

+ 0 - 41
roles/lib_utils/src/ansible/repoquery.py

@@ -1,41 +0,0 @@
-# pylint: skip-file
-# flake8: noqa
-
-
-def main():
-    '''
-    ansible repoquery module
-    '''
-    module = AnsibleModule(
-        argument_spec=dict(
-            state=dict(default='list', type='str', choices=['list']),
-            name=dict(default=None, required=True, type='str'),
-            query_type=dict(default='repos', required=False, type='str',
-                            choices=[
-                                'installed', 'available', 'recent',
-                                'updates', 'extras', 'all', 'repos'
-                            ]),
-            verbose=dict(default=False, required=False, type='bool'),
-            show_duplicates=dict(default=False, required=False, type='bool'),
-            match_version=dict(default=None, required=False, type='str'),
-            ignore_excluders=dict(default=False, required=False, type='bool'),
-            retries=dict(default=4, required=False, type='int'),
-            retry_interval=dict(default=5, required=False, type='int'),
-        ),
-        supports_check_mode=False,
-        required_if=[('show_duplicates', True, ['name'])],
-    )
-
-    tries = 1
-    while True:
-        rval = Repoquery.run_ansible(module.params, module.check_mode)
-        if 'failed' not in rval:
-            module.exit_json(**rval)
-        elif tries > module.params['retries']:
-            module.fail_json(**rval)
-        tries += 1
-        time.sleep(module.params['retry_interval'])
-
-
-if __name__ == "__main__":
-    main()
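
The retry loop in main() is a pattern worth noting: run_ansible() signals failure through a 'failed' key rather than an exception, so the wrapper polls until success or the retry budget is exhausted. The same idea as a standalone helper (names are illustrative):

import time

def retry(call, retries=4, retry_interval=5):
    '''Call `call` until its result lacks a 'failed' key, sleeping
    between attempts, as main() above does with Repoquery.run_ansible.'''
    tries = 1
    while True:
        rval = call()
        if 'failed' not in rval:
            return rval
        if tries > retries:
            raise RuntimeError(rval.get('msg', 'retries exhausted'))
        tries += 1
        time.sleep(retry_interval)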

+ 0 - 180
roles/lib_utils/src/class/repoquery.py

@@ -1,180 +0,0 @@
-# pylint: skip-file
-# flake8: noqa
-
-
-class Repoquery(RepoqueryCLI):
-    ''' Class to wrap the repoquery
-    '''
-    # pylint: disable=too-many-arguments,too-many-instance-attributes
-    def __init__(self, name, query_type, show_duplicates,
-                 match_version, ignore_excluders, verbose):
-        ''' Constructor for Repoquery '''
-        super(Repoquery, self).__init__(None)
-        self.name = name
-        self.query_type = query_type
-        self.show_duplicates = show_duplicates
-        self.match_version = match_version
-        self.ignore_excluders = ignore_excluders
-        self.verbose = verbose
-
-        if self.match_version:
-            self.show_duplicates = True
-
-        self.query_format = "%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}"
-
-        self.tmp_file = None
-
-    def build_cmd(self):
-        ''' build the repoquery cmd options '''
-
-        repo_cmd = []
-
-        repo_cmd.append("--pkgnarrow=" + self.query_type)
-        repo_cmd.append("--queryformat=" + self.query_format)
-
-        if self.show_duplicates:
-            repo_cmd.append('--show-duplicates')
-
-        if self.ignore_excluders:
-            repo_cmd.append('--config=' + self.tmp_file.name)
-
-        repo_cmd.append(self.name)
-
-        return repo_cmd
-
-    @staticmethod
-    def process_versions(query_output):
-        ''' format the package data into something that can be presented '''
-
-        version_dict = defaultdict(dict)
-
-        for version in query_output.decode().split('\n'):
-            pkg_info = version.split("|")
-
-            pkg_version = {}
-            pkg_version['version'] = pkg_info[0]
-            pkg_version['release'] = pkg_info[1]
-            pkg_version['arch'] = pkg_info[2]
-            pkg_version['repo'] = pkg_info[3]
-            pkg_version['version_release'] = pkg_info[4]
-
-            version_dict[pkg_info[4]] = pkg_version
-
-        return version_dict
-
-    def format_versions(self, formatted_versions):
-        ''' Gather and present the versions of each package '''
-
-        versions_dict = {}
-        versions_dict['available_versions_full'] = list(formatted_versions.keys())
-
-        # set the match version, if called
-        if self.match_version:
-            versions_dict['matched_versions_full'] = []
-            versions_dict['requested_match_version'] = self.match_version
-            versions_dict['matched_versions'] = []
-
-        # get the "full version (version - release)
-        versions_dict['available_versions_full'].sort(key=LooseVersion)
-        versions_dict['latest_full'] = versions_dict['available_versions_full'][-1]
-
-        # get the "short version (version)
-        versions_dict['available_versions'] = []
-        for version in versions_dict['available_versions_full']:
-            versions_dict['available_versions'].append(formatted_versions[version]['version'])
-
-            if self.match_version:
-                if version.startswith(self.match_version):
-                    versions_dict['matched_versions_full'].append(version)
-                    versions_dict['matched_versions'].append(formatted_versions[version]['version'])
-
-        versions_dict['available_versions'].sort(key=LooseVersion)
-        versions_dict['latest'] = versions_dict['available_versions'][-1]
-
-        # finish up the matched version
-        if self.match_version:
-            if versions_dict['matched_versions_full']:
-                versions_dict['matched_version_found'] = True
-                versions_dict['matched_versions'].sort(key=LooseVersion)
-                versions_dict['matched_version_latest'] = versions_dict['matched_versions'][-1]
-                versions_dict['matched_version_full_latest'] = versions_dict['matched_versions_full'][-1]
-            else:
-                versions_dict['matched_version_found'] = False
-                versions_dict['matched_versions'] = []
-                versions_dict['matched_version_latest'] = ""
-                versions_dict['matched_version_full_latest'] = ""
-
-        return versions_dict
-
-    def repoquery(self):
-        '''perform a repoquery '''
-
-        if self.ignore_excluders:
-            # Copy yum.conf to a temp file with every exclude= line
-            # dropped so that no packages are excluded from the query
-            self.tmp_file = tempfile.NamedTemporaryFile()
-
-            with open("/etc/yum.conf", "r") as file_handler:
-                yum_conf_lines = file_handler.readlines()
-
-            yum_conf_lines = [l for l in yum_conf_lines if not l.startswith("exclude=")]
-
-            with open(self.tmp_file.name, "w") as file_handler:
-                file_handler.writelines(yum_conf_lines)
-                file_handler.flush()
-
-        repoquery_cmd = self.build_cmd()
-
-        rval = self._repoquery_cmd(repoquery_cmd, True, 'raw')
-
-        # check to see if there are actual results
-        rval['package_name'] = self.name
-        if rval['results']:
-            processed_versions = Repoquery.process_versions(rval['results'].strip())
-            formatted_versions = self.format_versions(processed_versions)
-
-            rval['package_found'] = True
-            rval['versions'] = formatted_versions
-
-            if self.verbose:
-                rval['raw_versions'] = processed_versions
-            else:
-                del rval['results']
-
-        # No packages found
-        else:
-            rval['package_found'] = False
-
-        if self.ignore_excluders:
-            self.tmp_file.close()
-
-        return rval
-
-    @staticmethod
-    def run_ansible(params, check_mode):
-        '''run the ansible idempotent code'''
-
-        repoquery = Repoquery(
-            params['name'],
-            params['query_type'],
-            params['show_duplicates'],
-            params['match_version'],
-            params['ignore_excluders'],
-            params['verbose'],
-        )
-
-        state = params['state']
-
-        if state == 'list':
-            results = repoquery.repoquery()
-
-            if results['returncode'] != 0:
-                return {'failed': True,
-                        'msg': results}
-
-            return {'changed': False, 'results': results, 'state': 'list', 'check_mode': check_mode}
-
-        return {'failed': True,
-                'changed': False,
-                'msg': 'Unknown state passed. %s' % state,
-                'state': 'unknown'}
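
The ignore_excluders branch of repoquery() above works around yum excluders by querying against a scrubbed copy of yum.conf. A minimal sketch of just that step, assuming the same file layout:

import tempfile

def yum_conf_without_excludes(src='/etc/yum.conf'):
    '''Copy yum.conf into a temp file with every exclude= line dropped;
    pass tmp.name as --config= to repoquery, then close it when done
    (closing a NamedTemporaryFile deletes it).'''
    tmp = tempfile.NamedTemporaryFile(mode='w')
    with open(src, 'r') as fh:
        for line in fh:
            if not line.startswith('exclude='):
                tmp.write(line)
    tmp.flush()
    return tmp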

+ 0 - 275
roles/lib_utils/src/doc/repoquery

@@ -1,275 +0,0 @@
-# flake8: noqa
-# pylint: skip-file
-
-DOCUMENTATION = '''
----
-module: repoquery
-short_description: Query package information from Yum repositories
-description:
-  - Query package information from Yum repositories.
-options:
-  state:
-    description:
-    - The expected state. Currently only supports list.
-    required: false
-    default: list
-    choices: ["list"]
-    aliases: []
-  name:
-    description:
-    - The name of the package to query
-    required: true
-    default: None
-    aliases: []
-  query_type:
-    description:
-    - Narrows the packages queried based on this value.
-    - If repos, it narrows the query to repositories defined on the machine.
-    - If installed, it narrows the query to only packages installed on the machine.
-    - If available, it narrows the query to packages that are available to be installed.
-    - If recent, it narrows the query to only recently added packages.
-    - If updates, it narrows the query to only packages that are updates to existing installed packages.
-    - If extras, it narrows the query to packages that are not present in any of the available repositories.
-    - If all, it queries all of the above.
-    required: false
-    default: repos
-    aliases: []
-  verbose:
-    description:
-    - Shows more detail for the requested query.
-    required: false
-    default: false
-    aliases: []
-  show_duplicates:
-    description:
-    - Shows multiple versions of a package.
-    required: false
-    default: false
-    aliases: []
-  match_version:
-    description:
-    - Match the specific version given to the package.
-    required: false
-    default: None
-    aliases: []
-author:
-- "Matt Woodson <mwoodson@redhat.com>"
-extends_documentation_fragment: []
-'''
-
-EXAMPLES = '''
-# Example 1: Get bash versions
-  - name: Get bash version
-    repoquery:
-      name: bash
-      show_duplicates: True
-    register: bash_out
-
-# Results:
-#    ok: [localhost] => {
-#        "bash_out": {
-#            "changed": false,
-#            "results": {
-#                "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates bash",
-#                "package_found": true,
-#                "package_name": "bash",
-#                "returncode": 0,
-#                "versions": {
-#                    "available_versions": [
-#                        "4.2.45",
-#                        "4.2.45",
-#                        "4.2.45",
-#                        "4.2.46",
-#                        "4.2.46",
-#                        "4.2.46",
-#                        "4.2.46"
-#                    ],
-#                    "available_versions_full": [
-#                        "4.2.45-5.el7",
-#                        "4.2.45-5.el7_0.2",
-#                        "4.2.45-5.el7_0.4",
-#                        "4.2.46-12.el7",
-#                        "4.2.46-19.el7",
-#                        "4.2.46-20.el7_2",
-#                        "4.2.46-21.el7_3"
-#                    ],
-#                    "latest": "4.2.46",
-#                    "latest_full": "4.2.46-21.el7_3"
-#                }
-#            },
-#            "state": "present"
-#        }
-#    }
-
-
-
-# Example 2: Get bash versions verbosely
-  - name: Get bash versions verbosely
-    repoquery:
-      name: bash
-      show_duplicates: True
-      verbose: True
-    register: bash_out
-
-# Results:
-#    ok: [localhost] => {
-#        "bash_out": {
-#            "changed": false,
-#            "results": {
-#                "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates bash",
-#                "package_found": true,
-#                "package_name": "bash",
-#                "raw_versions": {
-#                    "4.2.45-5.el7": {
-#                        "arch": "x86_64",
-#                        "release": "5.el7",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.45",
-#                        "version_release": "4.2.45-5.el7"
-#                    },
-#                    "4.2.45-5.el7_0.2": {
-#                        "arch": "x86_64",
-#                        "release": "5.el7_0.2",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.45",
-#                        "version_release": "4.2.45-5.el7_0.2"
-#                    },
-#                    "4.2.45-5.el7_0.4": {
-#                        "arch": "x86_64",
-#                        "release": "5.el7_0.4",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.45",
-#                        "version_release": "4.2.45-5.el7_0.4"
-#                    },
-#                    "4.2.46-12.el7": {
-#                        "arch": "x86_64",
-#                        "release": "12.el7",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.46",
-#                        "version_release": "4.2.46-12.el7"
-#                    },
-#                    "4.2.46-19.el7": {
-#                        "arch": "x86_64",
-#                        "release": "19.el7",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.46",
-#                        "version_release": "4.2.46-19.el7"
-#                    },
-#                    "4.2.46-20.el7_2": {
-#                        "arch": "x86_64",
-#                        "release": "20.el7_2",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.46",
-#                        "version_release": "4.2.46-20.el7_2"
-#                    },
-#                    "4.2.46-21.el7_3": {
-#                        "arch": "x86_64",
-#                        "release": "21.el7_3",
-#                        "repo": "rhel-7-server-rpms",
-#                        "version": "4.2.46",
-#                        "version_release": "4.2.46-21.el7_3"
-#                    }
-#                },
-#                "results": "4.2.45|5.el7|x86_64|rhel-7-server-rpms|4.2.45-5.el7\n4.2.45|5.el7_0.2|x86_64|rhel-7-server-rpms|4.2.45-5.el7_0.2\n4.2.45|5.el7_0.4|x86_64|rhel-7-server-rpms|4.2.45-5.el7_0.4\n4.2.46|12.el7|x86_64|rhel-7-server-rpms|4.2.46-12.el7\n4.2.46|19.el7|x86_64|rhel-7-server-rpms|4.2.46-19.el7\n4.2.46|20.el7_2|x86_64|rhel-7-server-rpms|4.2.46-20.el7_2\n4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3\n",
-#                "returncode": 0,
-#                "versions": {
-#                    "available_versions": [
-#                        "4.2.45",
-#                        "4.2.45",
-#                        "4.2.45",
-#                        "4.2.46",
-#                        "4.2.46",
-#                        "4.2.46",
-#                        "4.2.46"
-#                    ],
-#                    "available_versions_full": [
-#                        "4.2.45-5.el7",
-#                        "4.2.45-5.el7_0.2",
-#                        "4.2.45-5.el7_0.4",
-#                        "4.2.46-12.el7",
-#                        "4.2.46-19.el7",
-#                        "4.2.46-20.el7_2",
-#                        "4.2.46-21.el7_3"
-#                    ],
-#                    "latest": "4.2.46",
-#                    "latest_full": "4.2.46-21.el7_3"
-#                }
-#            },
-#            "state": "present"
-#        }
-#    }
-
-# Example 3: Match a specific version
-  - name: matched versions repoquery test
-    repoquery:
-      name: atomic-openshift
-      show_duplicates: True
-      match_version: 3.3
-    register: openshift_out
-
-# Result:
-
-#    ok: [localhost] => {
-#        "openshift_out": {
-#            "changed": false,
-#            "results": {
-#                "cmd": "/usr/bin/repoquery --quiet --pkgnarrow=repos --queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release} --show-duplicates atomic-openshift",
-#                "package_found": true,
-#                "package_name": "atomic-openshift",
-#                "returncode": 0,
-#                "versions": {
-#                    "available_versions": [
-#                        "3.2.0.43",
-#                        "3.2.1.23",
-#                        "3.3.0.32",
-#                        "3.3.0.34",
-#                        "3.3.0.35",
-#                        "3.3.1.3",
-#                        "3.3.1.4",
-#                        "3.3.1.5",
-#                        "3.3.1.7",
-#                        "3.4.0.39"
-#                    ],
-#                    "available_versions_full": [
-#                        "3.2.0.43-1.git.0.672599f.el7",
-#                        "3.2.1.23-1.git.0.88a7a1d.el7",
-#                        "3.3.0.32-1.git.0.37bd7ea.el7",
-#                        "3.3.0.34-1.git.0.83f306f.el7",
-#                        "3.3.0.35-1.git.0.d7bd9b6.el7",
-#                        "3.3.1.3-1.git.0.86dc49a.el7",
-#                        "3.3.1.4-1.git.0.7c8657c.el7",
-#                        "3.3.1.5-1.git.0.62700af.el7",
-#                        "3.3.1.7-1.git.0.0988966.el7",
-#                        "3.4.0.39-1.git.0.5f32f06.el7"
-#                    ],
-#                    "latest": "3.4.0.39",
-#                    "latest_full": "3.4.0.39-1.git.0.5f32f06.el7",
-#                    "matched_version_found": true,
-#                    "matched_version_full_latest": "3.3.1.7-1.git.0.0988966.el7",
-#                    "matched_version_latest": "3.3.1.7",
-#                    "matched_versions": [
-#                        "3.3.0.32",
-#                        "3.3.0.34",
-#                        "3.3.0.35",
-#                        "3.3.1.3",
-#                        "3.3.1.4",
-#                        "3.3.1.5",
-#                        "3.3.1.7"
-#                    ],
-#                    "matched_versions_full": [
-#                        "3.3.0.32-1.git.0.37bd7ea.el7",
-#                        "3.3.0.34-1.git.0.83f306f.el7",
-#                        "3.3.0.35-1.git.0.d7bd9b6.el7",
-#                        "3.3.1.3-1.git.0.86dc49a.el7",
-#                        "3.3.1.4-1.git.0.7c8657c.el7",
-#                        "3.3.1.5-1.git.0.62700af.el7",
-#                        "3.3.1.7-1.git.0.0988966.el7"
-#                    ],
-#                    "requested_match_version": "3.3"
-#                }
-#            },
-#            "state": "present"
-#        }
-#    }
-
-'''

+ 0 - 92
roles/lib_utils/src/lib/repoquery.py

@@ -1,92 +0,0 @@
-# pylint: skip-file
-# flake8: noqa
-
-'''
-   class that wraps the repoquery commands in a subprocess
-'''
-
-# pylint: disable=too-many-lines,wrong-import-position,wrong-import-order
-
-from collections import defaultdict  # noqa: E402
-
-
-# pylint: disable=no-name-in-module,import-error
-# Reason: pylint errors with "No name 'version' in module 'distutils'".
-#         This is a bug: https://github.com/PyCQA/pylint/issues/73
-from distutils.version import LooseVersion  # noqa: E402
-
-import subprocess  # noqa: E402
-
-
-class RepoqueryCLIError(Exception):
-    '''Exception class for repoquerycli'''
-    pass
-
-
-def _run(cmds):
-    ''' Actually executes the command. This makes mocking easier. '''
-    proc = subprocess.Popen(cmds,
-                            stdin=subprocess.PIPE,
-                            stdout=subprocess.PIPE,
-                            stderr=subprocess.PIPE)
-
-    stdout, stderr = proc.communicate()
-
-    return proc.returncode, stdout, stderr
-
-
-# pylint: disable=too-few-public-methods
-class RepoqueryCLI(object):
-    ''' Class to wrap the command line tools '''
-    def __init__(self,
-                 verbose=False):
-        ''' Constructor for RepoqueryCLI '''
-        self.verbose = verbose
-
-    def _repoquery_cmd(self, cmd, output=False, output_type='json'):
-        '''Base command for repoquery '''
-        cmds = ['/usr/bin/repoquery', '--plugins', '--quiet']
-
-        cmds.extend(cmd)
-
-        rval = {}
-        results = ''
-        err = None
-
-        if self.verbose:
-            print(' '.join(cmds))
-
-        returncode, stdout, stderr = _run(cmds)
-
-        rval = {
-            "returncode": returncode,
-            "results": results,
-            "cmd": ' '.join(cmds),
-        }
-
-        if returncode == 0:
-            if output:
-                if output_type == 'raw':
-                    rval['results'] = stdout
-
-            if self.verbose:
-                print(stdout)
-                print(stderr)
-
-            if err:
-                rval.update({
-                    "err": err,
-                    "stderr": stderr,
-                    "stdout": stdout,
-                    "cmd": cmds
-                })
-
-        else:
-            rval.update({
-                "stderr": stderr,
-                "stdout": stdout,
-                "results": {},
-            })
-
-        return rval
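
The RepoqueryCLI class above predates subprocess.run; it drives /usr/bin/repoquery through Popen and assembles the returncode/stdout/stderr bookkeeping by hand. On Python 3.7+ the same wrapper shrinks to a few lines. A hedged sketch of the pattern (run_repoquery is illustrative, not a drop-in replacement):

    import subprocess

    def run_repoquery(args, verbose=False):
        """Run repoquery and return (returncode, stdout, stderr) as text."""
        cmd = ['/usr/bin/repoquery', '--plugins', '--quiet'] + list(args)
        if verbose:
            print(' '.join(cmd))
        proc = subprocess.run(cmd, capture_output=True, text=True)
        return proc.returncode, proc.stdout, proc.stderr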

+ 0 - 9
roles/lib_utils/src/sources.yml

@@ -6,12 +6,3 @@ yedit.py:
 - doc/yedit
 - class/yedit.py
 - ansible/yedit.py
-
-repoquery.py:
-- doc/generated
-- doc/license
-- lib/import.py
-- doc/repoquery
-- lib/repoquery.py
-- class/repoquery.py
-- ansible/repoquery.py
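
sources.yml maps each generated library module to an ordered list of source fragments, so dropping the repoquery.py entry stops repoquery from being regenerated. Generation itself is essentially concatenation; roughly as below (an illustrative sketch under that assumption, not the repo's actual generator script):

    import os
    import yaml

    def assemble(sources_yml, src_dir, out_dir):
        """Concatenate each target's fragment list into one generated module."""
        with open(sources_yml) as handle:
            targets = yaml.safe_load(handle)
        for module_name, fragments in targets.items():
            with open(os.path.join(out_dir, module_name), 'w') as out:
                for fragment in fragments:
                    with open(os.path.join(src_dir, fragment)) as part:
                        out.write(part.read())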

+ 0 - 136
roles/lib_utils/src/test/integration/repoquery.yml

@@ -1,136 +0,0 @@
-#!/usr/bin/ansible-playbook --module-path=../../../library/
----
-- hosts: localhost
-  gather_facts: no
-
-  tasks:
-  - name: basic query test - Act
-    repoquery:
-      name: bash
-    register: rq_out
-
-  - name: Set a real package version to be used later
-    set_fact:
-      latest_available_bash_version: "{{ rq_out.results.versions.latest }}"
-      latest_available_full_bash_version: "{{ rq_out.results.versions.latest_full }}"
-
-  - name: basic query test - Assert
-    assert:
-      that:
-      - "rq_out.state == 'list'"
-      - "rq_out.changed == False"
-      - "rq_out.results.returncode == 0"
-      - "rq_out.results.package_found == True"
-      - "rq_out.results.package_name == 'bash'"
-      - "rq_out.results.versions.available_versions | length == 1"
-      - "rq_out.results.versions.available_versions_full | length == 1"
-      - "rq_out.results.versions.latest is defined"
-      - "rq_out.results.versions.latest in rq_out.results.versions.available_versions"
-      - "rq_out.results.versions.latest_full is defined"
-      - "rq_out.results.versions.latest_full in rq_out.results.versions.available_versions_full"
-
-  - name: show_duplicates query test - Act
-    repoquery:
-      name: bash
-      show_duplicates: True
-    register: rq_out
-
-  - name: show_duplicates query test - Assert
-    assert:
-      that:
-      - "rq_out.state == 'list'"
-      - "rq_out.changed == False"
-      - "rq_out.results.returncode == 0"
-      - "rq_out.results.package_found == True"
-      - "rq_out.results.package_name == 'bash'"
-      - "rq_out.results.versions.available_versions | length >= 1"
-      - "rq_out.results.versions.available_versions_full | length >= 1"
-      - "rq_out.results.versions.latest is defined"
-      - "rq_out.results.versions.latest in rq_out.results.versions.available_versions"
-      - "rq_out.results.versions.latest_full is defined"
-      - "rq_out.results.versions.latest_full in rq_out.results.versions.available_versions_full"
-
-  - name: show_duplicates verbose query test - Act
-    repoquery:
-      name: bash
-      show_duplicates: True
-      verbose: True
-    register: rq_out
-
-  - name: show_duplicates verbose query test - Assert
-    assert:
-      that:
-      - "rq_out.state == 'list'"
-      - "rq_out.changed == False"
-      - "rq_out.results.returncode == 0"
-      - "rq_out.results.package_found == True"
-      - "rq_out.results.package_name == 'bash'"
-      - "rq_out.results.raw_versions | length > 0"
-      - "rq_out.results.versions.available_versions | length > 0"
-      - "rq_out.results.versions.available_versions_full | length > 0"
-      - "rq_out.results.versions.latest is defined"
-      - "rq_out.results.versions.latest in rq_out.results.versions.available_versions"
-      - "rq_out.results.versions.latest_full is defined"
-      - "rq_out.results.versions.latest_full in rq_out.results.versions.available_versions_full"
-
-  - name: query package does not exist query test - Act
-    repoquery:
-      name: somemadeuppackagenamethatwontmatch
-      show_duplicates: True
-    register: rq_out
-
-  - name: query package does not exist query test - Assert
-    assert:
-      that:
-      - "rq_out.state == 'list'"
-      - "rq_out.changed == False"
-      - "rq_out.results.returncode == 0"
-      - "rq_out.results.package_found == False"
-      - "rq_out.results.results == ''"
-
-
-  - name: query match_version does not exist query test - Act
-    repoquery:
-      name: bash
-      show_duplicates: True
-      match_version: somemadeupversionnotexist
-    register: rq_out
-
-  - name: query match_version does not exist query test - Assert
-    assert:
-      that:
-      - "rq_out.state == 'list'"
-      - "rq_out.changed == False"
-      - "rq_out.results.returncode == 0"
-      - "rq_out.results.package_found == True"
-      - "rq_out.results.package_name == 'bash'"
-      - "rq_out.results.versions.matched_version_found == False"
-      - "rq_out.results.versions.available_versions | length > 0"
-      - "rq_out.results.versions.available_versions_full | length > 0"
-      - "rq_out.results.versions.latest is defined"
-      - "rq_out.results.versions.latest in rq_out.results.versions.available_versions"
-      - "rq_out.results.versions.latest_full is defined"
-      - "rq_out.results.versions.latest_full in rq_out.results.versions.available_versions_full"
-
-  - name: query match_version exists query test - Act
-    repoquery:
-      name: bash
-      show_duplicates: True
-      match_version: "{{ latest_available_bash_version }}"
-    register: rq_out
-
-  - name: query match_version exists query test - Assert
-    assert:
-      that:
-      - "rq_out.state == 'list'"
-      - "rq_out.changed == False"
-      - "rq_out.results.returncode == 0"
-      - "rq_out.results.package_found == True"
-      - "rq_out.results.package_name == 'bash'"
-      - "rq_out.results.versions.matched_version_found == True"
-      - "rq_out.results.versions.available_versions | length > 0"
-      - "rq_out.results.versions.available_versions_full | length > 0"
-      - "rq_out.results.versions.latest is defined"
-      - "rq_out.results.versions.latest in rq_out.results.versions.available_versions"
-      - "rq_out.results.versions.latest_full is defined"
-      - "rq_out.results.versions.latest_full in rq_out.results.versions.available_versions_full"

+ 0 - 68
roles/lib_utils/src/test/unit/test_repoquery.py

@@ -1,68 +0,0 @@
-'''
- Unit tests for repoquery
-'''
-
-import os
-import sys
-import unittest
-import mock
-
-# Disable pylint's invalid-name check so the test
-# variable names can stay brief
-# pylint: disable=invalid-name,no-name-in-module
-# Disable import-error b/c our libraries aren't loaded in jenkins
-# pylint: disable=import-error,wrong-import-position
-# place class in our python path
-module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library')  # noqa: E501
-sys.path.insert(0, module_path)
-from repoquery import Repoquery  # noqa: E402
-
-
-class RepoQueryTest(unittest.TestCase):
-    '''
-     Test class for RepoQuery
-    '''
-
-    @mock.patch('repoquery._run')
-    def test_querying_a_package(self, mock_cmd):
-        ''' Testing querying a package '''
-
-        # Arrange
-
-        # run_ansible input parameters
-        params = {
-            'state': 'list',
-            'name': 'bash',
-            'query_type': 'repos',
-            'verbose': False,
-            'show_duplicates': False,
-            'match_version': None,
-            'ignore_excluders': False,
-        }
-
-        valid_stderr = '''Repo rhel-7-server-extras-rpms forced skip_if_unavailable=True due to: /etc/pki/entitlement/3268107132875399464-key.pem
-        Repo rhel-7-server-rpms forced skip_if_unavailable=True due to: /etc/pki/entitlement/4128505182875899164-key.pem'''  # not real
-
-        # Return values of our mocked function call. These get returned once per call.
-        mock_cmd.side_effect = [
-            (0, b'4.2.46|21.el7_3|x86_64|rhel-7-server-rpms|4.2.46-21.el7_3', valid_stderr),  # first call to the mock
-        ]
-
-        # Act
-        results = Repoquery.run_ansible(params, False)
-
-        # Assert
-        self.assertEqual(results['state'], 'list')
-        self.assertFalse(results['changed'])
-        self.assertTrue(results['results']['package_found'])
-        self.assertEqual(results['results']['returncode'], 0)
-        self.assertEqual(results['results']['package_name'], 'bash')
-        self.assertEqual(results['results']['versions'], {'latest_full': '4.2.46-21.el7_3',
-                                                          'available_versions': ['4.2.46'],
-                                                          'available_versions_full': ['4.2.46-21.el7_3'],
-                                                          'latest': '4.2.46'})
-
-        # Making sure our mock was called as we expected
-        mock_cmd.assert_has_calls([
-            mock.call(['/usr/bin/repoquery', '--plugins', '--quiet', '--pkgnarrow=repos', '--queryformat=%{version}|%{release}|%{arch}|%{repo}|%{version}-%{release}', 'bash']),
-        ])
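
The unit test above isolates the module from real yum metadata by patching the module-level _run helper and feeding it canned (returncode, stdout, stderr) tuples. The same technique in a self-contained form (latest_version is invented for illustration):

    import subprocess
    from unittest import mock

    def latest_version(package):
        """Ask repoquery for a package's newest available version."""
        out = subprocess.check_output(
            ['/usr/bin/repoquery', '--queryformat=%{version}', package])
        return out.decode('utf8').strip()

    @mock.patch('subprocess.check_output')
    def test_latest_version(mock_co):
        mock_co.return_value = b'4.2.46\n'
        assert latest_version('bash') == '4.2.46'
        mock_co.assert_called_once_with(
            ['/usr/bin/repoquery', '--queryformat=%{version}', 'bash'])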

+ 0 - 172
roles/lib_utils/test/conftest.py

@@ -1,172 +0,0 @@
-# pylint: disable=missing-docstring,invalid-name,redefined-outer-name
-import os
-import pytest
-import sys
-
-from OpenSSL import crypto
-
-sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins"))
-
-from openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule  # noqa: E402
-from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule  # noqa: E402
-
-# Parameter list for valid_cert fixture
-VALID_CERTIFICATE_PARAMS = [
-    {
-        'short_name': 'client',
-        'cn': 'client.example.com',
-        'serial': 4,
-        'uses': b'clientAuth',
-        'dns': [],
-        'ip': [],
-    },
-    {
-        'short_name': 'server',
-        'cn': 'server.example.com',
-        'serial': 5,
-        'uses': b'serverAuth',
-        'dns': ['kubernetes', 'openshift'],
-        'ip': ['10.0.0.1', '192.168.0.1']
-    },
-    {
-        'short_name': 'combined',
-        'cn': 'combined.example.com',
-        # Verify that HUGE serials parse correctly.
-        # Frobs PARSING_HEX_SERIAL in _parse_cert
-        # See https://bugzilla.redhat.com/show_bug.cgi?id=1464240
-        'serial': 14449739080294792594019643629255165375,
-        'uses': b'clientAuth, serverAuth',
-        'dns': ['etcd'],
-        'ip': ['10.0.0.2', '192.168.0.2']
-    }
-]
-
-# Extract the short_name from VALID_CERTIFICATE_PARAMS to provide
-# friendly naming for the valid_cert fixture
-VALID_CERTIFICATE_IDS = [param['short_name'] for param in VALID_CERTIFICATE_PARAMS]
-
-
-@pytest.fixture(scope='session')
-def ca(tmpdir_factory):
-    ca_dir = tmpdir_factory.mktemp('ca')
-
-    key = crypto.PKey()
-    key.generate_key(crypto.TYPE_RSA, 2048)
-
-    cert = crypto.X509()
-    cert.set_version(2)  # pyOpenSSL takes the raw ASN.1 value; 2 means X.509v3
-    cert.set_serial_number(1)
-    cert.get_subject().commonName = 'test-signer'
-    cert.gmtime_adj_notBefore(0)
-    cert.gmtime_adj_notAfter(24 * 60 * 60)
-    cert.set_issuer(cert.get_subject())
-    cert.set_pubkey(key)
-    cert.add_extensions([
-        crypto.X509Extension(b'basicConstraints', True, b'CA:TRUE, pathlen:0'),
-        crypto.X509Extension(b'keyUsage', True,
-                             b'digitalSignature, keyEncipherment, keyCertSign, cRLSign'),
-        crypto.X509Extension(b'subjectKeyIdentifier', False, b'hash', subject=cert)
-    ])
-    cert.add_extensions([
-        crypto.X509Extension(b'authorityKeyIdentifier', False, b'keyid:always', issuer=cert)
-    ])
-    cert.sign(key, 'sha256')
-
-    return {
-        'dir': ca_dir,
-        'key': key,
-        'cert': cert,
-    }
-
-
-@pytest.fixture(scope='session',
-                ids=VALID_CERTIFICATE_IDS,
-                params=VALID_CERTIFICATE_PARAMS)
-def valid_cert(request, ca):
-    common_name = request.param['cn']
-
-    key = crypto.PKey()
-    key.generate_key(crypto.TYPE_RSA, 2048)
-
-    cert = crypto.X509()
-    cert.set_serial_number(request.param['serial'])
-    cert.gmtime_adj_notBefore(0)
-    cert.gmtime_adj_notAfter(24 * 60 * 60)
-    cert.set_issuer(ca['cert'].get_subject())
-    cert.set_pubkey(key)
-    cert.set_version(2)  # pyOpenSSL takes the raw ASN.1 value; 2 means X.509v3
-    cert.get_subject().commonName = common_name
-    cert.add_extensions([
-        crypto.X509Extension(b'basicConstraints', True, b'CA:FALSE'),
-        crypto.X509Extension(b'keyUsage', True, b'digitalSignature, keyEncipherment'),
-        crypto.X509Extension(b'extendedKeyUsage', False, request.param['uses']),
-    ])
-
-    if request.param['dns'] or request.param['ip']:
-        san_list = ['DNS:{}'.format(common_name)]
-        san_list.extend(['DNS:{}'.format(x) for x in request.param['dns']])
-        san_list.extend(['IP:{}'.format(x) for x in request.param['ip']])
-
-        cert.add_extensions([
-            crypto.X509Extension(b'subjectAltName', False, ', '.join(san_list).encode('utf8'))
-        ])
-    cert.sign(ca['key'], 'sha256')
-
-    cert_contents = crypto.dump_certificate(crypto.FILETYPE_PEM, cert)
-    cert_file = ca['dir'].join('{}.crt'.format(common_name))
-    cert_file.write_binary(cert_contents)
-
-    return {
-        'common_name': common_name,
-        'serial': request.param['serial'],
-        'dns': request.param['dns'],
-        'ip': request.param['ip'],
-        'uses': request.param['uses'],
-        'cert_file': cert_file,
-        'cert': cert
-    }
-
-
-@pytest.fixture()
-def predicates_lookup():
-    return PredicatesLookupModule()
-
-
-@pytest.fixture()
-def priorities_lookup():
-    return PrioritiesLookupModule()
-
-
-@pytest.fixture()
-def facts():
-    return {
-        'openshift': {
-            'common': {}
-        }
-    }
-
-
-@pytest.fixture(params=[True, False])
-def regions_enabled(request):
-    return request.param
-
-
-@pytest.fixture(params=[True, False])
-def zones_enabled(request):
-    return request.param
-
-
-def v_prefix(release):
-    """Prefix a release number with 'v'."""
-    return "v" + release
-
-
-def minor(release):
-    """Add a suffix to release, making 'X.Y' become 'X.Y.Z'."""
-    return release + ".1"
-
-
-@pytest.fixture(params=[str, v_prefix, minor])
-def release_mod(request):
-    """Modifies a release string to alternative valid values."""
-    return request.param
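
The conftest above builds its throwaway CA and leaf certificates with pyOpenSSL's crypto.X509 API, which is deprecated upstream in favor of the cryptography package. For comparison, a minimal self-signed certificate with cryptography (an illustrative sketch assuming cryptography >= 3.x; make_self_signed is an invented helper):

    import datetime

    from cryptography import x509
    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import rsa
    from cryptography.x509.oid import NameOID

    def make_self_signed(common_name):
        """Return a one-day, SHA-256 signed, self-issued certificate."""
        key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
        name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, common_name)])
        now = datetime.datetime.utcnow()
        return (x509.CertificateBuilder()
                .subject_name(name)
                .issuer_name(name)
                .public_key(key.public_key())
                .serial_number(x509.random_serial_number())
                .not_valid_before(now)
                .not_valid_after(now + datetime.timedelta(days=1))
                .sign(key, hashes.SHA256()))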

+ 0 - 57
roles/lib_utils/test/openshift_master_facts_bad_input_tests.py

@@ -1,57 +0,0 @@
-import copy
-import os
-import sys
-
-from ansible.errors import AnsibleError
-import pytest
-
-sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins"))
-
-from openshift_master_facts_default_predicates import LookupModule  # noqa: E402
-
-
-class TestOpenShiftMasterFactsBadInput(object):
-    lookup = LookupModule()
-    default_facts = {
-        'openshift': {
-            'common': {}
-        }
-    }
-
-    def test_missing_openshift_facts(self):
-        with pytest.raises(AnsibleError):
-            facts = {}
-            self.lookup.run(None, variables=facts)
-
-    def test_missing_deployment_type(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['short_version'] = '10.10'
-            self.lookup.run(None, variables=facts)
-
-    def test_missing_short_version_and_missing_openshift_release(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['deployment_type'] = 'origin'
-            self.lookup.run(None, variables=facts)
-
-    def test_unknown_deployment_types(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['short_version'] = '1.1'
-            facts['openshift']['common']['deployment_type'] = 'bogus'
-            self.lookup.run(None, variables=facts)
-
-    def test_unknown_origin_version(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['short_version'] = '0.1'
-            facts['openshift']['common']['deployment_type'] = 'origin'
-            self.lookup.run(None, variables=facts)
-
-    def test_unknown_ocp_version(self):
-        with pytest.raises(AnsibleError):
-            facts = copy.deepcopy(self.default_facts)
-            facts['openshift']['common']['short_version'] = '0.1'
-            facts['openshift']['common']['deployment_type'] = 'openshift-enterprise'
-            self.lookup.run(None, variables=facts)

+ 0 - 54
roles/lib_utils/test/openshift_master_facts_conftest.py

@@ -1,54 +0,0 @@
-import os
-import sys
-
-import pytest
-
-sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "lookup_plugins"))
-
-from openshift_master_facts_default_predicates import LookupModule as PredicatesLookupModule  # noqa: E402
-from openshift_master_facts_default_priorities import LookupModule as PrioritiesLookupModule  # noqa: E402
-
-
-@pytest.fixture()
-def predicates_lookup():
-    return PredicatesLookupModule()
-
-
-@pytest.fixture()
-def priorities_lookup():
-    return PrioritiesLookupModule()
-
-
-@pytest.fixture()
-def facts():
-    return {
-        'openshift': {
-            'common': {}
-        }
-    }
-
-
-@pytest.fixture(params=[True, False])
-def regions_enabled(request):
-    return request.param
-
-
-@pytest.fixture(params=[True, False])
-def zones_enabled(request):
-    return request.param
-
-
-def v_prefix(release):
-    """Prefix a release number with 'v'."""
-    return "v" + release
-
-
-def minor(release):
-    """Add a suffix to release, making 'X.Y' become 'X.Y.Z'."""
-    return release + ".1"
-
-
-@pytest.fixture(params=[str, v_prefix, minor])
-def release_mod(request):
-    """Modifies a release string to alternative valid values."""
-    return request.param

+ 0 - 114
roles/lib_utils/test/openshift_master_facts_default_predicates_tests.py

@@ -1,114 +0,0 @@
-import pytest
-
-
-# Predicates ordered according to OpenShift Origin source:
-# origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
-
-DEFAULT_PREDICATES_3_6 = [
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'},
-    {'name': 'MatchInterPodAffinity'},
-    {'name': 'NoDiskConflict'},
-    {'name': 'GeneralPredicates'},
-    {'name': 'PodToleratesNodeTaints'},
-    {'name': 'CheckNodeMemoryPressure'},
-    {'name': 'CheckNodeDiskPressure'},
-]
-
-DEFAULT_PREDICATES_3_7 = [
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'},
-    {'name': 'MaxAzureDiskVolumeCount'},
-    {'name': 'MatchInterPodAffinity'},
-    {'name': 'NoDiskConflict'},
-    {'name': 'GeneralPredicates'},
-    {'name': 'PodToleratesNodeTaints'},
-    {'name': 'CheckNodeMemoryPressure'},
-    {'name': 'CheckNodeDiskPressure'},
-    {'name': 'NoVolumeNodeConflict'},
-]
-
-DEFAULT_PREDICATES_3_8 = DEFAULT_PREDICATES_3_7
-
-DEFAULT_PREDICATES_3_9 = [
-    {'name': 'NoVolumeZoneConflict'},
-    {'name': 'MaxEBSVolumeCount'},
-    {'name': 'MaxGCEPDVolumeCount'},
-    {'name': 'MaxAzureDiskVolumeCount'},
-    {'name': 'MatchInterPodAffinity'},
-    {'name': 'NoDiskConflict'},
-    {'name': 'GeneralPredicates'},
-    {'name': 'PodToleratesNodeTaints'},
-    {'name': 'CheckNodeMemoryPressure'},
-    {'name': 'CheckNodeDiskPressure'},
-    {'name': 'CheckVolumeBinding'},
-]
-
-DEFAULT_PREDICATES_4_0 = DEFAULT_PREDICATES_3_11 = DEFAULT_PREDICATES_3_10 = DEFAULT_PREDICATES_3_9
-
-REGION_PREDICATE = {
-    'name': 'Region',
-    'argument': {
-        'serviceAffinity': {
-            'labels': ['region']
-        }
-    }
-}
-
-TEST_VARS = [
-    ('3.6', DEFAULT_PREDICATES_3_6),
-    ('3.7', DEFAULT_PREDICATES_3_7),
-    ('3.8', DEFAULT_PREDICATES_3_8),
-    ('3.9', DEFAULT_PREDICATES_3_9),
-    ('3.10', DEFAULT_PREDICATES_3_10),
-    ('3.11', DEFAULT_PREDICATES_3_11),
-    ('4.0', DEFAULT_PREDICATES_4_0),
-]
-
-
-def assert_ok(predicates_lookup, default_predicates, regions_enabled, **kwargs):
-    results = predicates_lookup.run(None, regions_enabled=regions_enabled, **kwargs)
-    if regions_enabled:
-        assert results == default_predicates + [REGION_PREDICATE]
-    else:
-        assert results == default_predicates
-
-
-def test_openshift_version(predicates_lookup, openshift_version_fixture, regions_enabled):
-    facts, default_predicates = openshift_version_fixture
-    assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def openshift_version_fixture(request, facts):
-    version, default_predicates = request.param
-    version += '.1'
-    facts['openshift_version'] = version
-    return facts, default_predicates
-
-
-def test_openshift_release(predicates_lookup, openshift_release_fixture, regions_enabled):
-    facts, default_predicates = openshift_release_fixture
-    assert_ok(predicates_lookup, default_predicates, variables=facts, regions_enabled=regions_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def openshift_release_fixture(request, facts, release_mod):
-    release, default_predicates = request.param
-    facts['openshift_release'] = release_mod(release)
-    return facts, default_predicates
-
-
-def test_short_version_kwarg(predicates_lookup, short_version_kwarg_fixture, regions_enabled):
-    facts, short_version, default_predicates = short_version_kwarg_fixture
-    assert_ok(
-        predicates_lookup, default_predicates, variables=facts,
-        regions_enabled=regions_enabled, short_version=short_version)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_kwarg_fixture(request, facts):
-    short_version, default_predicates = request.param
-    return facts, short_version, default_predicates
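
The predicates tests above multiply one assertion across every (version source, regions flag) combination by parametrizing fixtures rather than tests. Reduced to a self-contained sketch of the pattern:

    import pytest

    @pytest.fixture(params=['3.9', '3.10', '3.11'])
    def version(request):
        return request.param

    @pytest.fixture(params=[True, False])
    def regions_enabled(request):
        return request.param

    def test_combinations(version, regions_enabled):
        # pytest generates 3 x 2 = 6 test cases, one per combination.
        assert isinstance(version, str)
        assert isinstance(regions_enabled, bool)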

+ 0 - 80
roles/lib_utils/test/openshift_master_facts_default_priorities_tests.py

@@ -1,80 +0,0 @@
-import pytest
-
-
-DEFAULT_PRIORITIES_3_6 = [
-    {'name': 'SelectorSpreadPriority', 'weight': 1},
-    {'name': 'InterPodAffinityPriority', 'weight': 1},
-    {'name': 'LeastRequestedPriority', 'weight': 1},
-    {'name': 'BalancedResourceAllocation', 'weight': 1},
-    {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
-    {'name': 'NodeAffinityPriority', 'weight': 1},
-    {'name': 'TaintTolerationPriority', 'weight': 1}
-]
-DEFAULT_PRIORITIES_3_8 = DEFAULT_PRIORITIES_3_7 = DEFAULT_PRIORITIES_3_6
-DEFAULT_PRIORITIES_4_0 = DEFAULT_PRIORITIES_3_11 = DEFAULT_PRIORITIES_3_10 = DEFAULT_PRIORITIES_3_9 = DEFAULT_PRIORITIES_3_8
-
-ZONE_PRIORITY = {
-    'name': 'Zone',
-    'argument': {
-        'serviceAntiAffinity': {
-            'label': 'zone'
-        }
-    },
-    'weight': 2
-}
-
-TEST_VARS = [
-    ('3.6', DEFAULT_PRIORITIES_3_6),
-    ('3.7', DEFAULT_PRIORITIES_3_7),
-    ('3.8', DEFAULT_PRIORITIES_3_8),
-    ('3.9', DEFAULT_PRIORITIES_3_9),
-    ('3.10', DEFAULT_PRIORITIES_3_10),
-    ('3.11', DEFAULT_PRIORITIES_3_11),
-    ('4.0', DEFAULT_PRIORITIES_4_0),
-]
-
-
-def assert_ok(priorities_lookup, default_priorities, zones_enabled, **kwargs):
-    results = priorities_lookup.run(None, zones_enabled=zones_enabled, **kwargs)
-    if zones_enabled:
-        assert results == default_priorities + [ZONE_PRIORITY]
-    else:
-        assert results == default_priorities
-
-
-def test_openshift_version(priorities_lookup, openshift_version_fixture, zones_enabled):
-    facts, default_priorities = openshift_version_fixture
-    assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def openshift_version_fixture(request, facts):
-    version, default_priorities = request.param
-    version += '.1'
-    facts['openshift_version'] = version
-    return facts, default_priorities
-
-
-def test_openshift_release(priorities_lookup, openshift_release_fixture, zones_enabled):
-    facts, default_priorities = openshift_release_fixture
-    assert_ok(priorities_lookup, default_priorities, variables=facts, zones_enabled=zones_enabled)
-
-
-@pytest.fixture(params=TEST_VARS)
-def openshift_release_fixture(request, facts, release_mod):
-    release, default_priorities = request.param
-    facts['openshift_release'] = release_mod(release)
-    return facts, default_priorities
-
-
-def test_short_version_kwarg(priorities_lookup, short_version_kwarg_fixture, zones_enabled):
-    facts, short_version, default_priorities = short_version_kwarg_fixture
-    assert_ok(
-        priorities_lookup, default_priorities, variables=facts,
-        zones_enabled=zones_enabled, short_version=short_version)
-
-
-@pytest.fixture(params=TEST_VARS)
-def short_version_kwarg_fixture(request, facts):
-    short_version, default_priorities = request.param
-    return facts, short_version, default_priorities

+ 0 - 110
roles/lib_utils/test/sanity_check_test.py

@@ -1,110 +0,0 @@
-import os
-import pytest
-import sys
-
-from ansible.playbook.play_context import PlayContext
-from ansible.template import Templar
-from ansible import errors
-
-sys.path.insert(1, os.path.join(os.path.dirname(__file__), os.pardir, "action_plugins"))
-from sanity_checks import ActionModule  # noqa: E402
-
-
-@pytest.mark.parametrize('hostvars, host, varname, result', [
-    ({"example.com": {"param": 3.11}}, "example.com", "param", 3.11),
-    ({"example.com": {"param": 3.11}}, "example.com", "another_param", None)
-])
-def test_template_var(hostvars, host, varname, result):
-    task = FakeTask('sanity_checks', {'checks': []})
-    plugin = ActionModule(task, None, PlayContext(), None, Templar(None, None, None), None)
-    check = plugin.template_var(hostvars, host, varname)
-    assert check == result
-
-
-@pytest.mark.parametrize('hostvars, host, result', [
-    ({"example.com": {"openshift_pkg_version": "-3.6.0"}}, "example.com", None),
-    ({"example.com": {"openshift_pkg_version": "-3.7.0-0.126.0.git.0.9351aae.el7"}}, "example.com", None),
-    ({"example.com": {"openshift_pkg_version": "-3.9.0-2.fc28"}}, "example.com", None),
-    ({"example.com": {"openshift_pkg_version": "-3.11*"}}, "example.com", None),
-    ({"example.com": {"openshift_pkg_version": "-3"}}, "example.com", None),
-])
-def test_valid_check_pkg_version_format(hostvars, host, result):
-    task = FakeTask('sanity_checks', {'checks': []})
-    plugin = ActionModule(task, None, PlayContext(), None, Templar(None, None, None), None)
-    check = plugin.check_pkg_version_format(hostvars, host)
-    assert check == result
-
-
-@pytest.mark.parametrize('hostvars, host, result', [
-    ({"example.com": {"openshift_pkg_version": "3.11.0"}}, "example.com", None),
-    ({"example.com": {"openshift_pkg_version": "v3.11.0"}}, "example.com", None),
-])
-def test_invalid_check_pkg_version_format(hostvars, host, result):
-    with pytest.raises(errors.AnsibleModuleError):
-        task = FakeTask('sanity_checks', {'checks': []})
-        plugin = ActionModule(task, None, PlayContext(), None, Templar(None, None, None), None)
-        plugin.check_pkg_version_format(hostvars, host)
-
-
-@pytest.mark.parametrize('hostvars, host, result', [
-    ({"example.com": {"openshift_release": "v3"}}, "example.com", None),
-    ({"example.com": {"openshift_release": "v3.11"}}, "example.com", None),
-    ({"example.com": {"openshift_release": "v3.11.0"}}, "example.com", None),
-    ({"example.com": {"openshift_release": "3.11"}}, "example.com", None),
-])
-def test_valid_check_release_format(hostvars, host, result):
-    task = FakeTask('sanity_checks', {'checks': []})
-    plugin = ActionModule(task, None, PlayContext(), None, Templar(None, None, None), None)
-    check = plugin.check_release_format(hostvars, host)
-    assert check == result
-
-
-@pytest.mark.parametrize('hostvars, host, result', [
-    ({"example.com": {"openshift_release": "-3.11.0"}}, "example.com", None),
-    ({"example.com": {"openshift_release": "-3.7.0-0.126.0.git.0.9351aae.el7"}}, "example.com", None),
-    ({"example.com": {"openshift_release": "3.1.2.3"}}, "example.com", None),
-])
-def test_invalid_check_release_format(hostvars, host, result):
-    with pytest.raises(errors.AnsibleModuleError):
-        task = FakeTask('sanity_checks', {'checks': []})
-        plugin = ActionModule(task, None, PlayContext(), None, Templar(None, None, None), None)
-        plugin.check_release_format(hostvars, host)
-
-
-@pytest.mark.parametrize('hostvars, host, result', [
-    ({"example.com": {"openshift_builddefaults_json": "{}"}}, "example.com", None),
-    ({"example.com": {"openshift_builddefaults_json": '[]'}}, "example.com", None),
-    ({"example.com": {"openshift_builddefaults_json": '{"a": []}'}}, "example.com", None),
-    ({"example.com": {"openshift_builddefaults_json": '{"a": [], "b": "c"}'}}, "example.com", None),
-    ({"example.com": {"openshift_builddefaults_json": '{"a": [], "b": {"c": "d"}}'}}, "example.com", None),
-    ({"example.com": {"openshift_builddefaults_json": '["a", "b", "c"]'}}, "example.com", None),
-    ({"example.com": {"NOT_IN_JSON_FORMAT_VARIABLES": '{"invalid"}'}}, "example.com", None),
-])
-def test_valid_valid_json_format_vars(hostvars, host, result):
-    task = FakeTask('sanity_checks', {'checks': []})
-    plugin = ActionModule(task, None, PlayContext(), None, Templar(None, None, None), None)
-    check = plugin.validate_json_format_vars(hostvars, host)
-    assert check == result
-
-
-@pytest.mark.parametrize('hostvars, host, result', [
-    ({"example.com": {"openshift_builddefaults_json": '{"a"}'}}, "example.com", None),
-    ({"example.com": {"openshift_builddefaults_json": '{"a": { '}}, "example.com", None),
-    ({"example.com": {"openshift_builddefaults_json": '{"a": [ }'}}, "example.com", None),
-])
-def test_invalid_valid_json_format_vars(hostvars, host, result):
-    with pytest.raises(errors.AnsibleModuleError):
-        task = FakeTask('sanity_checks', {'checks': []})
-        plugin = ActionModule(task, None, PlayContext(), None, Templar(None, None, None), None)
-        plugin.validate_json_format_vars(hostvars, host)
-
-
-def fake_execute_module(*args):
-    raise AssertionError('this function should not be called')
-
-
-class FakeTask(object):
-    def __init__(self, action, args):
-        self.action = action
-        self.args = args
-        self.async_val = 0  # 'async' became a reserved word in Python 3.7; Ansible >= 2.8 names this attribute async_val

+ 0 - 90
roles/lib_utils/test/test_fakeopensslclasses.py

@@ -1,90 +0,0 @@
-'''
- Unit tests for the FakeOpenSSL classes
-'''
-import os
-import subprocess
-import sys
-
-import pytest
-
-MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library'))
-sys.path.insert(1, MODULE_PATH)
-
-# pylint: disable=import-error,wrong-import-position,missing-docstring
-# pylint: disable=invalid-name,redefined-outer-name
-from openshift_cert_expiry import FakeOpenSSLCertificate  # noqa: E402
-
-
-@pytest.fixture(scope='module')
-def fake_valid_cert(valid_cert):
-    cmd = ['openssl', 'x509', '-in', str(valid_cert['cert_file']), '-text',
-           '-nameopt', 'oneline']
-    cert = subprocess.check_output(cmd)
-    return FakeOpenSSLCertificate(cert.decode('utf8'))
-
-
-def test_not_after(valid_cert, fake_valid_cert):
-    ''' Validate the value returned from get_notAfter() '''
-    real_cert = valid_cert['cert']
-
-    # Internal representation of pyOpenSSL is bytes, while FakeOpenSSLCertificate
-    # is text, so decode the result from pyOpenSSL prior to comparing
-    assert real_cert.get_notAfter().decode('utf8') == fake_valid_cert.get_notAfter()
-
-
-def test_serial(valid_cert, fake_valid_cert):
-    ''' Validate the value returned from get_serial_number() '''
-    real_cert = valid_cert['cert']
-    assert real_cert.get_serial_number() == fake_valid_cert.get_serial_number()
-
-
-def test_get_subject(valid_cert, fake_valid_cert):
-    ''' Validate the certificate subject '''
-
-    # Gather the subject components and create a list of colon separated strings.
-    # Since the internal representation of pyOpenSSL uses bytes, we need to decode
-    # the results before comparing.
-    c_subjects = valid_cert['cert'].get_subject().get_components()
-    c_subj = ', '.join(['{}:{}'.format(x.decode('utf8'), y.decode('utf8')) for x, y in c_subjects])
-    f_subjects = fake_valid_cert.get_subject().get_components()
-    f_subj = ', '.join(['{}:{}'.format(x, y) for x, y in f_subjects])
-    assert c_subj == f_subj
-
-
-def get_san_extension(cert):
-    # Internal representation of pyOpenSSL is bytes, while FakeOpenSSLCertificate
-    # is text, so we need to set the value to search for accordingly.
-    if isinstance(cert, FakeOpenSSLCertificate):
-        san_short_name = 'subjectAltName'
-    else:
-        san_short_name = b'subjectAltName'
-
-    for i in range(cert.get_extension_count()):
-        ext = cert.get_extension(i)
-        if ext.get_short_name() == san_short_name:
-            # return the string representation to compare the actual SAN
-            # values instead of the data types
-            return str(ext)
-
-    return None
-
-
-def test_subject_alt_names(valid_cert, fake_valid_cert):
-    real_cert = valid_cert['cert']
-
-    san = get_san_extension(real_cert)
-    f_san = get_san_extension(fake_valid_cert)
-
-    assert san == f_san
-
-    # If there are either dns or ip sans defined, verify common_name present
-    if valid_cert['ip'] or valid_cert['dns']:
-        assert 'DNS:' + valid_cert['common_name'] in f_san
-
-    # Verify all ip sans are present
-    for ip in valid_cert['ip']:
-        assert 'IP Address:' + ip in f_san
-
-    # Verify all dns sans are present
-    for name in valid_cert['dns']:
-        assert 'DNS:' + name in f_san
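
FakeOpenSSLCertificate exists so certificate checks can fall back to parsing 'openssl x509 -text' output on hosts without pyOpenSSL. Asking the openssl CLI for a single field is the same idea in miniature (not_after is an invented helper, shown as an illustrative sketch):

    import subprocess

    def not_after(cert_path):
        """Return a certificate's expiry date via the openssl CLI."""
        out = subprocess.check_output(
            ['openssl', 'x509', '-in', cert_path, '-noout', '-enddate'])
        # Output looks like: notAfter=Nov  3 12:00:00 2026 GMT
        return out.decode('utf8').strip().split('=', 1)[1]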

+ 0 - 157
roles/lib_utils/test/test_glusterfs_check_containerized.py

@@ -1,157 +0,0 @@
-import os
-import sys
-
-import pytest
-
-try:
-    # python3, mock is built in.
-    from unittest.mock import patch
-except ImportError:
-    # In python2, mock is installed via pip.
-    from mock import patch
-
-MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library'))
-sys.path.insert(1, MODULE_PATH)
-
-import glusterfs_check_containerized  # noqa
-
-
-NODE_LIST_STD_OUT_1 = ("""
-NAME                       STATUS    ROLES                  AGE       VERSION
-fedora1.openshift.io   Ready     compute,infra,master   1d        v1.11.0+d4cacc0
-fedora2.openshift.io   Ready     infra                  1d        v1.11.0+d4cacc0
-fedora3.openshift.io   Ready     infra                  1d        v1.11.0+d4cacc0
-""")
-
-NODE_LIST_STD_OUT_2 = ("""
-NAME                       STATUS    ROLES                  AGE       VERSION
-fedora1.openshift.io   Ready     compute,infra,master   1d        v1.11.0+d4cacc0
-fedora2.openshift.io   NotReady     infra                  1d        v1.11.0+d4cacc0
-fedora3.openshift.io   Ready     infra                  1d        v1.11.0+d4cacc0
-""")
-
-NODE_LIST_STD_OUT_3 = ("""
-NAME                       STATUS    ROLES                  AGE       VERSION
-fedora1.openshift.io   Ready     compute,infra,master   1d        v1.11.0+d4cacc0
-fedora2.openshift.io   NotReady     infra                  1d        v1.11.0+d4cacc0
-fedora3.openshift.io   Invalid     infra                  1d        v1.11.0+d4cacc0
-""")
-
-POD_SELECT_STD_OUT = ("""NAME                                          READY     STATUS    RESTARTS   AGE       IP                NODE
-glusterblock-storage-provisioner-dc-1-ks5zt   1/1       Running   0          1d        10.130.0.5        fedora3.openshift.io
-glusterfs-storage-fzdn2                       1/1       Running   0          1d        192.168.124.175   fedora1.openshift.io
-glusterfs-storage-mp9nk                       1/1       Running   4          1d        192.168.124.233   fedora2.openshift.io
-glusterfs-storage-t9c6d                       1/1       Running   0          1d        192.168.124.50    fedora3.openshift.io
-heketi-storage-1-rgj8b                        1/1       Running   0          1d        10.130.0.4        fedora3.openshift.io""")
-
-# Need to ensure we have extra empty lines in this output;
-# thus the quotes are one line above and below the text.
-VOLUME_LIST_STDOUT = ("""
-heketidbstorage
-volume1
-""")
-
-VOLUME_HEAL_INFO_GOOD = ("""
-Brick 192.168.124.233:/var/lib/heketi/mounts/vg_936ddf24061d55788f50496757d2f3b2/brick_9df1b6229025ea45521ab1b370d24a06/brick
-Status: Connected
-Number of entries: 0
-
-Brick 192.168.124.175:/var/lib/heketi/mounts/vg_95975e77a6dc7a8e45586eac556b0f24/brick_172b6be6704a3d9f706535038f7f2e52/brick
-Status: Connected
-Number of entries: 0
-
-Brick 192.168.124.50:/var/lib/heketi/mounts/vg_6523756fe1becfefd3224d3082373344/brick_359e4cf44cd1b82674f7d931cb5c481e/brick
-Status: Connected
-Number of entries: 0
-""")
-
-VOLUME_HEAL_INFO_BAD = ("""
-Brick 192.168.124.233:/var/lib/heketi/mounts/vg_936ddf24061d55788f50496757d2f3b2/brick_9df1b6229025ea45521ab1b370d24a06/brick
-Status: Connected
-Number of entries: 0
-
-Brick 192.168.124.175:/var/lib/heketi/mounts/vg_95975e77a6dc7a8e45586eac556b0f24/brick_172b6be6704a3d9f706535038f7f2e52/brick
-Status: Connected
-Number of entries: 0
-
-Brick 192.168.124.50:/var/lib/heketi/mounts/vg_6523756fe1becfefd3224d3082373344/brick_359e4cf44cd1b82674f7d931cb5c481e/brick
-Status: Connected
-Number of entries: -
-""")
-
-
-class DummyModule(object):
-    def exit_json(*args, **kwargs):
-        return 0
-
-    def fail_json(*args, **kwargs):
-        raise Exception(kwargs['msg'])
-
-
-def test_get_valid_nodes():
-    with patch('glusterfs_check_containerized.call_or_fail') as call_mock:
-        module = DummyModule()
-        oc_exec = []
-        exclude_node = "fedora1.openshift.io"
-
-        call_mock.return_value = NODE_LIST_STD_OUT_1
-        valid_nodes = glusterfs_check_containerized.get_valid_nodes(module, oc_exec, exclude_node)
-        assert valid_nodes == ['fedora2.openshift.io', 'fedora3.openshift.io']
-
-        call_mock.return_value = NODE_LIST_STD_OUT_2
-        valid_nodes = glusterfs_check_containerized.get_valid_nodes(module, oc_exec, exclude_node)
-        assert valid_nodes == ['fedora3.openshift.io']
-
-        call_mock.return_value = NODE_LIST_STD_OUT_3
-        with pytest.raises(Exception) as err:
-            valid_nodes = glusterfs_check_containerized.get_valid_nodes(module, oc_exec, exclude_node)
-        assert 'Exception: Unable to find suitable node in get nodes output' in str(err)
-
-
-def test_select_pod():
-    with patch('glusterfs_check_containerized.call_or_fail') as call_mock:
-        module = DummyModule()
-        oc_exec = []
-        cluster_name = "storage"
-        valid_nodes = ["fedora2.openshift.io", "fedora3.openshift.io"]
-        call_mock.return_value = POD_SELECT_STD_OUT
-        # Should select first valid podname in call_or_fail output.
-        pod_name = glusterfs_check_containerized.select_pod(module, oc_exec, cluster_name, valid_nodes)
-        assert pod_name == 'glusterfs-storage-mp9nk'
-        with pytest.raises(Exception) as err:
-            pod_name = glusterfs_check_containerized.select_pod(module, oc_exec, "does not exist", valid_nodes)
-        assert 'Exception: Unable to find suitable pod in get pods output' in str(err)
-
-
-def test_get_volume_list():
-    with patch('glusterfs_check_containerized.call_or_fail') as call_mock:
-        module = DummyModule()
-        oc_exec = []
-        pod_name = ''
-        call_mock.return_value = VOLUME_LIST_STDOUT
-        volume_list = glusterfs_check_containerized.get_volume_list(module, oc_exec, pod_name)
-        assert volume_list == ['heketidbstorage', 'volume1']
-
-
-def test_check_volume_health_info():
-    with patch('glusterfs_check_containerized.call_or_fail') as call_mock:
-        module = DummyModule()
-        oc_exec = []
-        pod_name = ''
-        volume = 'somevolume'
-        call_mock.return_value = VOLUME_HEAL_INFO_GOOD
-        # this should just complete quietly.
-        glusterfs_check_containerized.check_volume_health_info(module, oc_exec, pod_name, volume)
-
-        call_mock.return_value = VOLUME_HEAL_INFO_BAD
-        expected_error = 'volume {} is not ready'.format(volume)
-        with pytest.raises(Exception) as err:
-            glusterfs_check_containerized.check_volume_health_info(module, oc_exec, pod_name, volume)
-        assert expected_error in str(err)
-
-
-if __name__ == '__main__':
-    test_get_valid_nodes()
-    test_select_pod()
-    test_get_volume_list()
-    test_check_volume_health_info()
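
get_valid_nodes above works by splitting the columnar 'oc get nodes' output and keeping Ready nodes. A behavior-adjacent sketch (ready_nodes is invented, and it omits the stricter status validation the removed module performs):

    def ready_nodes(oc_output, exclude=()):
        """Pick the names of Ready nodes out of 'oc get nodes' text."""
        nodes = []
        for line in oc_output.strip().splitlines()[1:]:  # skip header row
            fields = line.split()
            if len(fields) >= 2 and fields[1] == 'Ready' and fields[0] not in exclude:
                nodes.append(fields[0])
        return nodes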

+ 0 - 67
roles/lib_utils/test/test_load_and_handle_cert.py

@@ -1,67 +0,0 @@
-'''
- Unit tests for the load_and_handle_cert method
-'''
-import datetime
-import os
-import sys
-
-import pytest
-
-MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'library'))
-sys.path.insert(1, MODULE_PATH)
-
-# pylint: disable=import-error,wrong-import-position,missing-docstring
-# pylint: disable=invalid-name,redefined-outer-name
-import openshift_cert_expiry  # noqa: E402
-
-# TODO: More testing on the results of the load_and_handle_cert function
-# could be implemented here as well, such as verifying subjects
-# match up.
-
-
-@pytest.fixture(params=['OpenSSLCertificate', 'FakeOpenSSLCertificate'])
-def loaded_cert(request, valid_cert):
-    """ parameterized fixture to provide load_and_handle_cert results
-        for both OpenSSL and FakeOpenSSL parsed certificates
-    """
-    now = datetime.datetime.now()
-
-    openshift_cert_expiry.HAS_OPENSSL = request.param == 'OpenSSLCertificate'
-
-    # valid_cert['cert_file'] is a `py.path.LocalPath` object and
-    # provides a read_text() method for reading the file contents.
-    cert_string = valid_cert['cert_file'].read_text('utf8')
-
-    (subject,
-     expiry_date,
-     time_remaining,
-     serial) = openshift_cert_expiry.load_and_handle_cert(cert_string, now)
-
-    return {
-        'now': now,
-        'subject': subject,
-        'expiry_date': expiry_date,
-        'time_remaining': time_remaining,
-        'serial': serial,
-    }
-
-
-def test_serial(loaded_cert, valid_cert):
-    """Params:
-
-    * `loaded_cert` comes from the `loaded_cert` fixture in this file
-    * `valid_cert` comes from the 'valid_cert' fixture in conftest.py
-    """
-    valid_cert_serial = valid_cert['cert'].get_serial_number()
-    assert loaded_cert['serial'] == valid_cert_serial
-
-
-def test_expiry(loaded_cert):
-    """Params:
-
-    * `loaded_cert` comes from the `loaded_cert` fixture in this file
-    """
-    expiry_date = loaded_cert['expiry_date']
-    time_remaining = loaded_cert['time_remaining']
-    now = loaded_cert['now']
-    assert expiry_date == now + time_remaining

+ 0 - 82
roles/lib_utils/test/test_master_check_paths_in_config.py

@@ -1,82 +0,0 @@
-'''
- Unit tests for the master_check_paths_in_config action plugin
-'''
-import os
-import sys
-
-from ansible import errors
-import pytest
-
-
-MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'action_plugins'))
-sys.path.insert(1, MODULE_PATH)
-
-# pylint: disable=import-error,wrong-import-position,missing-docstring
-# pylint: disable=invalid-name,redefined-outer-name
-import master_check_paths_in_config  # noqa: E402
-
-
-@pytest.fixture()
-def loaded_config():
-    """return testing master config"""
-    data = {
-        'apiVersion': 'v1',
-        'oauthConfig':
-        {'identityProviders':
-            ['1', '2', '/this/will/fail']},
-        'fake_top_item':
-        {'fake_item':
-            {'fake_item2':
-                ["some string",
-                    {"fake_item3":
-                        ["some string 2",
-                            {"fake_item4":
-                                {"some_key": "deeply_nested_string"}}]}]}}
-    }
-    return data
-
-
-def test_pop_migrated(loaded_config):
-    """Params:
-
-    * `loaded_config` comes from the `loaded_config` fixture in this file
-    """
-    # Ensure we actually loaded a valid config
-    assert loaded_config['apiVersion'] == 'v1'
-
-    # Test that migrated key is removed
-    master_check_paths_in_config.pop_migrated_fields(loaded_config)
-    assert loaded_config['oauthConfig'] is not None
-    assert loaded_config['oauthConfig'].get('identityProviders') is None
-
-
-def test_walk_mapping(loaded_config):
-    """Params:
-    * `loaded_config` comes from the `loaded_config` fixture in this file
-    """
-    # Ensure we actually loaded a valid config
-    fake_top_item = loaded_config['fake_top_item']
-    stc = []
-    expected_keys = ("some string", "some string 2", "deeply_nested_string")
-
-    # Test that we actually extract all the strings from complicated nested
-    # structures
-    master_check_paths_in_config.walk_mapping(fake_top_item, stc)
-    assert len(stc) == 3
-    for item in expected_keys:
-        assert item in stc
-
-
-def test_check_strings():
-    stc_good = ('/etc/origin/master/good', 'some/child/dir')
-    # This should not raise
-    master_check_paths_in_config.check_strings(stc_good)
-
-    # This is a string we should alert on
-    stc_bad = ('goodfile.txt', '/root/somefile')
-    with pytest.raises(errors.AnsibleModuleError):
-        master_check_paths_in_config.check_strings(stc_bad)
-
-    stc_bad_relative = ('goodfile.txt', '../node/otherfile')
-    with pytest.raises(errors.AnsibleModuleError):
-        master_check_paths_in_config.check_strings(stc_bad_relative)
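
The walk_mapping test above expects every string value, however deeply nested in dicts and lists, to be collected, while keys are not. One way such a walk can be written, consistent with those expectations (collect_strings is illustrative, not necessarily the removed implementation):

    def collect_strings(node, out):
        """Recursively gather every string value from nested dicts/lists."""
        if isinstance(node, str):
            out.append(node)
        elif isinstance(node, dict):
            for value in node.values():
                collect_strings(value, out)
        elif isinstance(node, list):
            for item in node:
                collect_strings(item, out)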

+ 0 - 37
roles/lib_utils/test/test_oo_filters.py

@@ -1,37 +0,0 @@
-'''
- Unit tests for oo_filters
-'''
-import os
-import sys
-
-MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'filter_plugins'))
-sys.path.insert(0, MODULE_PATH)
-
-# pylint: disable=import-error,wrong-import-position,missing-docstring
-import oo_filters   # noqa: E402
-
-
-def test_lib_utils_oo_oreg_image():
-    default_url = "quay.io/coreos/etcd:v99"
-
-    oreg_url = "None"
-    output_image = oo_filters.lib_utils_oo_oreg_image(default_url, oreg_url)
-    assert output_image == default_url
-
-    oreg_url = "example.com/openshift/origin-${component}:${version}"
-    expected_output = "example.com/coreos/etcd:v99"
-    output_image = oo_filters.lib_utils_oo_oreg_image(default_url, oreg_url)
-    assert output_image == expected_output
-
-    oreg_url = "example.com/subdir/openshift/origin-${component}:${version}"
-    expected_output = "example.com/subdir/coreos/etcd:v99"
-    output_image = oo_filters.lib_utils_oo_oreg_image(default_url, oreg_url)
-    assert output_image == expected_output
-
-
-def main():
-    test_lib_utils_oo_oreg_image()
-
-
-if __name__ == '__main__':
-    main()
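
The assertions above pin down lib_utils_oo_oreg_image's behavior: keep the default image when oreg_url is the string 'None', otherwise graft the default's namespace/name:tag onto the custom registry, including any subdirectory. A sketch that satisfies those cases (oreg_image is illustrative, not the removed filter's body):

    def oreg_image(default_image, oreg_url):
        """Re-root an image reference onto a custom registry template."""
        if oreg_url == 'None':
            return default_image
        registry = oreg_url.rsplit('/', 2)[0]        # drop '<ns>/<image>' tail
        name_tag = default_image.split('/', 1)[1]    # drop default registry
        return '{}/{}'.format(registry, name_tag)

    # oreg_image("quay.io/coreos/etcd:v99",
    #            "example.com/subdir/openshift/origin-${component}:${version}")
    # -> "example.com/subdir/coreos/etcd:v99"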

+ 0 - 52
roles/lib_utils/test/test_sanity_checks.py

@@ -1,52 +0,0 @@
-'''
- Unit tests for registry wildcard matching (is_registry_match)
-'''
-import os
-import sys
-
-MODULE_PATH = os.path.realpath(os.path.join(__file__, os.pardir, os.pardir, 'action_plugins'))
-sys.path.insert(0, MODULE_PATH)
-
-# pylint: disable=import-error,wrong-import-position,missing-docstring
-from sanity_checks import is_registry_match   # noqa: E402
-
-
-def test_is_registry_match():
-    '''
-     Test for is_registry_match
-    '''
-    pat_allowall = "*"
-    pat_docker = "docker.io"
-    pat_subdomain = "*.example.com"
-    pat_matchport = "registry:80"
-
-    assert is_registry_match("docker.io/repo/my", pat_allowall)
-    assert is_registry_match("example.com:4000/repo/my", pat_allowall)
-    assert is_registry_match("172.192.222.10:4000/a/b/c", pat_allowall)
-    assert is_registry_match("https://registry.com", pat_allowall)
-    assert is_registry_match("example.com/openshift3/ose-${component}:${version}", pat_allowall)
-
-    assert is_registry_match("docker.io/repo/my", pat_docker)
-    assert is_registry_match("docker.io:443/repo/my", pat_docker)
-    assert is_registry_match("docker.io/openshift3/ose-${component}:${version}", pat_allowall)
-    assert not is_registry_match("example.com:4000/repo/my", pat_docker)
-    assert not is_registry_match("index.docker.io/a/b/c", pat_docker)
-    assert not is_registry_match("https://registry.com", pat_docker)
-    assert not is_registry_match("example.com/openshift3/ose-${component}:${version}", pat_docker)
-
-    assert is_registry_match("apps.foo.example.com/prefix", pat_subdomain)
-    assert is_registry_match("sub.example.com:80", pat_subdomain)
-    assert not is_registry_match("https://example.com:443/prefix", pat_subdomain)
-    assert not is_registry_match("docker.io/library/my", pat_subdomain)
-    assert not is_registry_match("https://hello.example.bar", pat_subdomain)
-
-    assert is_registry_match("registry:80/prefix", pat_matchport)
-    assert is_registry_match("registry/myapp", pat_matchport)
-    assert is_registry_match("registry:443/myap", pat_matchport)
-    assert not is_registry_match("https://example.com:443/prefix", pat_matchport)
-    assert not is_registry_match("docker.io/library/my", pat_matchport)
-    assert not is_registry_match("https://hello.registry/myapp", pat_matchport)
-
-
-if __name__ == '__main__':
-    test_is_registry_match()
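
is_registry_match treats everything before the first '/' as the registry host and compares it, port-insensitively, against a glob pattern. A compact fnmatch-based sketch that passes the assertions above (illustrative, not the removed implementation itself):

    from fnmatch import fnmatch

    def registry_match(image_ref, pattern):
        """Glob-match an image reference's registry against a pattern."""
        host = image_ref.split('/', 1)[0]
        if pattern == '*':
            return True
        if fnmatch(host, pattern):
            return True
        # Retry with the port stripped from both sides.
        return fnmatch(host.split(':')[0], pattern.split(':')[0])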

+ 0 - 37
test/unit/modify_yaml_tests.py

@@ -1,37 +0,0 @@
-""" Tests for the modify_yaml Ansible module. """
-# pylint: disable=missing-docstring,invalid-name
-
-import os
-import sys
-import unittest
-
-sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../../library/")] + sys.path
-
-# pylint: disable=import-error
-from modify_yaml import set_key  # noqa: E402
-
-
-class ModifyYamlTests(unittest.TestCase):
-
-    def test_simple_nested_value(self):
-        cfg = {"section": {"a": 1, "b": 2}}
-        changes = set_key(cfg, 'section.c', 3)
-        self.assertEqual(1, len(changes))
-        self.assertEqual(3, cfg['section']['c'])
-
-    # Tests a previous bug where the property would land in the section above
-    # the intended one if the destination section did not yet exist:
-    def test_nested_property_in_new_section(self):
-        cfg = {
-            "masterClients": {
-                "externalKubernetesKubeConfig": "",
-                "openshiftLoopbackKubeConfig": "openshift-master.kubeconfig",
-            },
-        }
-
-        yaml_key = 'masterClients.externalKubernetesClientConnectionOverrides.acceptContentTypes'
-        yaml_value = 'application/vnd.kubernetes.protobuf,application/json'
-        set_key(cfg, yaml_key, yaml_value)
-        self.assertEqual(yaml_value, cfg['masterClients']
-                         ['externalKubernetesClientConnectionOverrides']
-                         ['acceptContentTypes'])
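
The tests above define set_key's contract: create intermediate sections along the dotted key as needed, set the leaf value, and report what changed. A sketch consistent with that contract (the change-list format is assumed, since only its length is asserted):

    def set_key(cfg, dotted_key, value):
        """Set a nested key like 'a.b.c', creating sections along the way."""
        node = cfg
        parts = dotted_key.split('.')
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        changes = []
        if node.get(parts[-1]) != value:
            node[parts[-1]] = value
            changes.append((dotted_key, value))
        return changes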