
openshift_checks: refactor to internalize task_vars

Move task_vars into an instance variable so we don't have to pass it
around everywhere. Also store tmp, and make sure both are filled in
when execute_module is invoked.

In the process, is_active became an instance method, and task_vars is
now essentially never used directly outside of test code.
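
For illustration, here is a minimal sketch of what a check looks like after this refactor. ExampleCheck and its use of the ping module are hypothetical, not code from this commit; they only show the new calling convention in which task_vars and tmp live on the instance.

    from openshift_checks import OpenShiftCheck

    class ExampleCheck(OpenShiftCheck):
        """Hypothetical check illustrating the refactored interface."""
        name = "example"

        def is_active(self):
            # task_vars is read from the instance instead of being passed in
            group_names = self.get_var("group_names", default=[])
            return super(ExampleCheck, self).is_active() and "nodes" in group_names

        def run(self):
            # execute_module() supplies the stored tmp and task_vars automatically
            return self.execute_module("ping", {})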
Luke Meyer, 8 years ago
commit 210fc2d384
39 changed files with 439 additions and 480 deletions
  1. +5 -5 roles/openshift_health_checker/action_plugins/openshift_health_check.py
  2. +54 -27 roles/openshift_health_checker/openshift_checks/__init__.py
  3. +9 -10 roles/openshift_health_checker/openshift_checks/disk_availability.py
  4. +27 -30 roles/openshift_health_checker/openshift_checks/docker_image_availability.py
  5. +18 -18 roles/openshift_health_checker/openshift_checks/docker_storage.py
  6. +15 -14 roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py
  7. +8 -11 roles/openshift_health_checker/openshift_checks/etcd_traffic.py
  8. +10 -13 roles/openshift_health_checker/openshift_checks/etcd_volume.py
  9. +2 -5 roles/openshift_health_checker/openshift_checks/logging/curator.py
  10. +19 -24 roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
  11. +20 -22 roles/openshift_health_checker/openshift_checks/logging/fluentd.py
  12. +17 -20 roles/openshift_health_checker/openshift_checks/logging/kibana.py
  13. +14 -19 roles/openshift_health_checker/openshift_checks/logging/logging.py
  14. +13 -15 roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
  15. +9 -10 roles/openshift_health_checker/openshift_checks/memory_availability.py
  16. +10 -15 roles/openshift_health_checker/openshift_checks/mixins.py
  17. +11 -12 roles/openshift_health_checker/openshift_checks/ovs_version.py
  18. +7 -8 roles/openshift_health_checker/openshift_checks/package_availability.py
  19. +2 -2 roles/openshift_health_checker/openshift_checks/package_update.py
  20. +17 -18 roles/openshift_health_checker/openshift_checks/package_version.py
  21. +8 -9 roles/openshift_health_checker/test/action_plugin_test.py
  22. +4 -7 roles/openshift_health_checker/test/disk_availability_test.py
  23. +18 -19 roles/openshift_health_checker/test/docker_image_availability_test.py
  24. +15 -21 roles/openshift_health_checker/test/docker_storage_test.py
  25. +13 -13 roles/openshift_health_checker/test/elasticsearch_test.py
  26. +10 -10 roles/openshift_health_checker/test/etcd_imagedata_size_test.py
  27. +5 -11 roles/openshift_health_checker/test/etcd_traffic_test.py
  28. +3 -6 roles/openshift_health_checker/test/etcd_volume_test.py
  29. +2 -2 roles/openshift_health_checker/test/fluentd_test.py
  30. +9 -9 roles/openshift_health_checker/test/kibana_test.py
  31. +6 -8 roles/openshift_health_checker/test/logging_check_test.py
  32. +10 -22 roles/openshift_health_checker/test/logging_index_time_test.py
  33. +3 -5 roles/openshift_health_checker/test/memory_availability_test.py
  34. +2 -2 roles/openshift_health_checker/test/mixins_test.py
  35. +19 -8 roles/openshift_health_checker/test/openshift_check_test.py
  36. +7 -10 roles/openshift_health_checker/test/ovs_version_test.py
  37. +3 -4 roles/openshift_health_checker/test/package_availability_test.py
  38. +2 -3 roles/openshift_health_checker/test/package_update_test.py
  39. +13 -13 roles/openshift_health_checker/test/package_version_test.py

+ 5 - 5
roles/openshift_health_checker/action_plugins/openshift_health_check.py

@@ -37,7 +37,7 @@ class ActionModule(ActionBase):
             return result
             return result
 
 
         try:
         try:
-            known_checks = self.load_known_checks()
+            known_checks = self.load_known_checks(tmp, task_vars)
             args = self._task.args
             args = self._task.args
             resolved_checks = resolve_checks(args.get("checks", []), known_checks.values())
             resolved_checks = resolve_checks(args.get("checks", []), known_checks.values())
         except OpenShiftCheckException as e:
         except OpenShiftCheckException as e:
@@ -56,13 +56,13 @@ class ActionModule(ActionBase):
             display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"]))
             display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"]))
             check = known_checks[check_name]
             check = known_checks[check_name]
 
 
-            if not check.is_active(task_vars):
+            if not check.is_active():
                 r = dict(skipped=True, skipped_reason="Not active for this host")
                 r = dict(skipped=True, skipped_reason="Not active for this host")
             elif check_name in user_disabled_checks:
             elif check_name in user_disabled_checks:
                 r = dict(skipped=True, skipped_reason="Disabled by user request")
                 r = dict(skipped=True, skipped_reason="Disabled by user request")
             else:
             else:
                 try:
                 try:
-                    r = check.run(tmp, task_vars)
+                    r = check.run()
                 except OpenShiftCheckException as e:
                 except OpenShiftCheckException as e:
                     r = dict(
                     r = dict(
                         failed=True,
                         failed=True,
@@ -78,7 +78,7 @@ class ActionModule(ActionBase):
         result["changed"] = any(r.get("changed", False) for r in check_results.values())
         result["changed"] = any(r.get("changed", False) for r in check_results.values())
         return result
         return result
 
 
-    def load_known_checks(self):
+    def load_known_checks(self, tmp, task_vars):
         load_checks()
         load_checks()
 
 
         known_checks = {}
         known_checks = {}
@@ -91,7 +91,7 @@ class ActionModule(ActionBase):
                         check_name,
                         check_name,
                         cls.__module__, cls.__name__,
                         cls.__module__, cls.__name__,
                         other_cls.__module__, other_cls.__name__))
                         other_cls.__module__, other_cls.__name__))
-            known_checks[check_name] = cls(execute_module=self._execute_module)
+            known_checks[check_name] = cls(execute_module=self._execute_module, tmp=tmp, task_vars=task_vars)
         return known_checks
         return known_checks
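
With this change the plugin (and, analogously, a unit test) supplies everything a check needs once, at construction time. A hedged sketch, reusing the hypothetical ExampleCheck from the commit message and a made-up stand-in for _execute_module:

    def fake_execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
        """Test double matching the positional call made by OpenShiftCheck.execute_module."""
        return {"changed": False, "invoked": module_name, "host": task_vars["ansible_host"]}

    check = ExampleCheck(
        execute_module=fake_execute_module,
        tmp=None,
        task_vars={"ansible_host": "node1.example.com", "group_names": ["nodes"]},
    )
    assert check.is_active()                  # no task_vars argument any more
    assert check.run()["invoked"] == "ping"   # tmp and task_vars are filled in by the base class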
 
 
 
 

+ 54 - 27
roles/openshift_health_checker/openshift_checks/__init__.py

@@ -19,14 +19,21 @@ class OpenShiftCheckException(Exception):
 
 
 @six.add_metaclass(ABCMeta)
 @six.add_metaclass(ABCMeta)
 class OpenShiftCheck(object):
 class OpenShiftCheck(object):
-    """A base class for defining checks for an OpenShift cluster environment."""
+    """
+    A base class for defining checks for an OpenShift cluster environment.
+
+    Expect optional params: method execute_module, dict task_vars, and string tmp.
+    execute_module is expected to have a signature compatible with _execute_module
+    from ansible plugins/action/__init__.py, e.g.:
+    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *args):
+    This is stored so that it can be invoked in subclasses via check.execute_module("name", args)
+    which provides the check's stored task_vars and tmp.
+    """
 
 
-    def __init__(self, execute_module=None):
-        def placeholder(*_):
-            """Fail tests more helpfully when execute_module not provided."""
-            raise TypeError(self.__class__.__name__ +
-                            " invoked execute_module without providing the method at initialization.")
-        self.execute_module = execute_module or placeholder
+    def __init__(self, execute_module=None, task_vars=None, tmp=None):
+        self._execute_module = execute_module
+        self.task_vars = task_vars or {}
+        self.tmp = tmp
 
 
     @abstractproperty
     @abstractproperty
     def name(self):
     def name(self):
@@ -42,13 +49,13 @@ class OpenShiftCheck(object):
         """
         """
         return []
         return []
 
 
-    @classmethod
-    def is_active(cls, task_vars):  # pylint: disable=unused-argument
+    @staticmethod
+    def is_active():
         """Returns true if this check applies to the ansible-playbook run."""
         """Returns true if this check applies to the ansible-playbook run."""
         return True
         return True
 
 
     @abstractmethod
     @abstractmethod
-    def run(self, tmp, task_vars):
+    def run(self):
         """Executes a check, normally implemented as a module."""
         """Executes a check, normally implemented as a module."""
         return {}
         return {}
 
 
@@ -61,6 +68,43 @@ class OpenShiftCheck(object):
             for subclass in subclass.subclasses():
             for subclass in subclass.subclasses():
                 yield subclass
                 yield subclass
 
 
+    def execute_module(self, module_name=None, module_args=None):
+        """Invoke an Ansible module from a check.
+
+        Invoke stored _execute_module, normally copied from the action
+        plugin, with its params and the task_vars and tmp given at
+        check initialization. No positional parameters beyond these
+        are specified. If it's necessary to specify any of the other
+        parameters to _execute_module then that should just be invoked
+        directly (with awareness of changes in method signature per
+        Ansible version).
+
+        So e.g. check.execute_module("foo", dict(arg1=...))
+        Return: result hash from module execution.
+        """
+        if self._execute_module is None:
+            raise NotImplementedError(
+                self.__class__.__name__ +
+                " invoked execute_module without providing the method at initialization."
+            )
+        return self._execute_module(module_name, module_args, self.tmp, self.task_vars)
+
+    def get_var(self, *keys, **kwargs):
+        """Get deeply nested values from task_vars.
+
+        Ansible task_vars structures are Python dicts, often mapping strings to
+        other dicts. This helper makes it easier to get a nested value, raising
+        OpenShiftCheckException when a key is not found or returning a default value
+        provided as a keyword argument.
+        """
+        try:
+            value = reduce(operator.getitem, keys, self.task_vars)
+        except (KeyError, TypeError):
+            if "default" in kwargs:
+                return kwargs["default"]
+            raise OpenShiftCheckException("'{}' is undefined".format(".".join(map(str, keys))))
+        return value
+
 
 
 LOADER_EXCLUDES = (
 LOADER_EXCLUDES = (
     "__init__.py",
     "__init__.py",
@@ -85,20 +129,3 @@ def load_checks(path=None, subpkg=""):
             modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
             modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
 
 
     return modules
     return modules
-
-
-def get_var(task_vars, *keys, **kwargs):
-    """Helper function to get deeply nested values from task_vars.
-
-    Ansible task_vars structures are Python dicts, often mapping strings to
-    other dicts. This helper makes it easier to get a nested value, raising
-    OpenShiftCheckException when a key is not found or returning a default value
-    provided as a keyword argument.
-    """
-    try:
-        value = reduce(operator.getitem, keys, task_vars)
-    except (KeyError, TypeError):
-        if "default" in kwargs:
-            return kwargs["default"]
-        raise OpenShiftCheckException("'{}' is undefined".format(".".join(map(str, keys))))
-    return value
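
get_var is now an instance method over self.task_vars rather than a module-level helper. Assuming the hypothetical ExampleCheck sketched earlier, its behavior looks like this:

    check = ExampleCheck(task_vars={"openshift": {"common": {"is_containerized": True}}})

    check.get_var("openshift", "common", "is_containerized")   # -> True (nested lookup)
    check.get_var("openshift_image_tag", default="latest")     # -> "latest" (missing key, default given)
    check.get_var("openshift", "short_version")                # raises OpenShiftCheckException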

+ 9 - 10
roles/openshift_health_checker/openshift_checks/disk_availability.py

@@ -3,7 +3,7 @@
 import os.path
 import os.path
 import tempfile
 import tempfile
 
 
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
 
 
 
 
 class DiskAvailability(OpenShiftCheck):
 class DiskAvailability(OpenShiftCheck):
@@ -35,22 +35,21 @@ class DiskAvailability(OpenShiftCheck):
         },
         },
     }
     }
 
 
-    @classmethod
-    def is_active(cls, task_vars):
+    def is_active(self):
         """Skip hosts that do not have recommended disk space requirements."""
         """Skip hosts that do not have recommended disk space requirements."""
-        group_names = get_var(task_vars, "group_names", default=[])
+        group_names = self.get_var("group_names", default=[])
         active_groups = set()
         active_groups = set()
-        for recommendation in cls.recommended_disk_space_bytes.values():
+        for recommendation in self.recommended_disk_space_bytes.values():
             active_groups.update(recommendation.keys())
             active_groups.update(recommendation.keys())
         has_disk_space_recommendation = bool(active_groups.intersection(group_names))
         has_disk_space_recommendation = bool(active_groups.intersection(group_names))
-        return super(DiskAvailability, cls).is_active(task_vars) and has_disk_space_recommendation
+        return super(DiskAvailability, self).is_active() and has_disk_space_recommendation
 
 
-    def run(self, tmp, task_vars):
-        group_names = get_var(task_vars, "group_names")
-        ansible_mounts = get_var(task_vars, "ansible_mounts")
+    def run(self):
+        group_names = self.get_var("group_names")
+        ansible_mounts = self.get_var("ansible_mounts")
         ansible_mounts = {mount['mount']: mount for mount in ansible_mounts}
         ansible_mounts = {mount['mount']: mount for mount in ansible_mounts}
 
 
-        user_config = get_var(task_vars, "openshift_check_min_host_disk_gb", default={})
+        user_config = self.get_var("openshift_check_min_host_disk_gb", default={})
         try:
         try:
             # For backwards-compatibility, if openshift_check_min_host_disk_gb
             # For backwards-compatibility, if openshift_check_min_host_disk_gb
             # is a number, then it overrides the required config for '/var'.
             # is a number, then it overrides the required config for '/var'.

+ 27 - 30
roles/openshift_health_checker/openshift_checks/docker_image_availability.py

@@ -1,6 +1,6 @@
 """Check that required Docker images are available."""
 """Check that required Docker images are available."""
 
 
-from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks import OpenShiftCheck
 from openshift_checks.mixins import DockerHostMixin
 from openshift_checks.mixins import DockerHostMixin
 
 
 
 
@@ -33,16 +33,15 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
     # to look for images available remotely without waiting to pull them.
     # to look for images available remotely without waiting to pull them.
     dependencies = ["python-docker-py", "skopeo"]
     dependencies = ["python-docker-py", "skopeo"]
 
 
-    @classmethod
-    def is_active(cls, task_vars):
+    def is_active(self):
         """Skip hosts with unsupported deployment types."""
         """Skip hosts with unsupported deployment types."""
-        deployment_type = get_var(task_vars, "openshift_deployment_type")
+        deployment_type = self.get_var("openshift_deployment_type")
         has_valid_deployment_type = deployment_type in DEPLOYMENT_IMAGE_INFO
         has_valid_deployment_type = deployment_type in DEPLOYMENT_IMAGE_INFO
 
 
-        return super(DockerImageAvailability, cls).is_active(task_vars) and has_valid_deployment_type
+        return super(DockerImageAvailability, self).is_active() and has_valid_deployment_type
 
 
-    def run(self, tmp, task_vars):
-        msg, failed, changed = self.ensure_dependencies(task_vars)
+    def run(self):
+        msg, failed, changed = self.ensure_dependencies()
         if failed:
         if failed:
             return {
             return {
                 "failed": True,
                 "failed": True,
@@ -50,18 +49,18 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
                 "msg": "Some dependencies are required in order to check Docker image availability.\n" + msg
                 "msg": "Some dependencies are required in order to check Docker image availability.\n" + msg
             }
             }
 
 
-        required_images = self.required_images(task_vars)
-        missing_images = set(required_images) - set(self.local_images(required_images, task_vars))
+        required_images = self.required_images()
+        missing_images = set(required_images) - set(self.local_images(required_images))
 
 
         # exit early if all images were found locally
         # exit early if all images were found locally
         if not missing_images:
         if not missing_images:
             return {"changed": changed}
             return {"changed": changed}
 
 
-        registries = self.known_docker_registries(task_vars)
+        registries = self.known_docker_registries()
         if not registries:
         if not registries:
             return {"failed": True, "msg": "Unable to retrieve any docker registries.", "changed": changed}
             return {"failed": True, "msg": "Unable to retrieve any docker registries.", "changed": changed}
 
 
-        available_images = self.available_images(missing_images, registries, task_vars)
+        available_images = self.available_images(missing_images, registries)
         unavailable_images = set(missing_images) - set(available_images)
         unavailable_images = set(missing_images) - set(available_images)
 
 
         if unavailable_images:
         if unavailable_images:
@@ -76,8 +75,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
 
 
         return {"changed": changed}
         return {"changed": changed}
 
 
-    @staticmethod
-    def required_images(task_vars):
+    def required_images(self):
         """
         """
         Determine which images we expect to need for this host.
         Determine which images we expect to need for this host.
         Returns: a set of required images like 'openshift/origin:v3.6'
         Returns: a set of required images like 'openshift/origin:v3.6'
@@ -94,17 +92,17 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
         Registry is not included in constructed images. It may be in oreg_url or etcd image.
         Registry is not included in constructed images. It may be in oreg_url or etcd image.
         """
         """
         required = set()
         required = set()
-        deployment_type = get_var(task_vars, "openshift_deployment_type")
-        host_groups = get_var(task_vars, "group_names")
+        deployment_type = self.get_var("openshift_deployment_type")
+        host_groups = self.get_var("group_names")
         # containerized etcd may not have openshift_image_tag, see bz 1466622
         # containerized etcd may not have openshift_image_tag, see bz 1466622
-        image_tag = get_var(task_vars, "openshift_image_tag", default="latest")
+        image_tag = self.get_var("openshift_image_tag", default="latest")
         image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]
         image_info = DEPLOYMENT_IMAGE_INFO[deployment_type]
         if not image_info:
         if not image_info:
             return required
             return required
 
 
         # template for images that run on top of OpenShift
         # template for images that run on top of OpenShift
         image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
         image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
-        image_url = get_var(task_vars, "oreg_url", default="") or image_url
+        image_url = self.get_var("oreg_url", default="") or image_url
         if 'nodes' in host_groups:
         if 'nodes' in host_groups:
             for suffix in NODE_IMAGE_SUFFIXES:
             for suffix in NODE_IMAGE_SUFFIXES:
                 required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
                 required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
@@ -114,7 +112,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
             required.add(image_info["registry_console_image"])
             required.add(image_info["registry_console_image"])
 
 
         # images for containerized components
         # images for containerized components
-        if get_var(task_vars, "openshift", "common", "is_containerized"):
+        if self.get_var("openshift", "common", "is_containerized"):
             components = set()
             components = set()
             if 'nodes' in host_groups:
             if 'nodes' in host_groups:
                 components.update(["node", "openvswitch"])
                 components.update(["node", "openvswitch"])
@@ -127,28 +125,27 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
 
 
         return required
         return required
 
 
-    def local_images(self, images, task_vars):
+    def local_images(self, images):
         """Filter a list of images and return those available locally."""
         """Filter a list of images and return those available locally."""
         return [
         return [
             image for image in images
             image for image in images
-            if self.is_image_local(image, task_vars)
+            if self.is_image_local(image)
         ]
         ]
 
 
-    def is_image_local(self, image, task_vars):
+    def is_image_local(self, image):
         """Check if image is already in local docker index."""
         """Check if image is already in local docker index."""
-        result = self.execute_module("docker_image_facts", {"name": image}, task_vars=task_vars)
+        result = self.execute_module("docker_image_facts", {"name": image})
         if result.get("failed", False):
         if result.get("failed", False):
             return False
             return False
 
 
         return bool(result.get("images", []))
         return bool(result.get("images", []))
 
 
-    @staticmethod
-    def known_docker_registries(task_vars):
+    def known_docker_registries(self):
         """Build a list of docker registries available according to inventory vars."""
         """Build a list of docker registries available according to inventory vars."""
-        docker_facts = get_var(task_vars, "openshift", "docker")
+        docker_facts = self.get_var("openshift", "docker")
         regs = set(docker_facts["additional_registries"])
         regs = set(docker_facts["additional_registries"])
 
 
-        deployment_type = get_var(task_vars, "openshift_deployment_type")
+        deployment_type = self.get_var("openshift_deployment_type")
         if deployment_type == "origin":
         if deployment_type == "origin":
             regs.update(["docker.io"])
             regs.update(["docker.io"])
         elif "enterprise" in deployment_type:
         elif "enterprise" in deployment_type:
@@ -156,14 +153,14 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
 
 
         return list(regs)
         return list(regs)
 
 
-    def available_images(self, images, default_registries, task_vars):
+    def available_images(self, images, default_registries):
         """Search remotely for images. Returns: list of images found."""
         """Search remotely for images. Returns: list of images found."""
         return [
         return [
             image for image in images
             image for image in images
-            if self.is_available_skopeo_image(image, default_registries, task_vars)
+            if self.is_available_skopeo_image(image, default_registries)
         ]
         ]
 
 
-    def is_available_skopeo_image(self, image, default_registries, task_vars):
+    def is_available_skopeo_image(self, image, default_registries):
         """Use Skopeo to determine if required image exists in known registry(s)."""
         """Use Skopeo to determine if required image exists in known registry(s)."""
         registries = default_registries
         registries = default_registries
 
 
@@ -174,7 +171,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
 
 
         for registry in registries:
         for registry in registries:
             args = {"_raw_params": "skopeo inspect --tls-verify=false docker://{}/{}".format(registry, image)}
             args = {"_raw_params": "skopeo inspect --tls-verify=false docker://{}/{}".format(registry, image)}
-            result = self.execute_module("command", args, task_vars=task_vars)
+            result = self.execute_module("command", args)
             if result.get("rc", 0) == 0 and not result.get("failed"):
             if result.get("rc", 0) == 0 and not result.get("failed"):
                 return True
                 return True
 
 

+ 18 - 18
roles/openshift_health_checker/openshift_checks/docker_storage.py

@@ -2,7 +2,7 @@
 import json
 import json
 import os.path
 import os.path
 import re
 import re
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
 from openshift_checks.mixins import DockerHostMixin
 from openshift_checks.mixins import DockerHostMixin
 
 
 
 
@@ -42,8 +42,8 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
         ),
         ),
     ]
     ]
 
 
-    def run(self, tmp, task_vars):
-        msg, failed, changed = self.ensure_dependencies(task_vars)
+    def run(self):
+        msg, failed, changed = self.ensure_dependencies()
         if failed:
         if failed:
             return {
             return {
                 "failed": True,
                 "failed": True,
@@ -52,7 +52,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
             }
             }
 
 
         # attempt to get the docker info hash from the API
         # attempt to get the docker info hash from the API
-        docker_info = self.execute_module("docker_info", {}, task_vars=task_vars)
+        docker_info = self.execute_module("docker_info", {})
         if docker_info.get("failed"):
         if docker_info.get("failed"):
             return {"failed": True, "changed": changed,
             return {"failed": True, "changed": changed,
                     "msg": "Failed to query Docker API. Is docker running on this host?"}
                     "msg": "Failed to query Docker API. Is docker running on this host?"}
@@ -76,15 +76,15 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
         result = {}
         result = {}
 
 
         if driver == "devicemapper":
         if driver == "devicemapper":
-            result = self.check_devicemapper_support(driver_status, task_vars)
+            result = self.check_devicemapper_support(driver_status)
 
 
         if driver in ['overlay', 'overlay2']:
         if driver in ['overlay', 'overlay2']:
-            result = self.check_overlay_support(docker_info, driver_status, task_vars)
+            result = self.check_overlay_support(docker_info, driver_status)
 
 
         result['changed'] = result.get('changed', False) or changed
         result['changed'] = result.get('changed', False) or changed
         return result
         return result
 
 
-    def check_devicemapper_support(self, driver_status, task_vars):
+    def check_devicemapper_support(self, driver_status):
         """Check if dm storage driver is supported as configured. Return: result dict."""
         """Check if dm storage driver is supported as configured. Return: result dict."""
         if driver_status.get("Data loop file"):
         if driver_status.get("Data loop file"):
             msg = (
             msg = (
@@ -94,10 +94,10 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
                 "See http://red.ht/2rNperO for further information."
                 "See http://red.ht/2rNperO for further information."
             )
             )
             return {"failed": True, "msg": msg}
             return {"failed": True, "msg": msg}
-        result = self.check_dm_usage(driver_status, task_vars)
+        result = self.check_dm_usage(driver_status)
         return result
         return result
 
 
-    def check_dm_usage(self, driver_status, task_vars):
+    def check_dm_usage(self, driver_status):
         """Check usage thresholds for Docker dm storage driver. Return: result dict.
         """Check usage thresholds for Docker dm storage driver. Return: result dict.
         Backing assumptions: We expect devicemapper to be backed by an auto-expanding thin pool
         Backing assumptions: We expect devicemapper to be backed by an auto-expanding thin pool
         implemented as an LV in an LVM2 VG. This is how docker-storage-setup currently configures
         implemented as an LV in an LVM2 VG. This is how docker-storage-setup currently configures
@@ -109,7 +109,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
         could run out of space first; so we check both.
         could run out of space first; so we check both.
         """
         """
         vals = dict(
         vals = dict(
-            vg_free=self.get_vg_free(driver_status.get("Pool Name"), task_vars),
+            vg_free=self.get_vg_free(driver_status.get("Pool Name")),
             data_used=driver_status.get("Data Space Used"),
             data_used=driver_status.get("Data Space Used"),
             data_total=driver_status.get("Data Space Total"),
             data_total=driver_status.get("Data Space Total"),
             metadata_used=driver_status.get("Metadata Space Used"),
             metadata_used=driver_status.get("Metadata Space Used"),
@@ -130,7 +130,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
         # determine the threshold percentages which usage should not exceed
         # determine the threshold percentages which usage should not exceed
         for name, default in [("data", self.max_thinpool_data_usage_percent),
         for name, default in [("data", self.max_thinpool_data_usage_percent),
                               ("metadata", self.max_thinpool_meta_usage_percent)]:
                               ("metadata", self.max_thinpool_meta_usage_percent)]:
-            percent = get_var(task_vars, "max_thinpool_" + name + "_usage_percent", default=default)
+            percent = self.get_var("max_thinpool_" + name + "_usage_percent", default=default)
             try:
             try:
                 vals[name + "_threshold"] = float(percent)
                 vals[name + "_threshold"] = float(percent)
             except ValueError:
             except ValueError:
@@ -157,7 +157,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
         vals["msg"] = "\n".join(messages or ["Thinpool usage is within thresholds."])
         vals["msg"] = "\n".join(messages or ["Thinpool usage is within thresholds."])
         return vals
         return vals
 
 
-    def get_vg_free(self, pool, task_vars):
+    def get_vg_free(self, pool):
         """Determine which VG to examine according to the pool name. Return: size vgs reports.
         """Determine which VG to examine according to the pool name. Return: size vgs reports.
         Pool name is the only indicator currently available from the Docker API driver info.
         Pool name is the only indicator currently available from the Docker API driver info.
         We assume a name that looks like "vg--name-docker--pool";
         We assume a name that looks like "vg--name-docker--pool";
@@ -174,7 +174,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
         vgs_cmd = "/sbin/vgs --noheadings -o vg_free --units g --select vg_name=" + vg_name
         vgs_cmd = "/sbin/vgs --noheadings -o vg_free --units g --select vg_name=" + vg_name
         # should return free space like "  12.00g" if the VG exists; empty if it does not
         # should return free space like "  12.00g" if the VG exists; empty if it does not
 
 
-        ret = self.execute_module("command", {"_raw_params": vgs_cmd}, task_vars=task_vars)
+        ret = self.execute_module("command", {"_raw_params": vgs_cmd})
         if ret.get("failed") or ret.get("rc", 0) != 0:
         if ret.get("failed") or ret.get("rc", 0) != 0:
             raise OpenShiftCheckException(
             raise OpenShiftCheckException(
                 "Is LVM installed? Failed to run /sbin/vgs "
                 "Is LVM installed? Failed to run /sbin/vgs "
@@ -213,7 +213,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
 
 
         return float(number) * multiplier
         return float(number) * multiplier
 
 
-    def check_overlay_support(self, docker_info, driver_status, task_vars):
+    def check_overlay_support(self, docker_info, driver_status):
         """Check if overlay storage driver is supported for this host. Return: result dict."""
         """Check if overlay storage driver is supported for this host. Return: result dict."""
         # check for xfs as backing store
         # check for xfs as backing store
         backing_fs = driver_status.get("Backing Filesystem", "[NONE]")
         backing_fs = driver_status.get("Backing Filesystem", "[NONE]")
@@ -239,13 +239,13 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
             # NOTE: we could check for --selinux-enabled here but docker won't even start with
             # NOTE: we could check for --selinux-enabled here but docker won't even start with
             # that option until it's supported in the kernel so we don't need to.
             # that option until it's supported in the kernel so we don't need to.
 
 
-        return self.check_overlay_usage(docker_info, task_vars)
+        return self.check_overlay_usage(docker_info)
 
 
-    def check_overlay_usage(self, docker_info, task_vars):
+    def check_overlay_usage(self, docker_info):
         """Check disk usage on OverlayFS backing store volume. Return: result dict."""
         """Check disk usage on OverlayFS backing store volume. Return: result dict."""
         path = docker_info.get("DockerRootDir", "/var/lib/docker") + "/" + docker_info["Driver"]
         path = docker_info.get("DockerRootDir", "/var/lib/docker") + "/" + docker_info["Driver"]
 
 
-        threshold = get_var(task_vars, "max_overlay_usage_percent", default=self.max_overlay_usage_percent)
+        threshold = self.get_var("max_overlay_usage_percent", default=self.max_overlay_usage_percent)
         try:
         try:
             threshold = float(threshold)
             threshold = float(threshold)
         except ValueError:
         except ValueError:
@@ -254,7 +254,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
                 "msg": "Specified 'max_overlay_usage_percent' is not a percentage: {}".format(threshold),
                 "msg": "Specified 'max_overlay_usage_percent' is not a percentage: {}".format(threshold),
             }
             }
 
 
-        mount = self.find_ansible_mount(path, get_var(task_vars, "ansible_mounts"))
+        mount = self.find_ansible_mount(path, self.get_var("ansible_mounts"))
         try:
         try:
             free_bytes = mount['size_available']
             free_bytes = mount['size_available']
             total_bytes = mount['size_total']
             total_bytes = mount['size_total']

+ 15 - 14
roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py

@@ -2,7 +2,7 @@
 Ansible module for determining if the size of OpenShift image data exceeds a specified limit in an etcd cluster.
 Ansible module for determining if the size of OpenShift image data exceeds a specified limit in an etcd cluster.
 """
 """
 
 
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
 
 
 
 
 class EtcdImageDataSize(OpenShiftCheck):
 class EtcdImageDataSize(OpenShiftCheck):
@@ -11,24 +11,25 @@ class EtcdImageDataSize(OpenShiftCheck):
     name = "etcd_imagedata_size"
     name = "etcd_imagedata_size"
     tags = ["etcd"]
     tags = ["etcd"]
 
 
-    def run(self, tmp, task_vars):
-        etcd_mountpath = self._get_etcd_mountpath(get_var(task_vars, "ansible_mounts"))
+    def run(self):
+        etcd_mountpath = self._get_etcd_mountpath(self.get_var("ansible_mounts"))
         etcd_avail_diskspace = etcd_mountpath["size_available"]
         etcd_avail_diskspace = etcd_mountpath["size_available"]
         etcd_total_diskspace = etcd_mountpath["size_total"]
         etcd_total_diskspace = etcd_mountpath["size_total"]
 
 
-        etcd_imagedata_size_limit = get_var(task_vars,
-                                            "etcd_max_image_data_size_bytes",
-                                            default=int(0.5 * float(etcd_total_diskspace - etcd_avail_diskspace)))
+        etcd_imagedata_size_limit = self.get_var(
+            "etcd_max_image_data_size_bytes",
+            default=int(0.5 * float(etcd_total_diskspace - etcd_avail_diskspace))
+        )
 
 
-        etcd_is_ssl = get_var(task_vars, "openshift", "master", "etcd_use_ssl", default=False)
-        etcd_port = get_var(task_vars, "openshift", "master", "etcd_port", default=2379)
-        etcd_hosts = get_var(task_vars, "openshift", "master", "etcd_hosts")
+        etcd_is_ssl = self.get_var("openshift", "master", "etcd_use_ssl", default=False)
+        etcd_port = self.get_var("openshift", "master", "etcd_port", default=2379)
+        etcd_hosts = self.get_var("openshift", "master", "etcd_hosts")
 
 
-        config_base = get_var(task_vars, "openshift", "common", "config_base")
+        config_base = self.get_var("openshift", "common", "config_base")
 
 
-        cert = task_vars.get("etcd_client_cert", config_base + "/master/master.etcd-client.crt")
-        key = task_vars.get("etcd_client_key", config_base + "/master/master.etcd-client.key")
-        ca_cert = task_vars.get("etcd_client_ca_cert", config_base + "/master/master.etcd-ca.crt")
+        cert = self.get_var("etcd_client_cert", default=config_base + "/master/master.etcd-client.crt")
+        key = self.get_var("etcd_client_key", default=config_base + "/master/master.etcd-client.key")
+        ca_cert = self.get_var("etcd_client_ca_cert", default=config_base + "/master/master.etcd-ca.crt")
 
 
         for etcd_host in list(etcd_hosts):
         for etcd_host in list(etcd_hosts):
             args = {
             args = {
@@ -46,7 +47,7 @@ class EtcdImageDataSize(OpenShiftCheck):
                 },
                 },
             }
             }
 
 
-            etcdkeysize = self.execute_module("etcdkeysize", args, task_vars)
+            etcdkeysize = self.execute_module("etcdkeysize", args)
 
 
             if etcdkeysize.get("rc", 0) != 0 or etcdkeysize.get("failed"):
             if etcdkeysize.get("rc", 0) != 0 or etcdkeysize.get("failed"):
                 msg = 'Failed to retrieve stats for etcd host "{host}": {reason}'
                 msg = 'Failed to retrieve stats for etcd host "{host}": {reason}'

+ 8 - 11
roles/openshift_health_checker/openshift_checks/etcd_traffic.py

@@ -1,6 +1,6 @@
 """Check that scans journalctl for messages caused as a symptom of increased etcd traffic."""
 """Check that scans journalctl for messages caused as a symptom of increased etcd traffic."""
 
 
-from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks import OpenShiftCheck
 
 
 
 
 class EtcdTraffic(OpenShiftCheck):
 class EtcdTraffic(OpenShiftCheck):
@@ -9,19 +9,18 @@ class EtcdTraffic(OpenShiftCheck):
     name = "etcd_traffic"
     name = "etcd_traffic"
     tags = ["health", "etcd"]
     tags = ["health", "etcd"]
 
 
-    @classmethod
-    def is_active(cls, task_vars):
+    def is_active(self):
         """Skip hosts that do not have etcd in their group names."""
         """Skip hosts that do not have etcd in their group names."""
-        group_names = get_var(task_vars, "group_names", default=[])
+        group_names = self.get_var("group_names", default=[])
         valid_group_names = "etcd" in group_names
         valid_group_names = "etcd" in group_names
 
 
-        version = get_var(task_vars, "openshift", "common", "short_version")
+        version = self.get_var("openshift", "common", "short_version")
         valid_version = version in ("3.4", "3.5", "1.4", "1.5")
         valid_version = version in ("3.4", "3.5", "1.4", "1.5")
 
 
-        return super(EtcdTraffic, cls).is_active(task_vars) and valid_group_names and valid_version
+        return super(EtcdTraffic, self).is_active() and valid_group_names and valid_version
 
 
-    def run(self, tmp, task_vars):
-        is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
+    def run(self):
+        is_containerized = self.get_var("openshift", "common", "is_containerized")
         unit = "etcd_container" if is_containerized else "etcd"
         unit = "etcd_container" if is_containerized else "etcd"
 
 
         log_matchers = [{
         log_matchers = [{
@@ -30,9 +29,7 @@ class EtcdTraffic(OpenShiftCheck):
             "unit": unit
             "unit": unit
         }]
         }]
 
 
-        match = self.execute_module("search_journalctl", {
-            "log_matchers": log_matchers,
-        }, task_vars)
+        match = self.execute_module("search_journalctl", {"log_matchers": log_matchers})
 
 
         if match.get("matched"):
         if match.get("matched"):
             msg = ("Higher than normal etcd traffic detected.\n"
             msg = ("Higher than normal etcd traffic detected.\n"

+ 10 - 13
roles/openshift_health_checker/openshift_checks/etcd_volume.py

@@ -1,6 +1,6 @@
 """A health check for OpenShift clusters."""
 """A health check for OpenShift clusters."""
 
 
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
 
 
 
 
 class EtcdVolume(OpenShiftCheck):
 class EtcdVolume(OpenShiftCheck):
@@ -14,21 +14,18 @@ class EtcdVolume(OpenShiftCheck):
     # Where to find ectd data, higher priority first.
     # Where to find ectd data, higher priority first.
     supported_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"]
     supported_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"]
 
 
-    @classmethod
-    def is_active(cls, task_vars):
-        etcd_hosts = get_var(task_vars, "groups", "etcd", default=[]) or get_var(task_vars, "groups", "masters",
-                                                                                 default=[]) or []
-        is_etcd_host = get_var(task_vars, "ansible_ssh_host") in etcd_hosts
-        return super(EtcdVolume, cls).is_active(task_vars) and is_etcd_host
+    def is_active(self):
+        etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or []
+        is_etcd_host = self.get_var("ansible_ssh_host") in etcd_hosts
+        return super(EtcdVolume, self).is_active() and is_etcd_host
 
 
-    def run(self, tmp, task_vars):
-        mount_info = self._etcd_mount_info(task_vars)
+    def run(self):
+        mount_info = self._etcd_mount_info()
         available = mount_info["size_available"]
         available = mount_info["size_available"]
         total = mount_info["size_total"]
         total = mount_info["size_total"]
         used = total - available
         used = total - available
 
 
-        threshold = get_var(
-            task_vars,
+        threshold = self.get_var(
             "etcd_device_usage_threshold_percent",
             "etcd_device_usage_threshold_percent",
             default=self.default_threshold_percent
             default=self.default_threshold_percent
         )
         )
@@ -45,8 +42,8 @@ class EtcdVolume(OpenShiftCheck):
 
 
         return {"changed": False}
         return {"changed": False}
 
 
-    def _etcd_mount_info(self, task_vars):
-        ansible_mounts = get_var(task_vars, "ansible_mounts")
+    def _etcd_mount_info(self):
+        ansible_mounts = self.get_var("ansible_mounts")
         mounts = {mnt.get("mount"): mnt for mnt in ansible_mounts}
         mounts = {mnt.get("mount"): mnt for mnt in ansible_mounts}
 
 
         for path in self.supported_mount_paths:
         for path in self.supported_mount_paths:

+ 2 - 5
roles/openshift_health_checker/openshift_checks/logging/curator.py

@@ -1,6 +1,5 @@
 """Check for an aggregated logging Curator deployment"""
 """Check for an aggregated logging Curator deployment"""
 
 
-from openshift_checks import get_var
 from openshift_checks.logging.logging import LoggingCheck
 from openshift_checks.logging.logging import LoggingCheck
 
 
 
 
@@ -12,13 +11,11 @@ class Curator(LoggingCheck):
 
 
     logging_namespace = None
     logging_namespace = None
 
 
-    def run(self, tmp, task_vars):
-        self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+    def run(self):
+        self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
         curator_pods, error = super(Curator, self).get_pods_for_component(
         curator_pods, error = super(Curator, self).get_pods_for_component(
-            self.execute_module,
             self.logging_namespace,
             self.logging_namespace,
             "curator",
             "curator",
-            task_vars
         )
         )
         if error:
         if error:
             return {"failed": True, "changed": False, "msg": error}
             return {"failed": True, "changed": False, "msg": error}
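
The logging checks follow the same pattern: LoggingCheck helpers such as get_pods_for_component and exec_oc no longer take execute_module or task_vars. A minimal sketch of a subcheck under the new signatures; ExampleLoggingCheck and the "mux" component name are illustrative, not part of this commit:

    from openshift_checks.logging.logging import LoggingCheck

    class ExampleLoggingCheck(LoggingCheck):
        """Hypothetical logging subcheck using the slimmed-down helper signatures."""
        name = "example_logging"

        def run(self):
            self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
            pods, error = self.get_pods_for_component(self.logging_namespace, "mux")
            if error:
                return {"failed": True, "changed": False, "msg": error}
            return {"changed": False, "pod_count": len(pods)}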

+ 19 - 24
roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py

@@ -3,7 +3,6 @@
 import json
 import json
 import re
 import re
 
 
-from openshift_checks import get_var
 from openshift_checks.logging.logging import LoggingCheck
 from openshift_checks.logging.logging import LoggingCheck
 
 
 
 
@@ -15,19 +14,17 @@ class Elasticsearch(LoggingCheck):
 
 
     logging_namespace = None
     logging_namespace = None
 
 
-    def run(self, tmp, task_vars):
+    def run(self):
         """Check various things and gather errors. Returns: result as hash"""
         """Check various things and gather errors. Returns: result as hash"""
 
 
-        self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+        self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
         es_pods, error = super(Elasticsearch, self).get_pods_for_component(
         es_pods, error = super(Elasticsearch, self).get_pods_for_component(
-            self.execute_module,
             self.logging_namespace,
             self.logging_namespace,
             "es",
             "es",
-            task_vars,
         )
         )
         if error:
         if error:
             return {"failed": True, "changed": False, "msg": error}
             return {"failed": True, "changed": False, "msg": error}
-        check_error = self.check_elasticsearch(es_pods, task_vars)
+        check_error = self.check_elasticsearch(es_pods)
 
 
         if check_error:
         if check_error:
             msg = ("The following Elasticsearch deployment issue was found:"
             msg = ("The following Elasticsearch deployment issue was found:"
@@ -52,7 +49,7 @@ class Elasticsearch(LoggingCheck):
             ))]
             ))]
         return not_running, []
         return not_running, []
 
 
-    def check_elasticsearch(self, es_pods, task_vars):
+    def check_elasticsearch(self, es_pods):
         """Various checks for elasticsearch. Returns: error string"""
         """Various checks for elasticsearch. Returns: error string"""
         not_running_pods, error_msgs = self._not_running_elasticsearch_pods(es_pods)
         not_running_pods, error_msgs = self._not_running_elasticsearch_pods(es_pods)
         running_pods = [pod for pod in es_pods if pod not in not_running_pods]
         running_pods = [pod for pod in es_pods if pod not in not_running_pods]
@@ -63,10 +60,10 @@ class Elasticsearch(LoggingCheck):
         }
         }
         if not pods_by_name:
         if not pods_by_name:
             return 'No logging Elasticsearch pods were found. Is logging deployed?'
             return 'No logging Elasticsearch pods were found. Is logging deployed?'
-        error_msgs += self._check_elasticsearch_masters(pods_by_name, task_vars)
-        error_msgs += self._check_elasticsearch_node_list(pods_by_name, task_vars)
-        error_msgs += self._check_es_cluster_health(pods_by_name, task_vars)
-        error_msgs += self._check_elasticsearch_diskspace(pods_by_name, task_vars)
+        error_msgs += self._check_elasticsearch_masters(pods_by_name)
+        error_msgs += self._check_elasticsearch_node_list(pods_by_name)
+        error_msgs += self._check_es_cluster_health(pods_by_name)
+        error_msgs += self._check_elasticsearch_diskspace(pods_by_name)
         return '\n'.join(error_msgs)
         return '\n'.join(error_msgs)
 
 
     @staticmethod
     @staticmethod
@@ -74,14 +71,14 @@ class Elasticsearch(LoggingCheck):
         base = "exec {name} -- curl -s --cert {base}cert --key {base}key --cacert {base}ca -XGET '{url}'"
         base = "exec {name} -- curl -s --cert {base}cert --key {base}key --cacert {base}ca -XGET '{url}'"
         return base.format(base="/etc/elasticsearch/secret/admin-", name=pod_name, url=url)
         return base.format(base="/etc/elasticsearch/secret/admin-", name=pod_name, url=url)
 
 
-    def _check_elasticsearch_masters(self, pods_by_name, task_vars):
+    def _check_elasticsearch_masters(self, pods_by_name):
         """Check that Elasticsearch masters are sane. Returns: list of error strings"""
         """Check that Elasticsearch masters are sane. Returns: list of error strings"""
         es_master_names = set()
         es_master_names = set()
         error_msgs = []
         error_msgs = []
         for pod_name in pods_by_name.keys():
         for pod_name in pods_by_name.keys():
             # Compare what each ES node reports as master and compare for split brain
             # Compare what each ES node reports as master and compare for split brain
             get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
             get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
-            master_name_str = self._exec_oc(get_master_cmd, [], task_vars)
+            master_name_str = self._exec_oc(get_master_cmd, [])
             master_names = (master_name_str or '').split(' ')
             master_names = (master_name_str or '').split(' ')
             if len(master_names) > 1:
             if len(master_names) > 1:
                 es_master_names.add(master_names[1])
                 es_master_names.add(master_names[1])
@@ -106,7 +103,7 @@ class Elasticsearch(LoggingCheck):
 
 
         return error_msgs
         return error_msgs
 
 
-    def _check_elasticsearch_node_list(self, pods_by_name, task_vars):
+    def _check_elasticsearch_node_list(self, pods_by_name):
         """Check that reported ES masters are accounted for by pods. Returns: list of error strings"""
         """Check that reported ES masters are accounted for by pods. Returns: list of error strings"""
 
 
         if not pods_by_name:
         if not pods_by_name:
@@ -114,7 +111,7 @@ class Elasticsearch(LoggingCheck):
 
 
         # get ES cluster nodes
         # get ES cluster nodes
         node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
         node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
-        cluster_node_data = self._exec_oc(node_cmd, [], task_vars)
+        cluster_node_data = self._exec_oc(node_cmd, [])
         try:
         try:
             cluster_nodes = json.loads(cluster_node_data)['nodes']
             cluster_nodes = json.loads(cluster_node_data)['nodes']
         except (ValueError, KeyError):
         except (ValueError, KeyError):
@@ -136,12 +133,12 @@ class Elasticsearch(LoggingCheck):
 
 
         return error_msgs
         return error_msgs
 
 
-    def _check_es_cluster_health(self, pods_by_name, task_vars):
+    def _check_es_cluster_health(self, pods_by_name):
         """Exec into the elasticsearch pods and check the cluster health. Returns: list of errors"""
         """Exec into the elasticsearch pods and check the cluster health. Returns: list of errors"""
         error_msgs = []
         error_msgs = []
         for pod_name in pods_by_name.keys():
         for pod_name in pods_by_name.keys():
             cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
             cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
-            cluster_health_data = self._exec_oc(cluster_health_cmd, [], task_vars)
+            cluster_health_data = self._exec_oc(cluster_health_cmd, [])
             try:
                 health_res = json.loads(cluster_health_data)
                 if not health_res or not health_res.get('status'):
@@ -160,7 +157,7 @@ class Elasticsearch(LoggingCheck):
 
         return error_msgs
 
-    def _check_elasticsearch_diskspace(self, pods_by_name, task_vars):
+    def _check_elasticsearch_diskspace(self, pods_by_name):
         """
         Exec into an ES pod and query the diskspace on the persistent volume.
         Returns: list of errors
@@ -168,7 +165,7 @@ class Elasticsearch(LoggingCheck):
         error_msgs = []
         for pod_name in pods_by_name.keys():
             df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
-            disk_output = self._exec_oc(df_cmd, [], task_vars)
+            disk_output = self._exec_oc(df_cmd, [])
             lines = disk_output.splitlines()
             # expecting one header looking like 'IUse% Use%' and one body line
             body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
@@ -180,7 +177,7 @@ class Elasticsearch(LoggingCheck):
                 continue
             inode_pct, disk_pct = re.match(body_re, lines[1]).groups()
 
-            inode_pct_thresh = get_var(task_vars, 'openshift_check_efk_es_inode_pct', default='90')
+            inode_pct_thresh = self.get_var('openshift_check_efk_es_inode_pct', default='90')
             if int(inode_pct) >= int(inode_pct_thresh):
                 error_msgs.append(
                     'Inode percent usage on the storage volume for logging ES pod "{pod}"\n'
@@ -191,7 +188,7 @@ class Elasticsearch(LoggingCheck):
                         limit=str(inode_pct_thresh),
                         param='openshift_check_efk_es_inode_pct',
                     ))
-            disk_pct_thresh = get_var(task_vars, 'openshift_check_efk_es_storage_pct', default='80')
+            disk_pct_thresh = self.get_var('openshift_check_efk_es_storage_pct', default='80')
             if int(disk_pct) >= int(disk_pct_thresh):
                 error_msgs.append(
                     'Disk percent usage on the storage volume for logging ES pod "{pod}"\n'
@@ -205,11 +202,9 @@ class Elasticsearch(LoggingCheck):
 
         return error_msgs
 
-    def _exec_oc(self, cmd_str, extra_args, task_vars):
+    def _exec_oc(self, cmd_str, extra_args):
         return super(Elasticsearch, self).exec_oc(
-            self.execute_module,
             self.logging_namespace,
             cmd_str,
             extra_args,
-            task_vars,
         )

+ 20 - 22
roles/openshift_health_checker/openshift_checks/logging/fluentd.py

@@ -2,7 +2,6 @@
 
 import json
 
-from openshift_checks import get_var
 from openshift_checks.logging.logging import LoggingCheck
 
 
@@ -14,19 +13,17 @@ class Fluentd(LoggingCheck):
 
     logging_namespace = None
 
-    def run(self, tmp, task_vars):
+    def run(self):
         """Check various things and gather errors. Returns: result as hash"""
 
-        self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+        self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
         fluentd_pods, error = super(Fluentd, self).get_pods_for_component(
-            self.execute_module,
             self.logging_namespace,
             "fluentd",
-            task_vars,
         )
         if error:
             return {"failed": True, "changed": False, "msg": error}
-        check_error = self.check_fluentd(fluentd_pods, task_vars)
+        check_error = self.check_fluentd(fluentd_pods)
 
         if check_error:
             msg = ("The following Fluentd deployment issue was found:"
@@ -52,10 +49,9 @@ class Fluentd(LoggingCheck):
             ).format(label=node_selector)
         return fluentd_nodes, None
 
-    @staticmethod
-    def _check_node_labeling(nodes_by_name, fluentd_nodes, node_selector, task_vars):
+    def _check_node_labeling(self, nodes_by_name, fluentd_nodes, node_selector):
         """Note if nodes are not labeled as expected. Returns: error string"""
-        intended_nodes = get_var(task_vars, 'openshift_logging_fluentd_hosts', default=['--all'])
+        intended_nodes = self.get_var('openshift_logging_fluentd_hosts', default=['--all'])
         if not intended_nodes or '--all' in intended_nodes:
             intended_nodes = nodes_by_name.keys()
         nodes_missing_labels = set(intended_nodes) - set(fluentd_nodes.keys())
@@ -113,13 +109,15 @@ class Fluentd(LoggingCheck):
             ))
         return None
 
-    def check_fluentd(self, pods, task_vars):
+    def check_fluentd(self, pods):
         """Verify fluentd is running everywhere. Returns: error string"""
 
-        node_selector = get_var(task_vars, 'openshift_logging_fluentd_nodeselector',
-                                default='logging-infra-fluentd=true')
+        node_selector = self.get_var(
+            'openshift_logging_fluentd_nodeselector',
+            default='logging-infra-fluentd=true'
+        )
 
-        nodes_by_name, error = self.get_nodes_by_name(task_vars)
+        nodes_by_name, error = self.get_nodes_by_name()
 
         if error:
             return error
@@ -128,7 +126,7 @@ class Fluentd(LoggingCheck):
             return error
 
         error_msgs = []
-        error = self._check_node_labeling(nodes_by_name, fluentd_nodes, node_selector, task_vars)
+        error = self._check_node_labeling(nodes_by_name, fluentd_nodes, node_selector)
         if error:
             error_msgs.append(error)
         error = self._check_nodes_have_fluentd(pods, fluentd_nodes)
@@ -147,9 +145,9 @@ class Fluentd(LoggingCheck):
 
         return '\n'.join(error_msgs)
 
-    def get_nodes_by_name(self, task_vars):
+    def get_nodes_by_name(self):
         """Retrieve all the node definitions. Returns: dict(name: node), error string"""
-        nodes_json = self._exec_oc("get nodes -o json", [], task_vars)
+        nodes_json = self._exec_oc("get nodes -o json", [])
         try:
             nodes = json.loads(nodes_json)
         except ValueError:  # no valid json - should not happen
@@ -161,9 +159,9 @@ class Fluentd(LoggingCheck):
             for node in nodes['items']
         }, None
 
-    def _exec_oc(self, cmd_str, extra_args, task_vars):
-        return super(Fluentd, self).exec_oc(self.execute_module,
-                                            self.logging_namespace,
-                                            cmd_str,
-                                            extra_args,
-                                            task_vars)
+    def _exec_oc(self, cmd_str, extra_args):
+        return super(Fluentd, self).exec_oc(
+            self.logging_namespace,
+            cmd_str,
+            extra_args,
+        )

+ 17 - 20
roles/openshift_health_checker/openshift_checks/logging/kibana.py

@@ -12,7 +12,6 @@ except ImportError:
     from urllib.error import HTTPError, URLError
     import urllib.request as urllib2
 
-from openshift_checks import get_var
 from openshift_checks.logging.logging import LoggingCheck
 
 
@@ -24,22 +23,20 @@ class Kibana(LoggingCheck):
 
     logging_namespace = None
 
-    def run(self, tmp, task_vars):
+    def run(self):
         """Check various things and gather errors. Returns: result as hash"""
 
-        self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+        self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
         kibana_pods, error = super(Kibana, self).get_pods_for_component(
-            self.execute_module,
             self.logging_namespace,
             "kibana",
-            task_vars,
         )
         if error:
             return {"failed": True, "changed": False, "msg": error}
         check_error = self.check_kibana(kibana_pods)
 
         if not check_error:
-            check_error = self._check_kibana_route(task_vars)
+            check_error = self._check_kibana_route()
 
         if check_error:
             msg = ("The following Kibana deployment issue was found:"
@@ -50,7 +47,7 @@ class Kibana(LoggingCheck):
         # TODO(lmeyer): run it all again for the ops cluster
         return {"failed": False, "changed": False, "msg": 'No problems found with Kibana deployment.'}
 
-    def _verify_url_internal(self, url, task_vars):
+    def _verify_url_internal(self, url):
         """
         Try to reach a URL from the host.
         Returns: success (bool), reason (for failure)
@@ -62,7 +59,7 @@ class Kibana(LoggingCheck):
             # TODO(lmeyer): give users option to validate certs
             status_code=302,
         )
-        result = self.execute_module('uri', args, None, task_vars)
+        result = self.execute_module('uri', args)
         if result.get('failed'):
             return result['msg']
         return None
@@ -114,14 +111,14 @@ class Kibana(LoggingCheck):
 
         return None
 
-    def _get_kibana_url(self, task_vars):
+    def _get_kibana_url(self):
         """
         Get kibana route or report error.
         Returns: url (or empty), reason for failure
         """
 
         # Get logging url
-        get_route = self._exec_oc("get route logging-kibana -o json", [], task_vars)
+        get_route = self._exec_oc("get route logging-kibana -o json", [])
         if not get_route:
             return None, 'no_route_exists'
 
@@ -139,7 +136,7 @@ class Kibana(LoggingCheck):
 
         return 'https://{}/'.format(host), None
 
-    def _check_kibana_route(self, task_vars):
+    def _check_kibana_route(self):
         """
         Check to see if kibana route is up and working.
         Returns: error string
@@ -160,12 +157,12 @@ class Kibana(LoggingCheck):
             ),
         )
 
-        kibana_url, error = self._get_kibana_url(task_vars)
+        kibana_url, error = self._get_kibana_url()
         if not kibana_url:
             return known_errors.get(error, error)
 
         # first, check that kibana is reachable from the master.
-        error = self._verify_url_internal(kibana_url, task_vars)
+        error = self._verify_url_internal(kibana_url)
         if error:
             if 'urlopen error [Errno 111] Connection refused' in error:
                 error = (
@@ -190,7 +187,7 @@ class Kibana(LoggingCheck):
 
         # in production we would like the kibana route to work from outside the
         # cluster too; but that may not be the case, so allow disabling just this part.
-        if not get_var(task_vars, "openshift_check_efk_kibana_external", default=True):
+        if not self.get_var("openshift_check_efk_kibana_external", default=True):
             return None
         error = self._verify_url_external(kibana_url)
         if error:
@@ -221,9 +218,9 @@ class Kibana(LoggingCheck):
             return error
         return None
 
-    def _exec_oc(self, cmd_str, extra_args, task_vars):
-        return super(Kibana, self).exec_oc(self.execute_module,
-                                           self.logging_namespace,
-                                           cmd_str,
-                                           extra_args,
-                                           task_vars)
+    def _exec_oc(self, cmd_str, extra_args):
+        return super(Kibana, self).exec_oc(
+            self.logging_namespace,
+            cmd_str,
+            extra_args,
+        )

+ 14 - 19
roles/openshift_health_checker/openshift_checks/logging/logging.py

@@ -5,7 +5,7 @@ Util functions for performing checks on an Elasticsearch, Fluentd, and Kibana st
 import json
 import os
 
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
 
 
 class LoggingCheck(OpenShiftCheck):
@@ -14,31 +14,27 @@ class LoggingCheck(OpenShiftCheck):
     name = "logging"
     logging_namespace = "logging"
 
-    @classmethod
-    def is_active(cls, task_vars):
-        logging_deployed = get_var(task_vars, "openshift_hosted_logging_deploy", default=False)
-        return super(LoggingCheck, cls).is_active(task_vars) and cls.is_first_master(task_vars) and logging_deployed
+    def is_active(self):
+        logging_deployed = self.get_var("openshift_hosted_logging_deploy", default=False)
+        return logging_deployed and super(LoggingCheck, self).is_active() and self.is_first_master()
 
-    @staticmethod
-    def is_first_master(task_vars):
-        """Run only on first master. Returns: bool"""
+    def is_first_master(self):
+        """Determine if running on first master. Returns: bool"""
         # Note: It would be nice to use membership in oo_first_master group, however for now it
         # seems best to avoid requiring that setup and just check this is the first master.
-        hostname = get_var(task_vars, "ansible_ssh_host") or [None]
-        masters = get_var(task_vars, "groups", "masters", default=None) or [None]
-        return masters and masters[0] == hostname
+        hostname = self.get_var("ansible_ssh_host") or [None]
+        masters = self.get_var("groups", "masters", default=None) or [None]
+        return masters[0] == hostname
 
-    def run(self, tmp, task_vars):
+    def run(self):
         pass
 
-    def get_pods_for_component(self, execute_module, namespace, logging_component, task_vars):
+    def get_pods_for_component(self, namespace, logging_component):
         """Get all pods for a given component. Returns: list of pods for component, error string"""
         pod_output = self.exec_oc(
-            execute_module,
             namespace,
             "get pods -l component={} -o json".format(logging_component),
             [],
-            task_vars
         )
         try:
             pods = json.loads(pod_output)
@@ -64,14 +60,13 @@ class LoggingCheck(OpenShiftCheck):
             )
         ]
 
-    @staticmethod
-    def exec_oc(execute_module=None, namespace="logging", cmd_str="", extra_args=None, task_vars=None):
+    def exec_oc(self, namespace="logging", cmd_str="", extra_args=None):
         """
         Execute an 'oc' command in the remote host.
         Returns: output of command and namespace,
         or raises OpenShiftCheckException on error
         """
-        config_base = get_var(task_vars, "openshift", "common", "config_base")
+        config_base = self.get_var("openshift", "common", "config_base")
         args = {
             "namespace": namespace,
             "config_file": os.path.join(config_base, "master", "admin.kubeconfig"),
@@ -79,7 +74,7 @@ class LoggingCheck(OpenShiftCheck):
             "extra_args": list(extra_args) if extra_args else [],
         }
 
-        result = execute_module("ocutil", args, None, task_vars)
+        result = self.execute_module("ocutil", args)
         if result.get("failed"):
             msg = (
                 'Unexpected error using `oc` to validate the logging stack components.\n'

+ 13 - 15
roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py

@@ -7,7 +7,7 @@ import time
 
 from uuid import uuid4
 
-from openshift_checks import get_var, OpenShiftCheckException
+from openshift_checks import OpenShiftCheckException
 from openshift_checks.logging.logging import LoggingCheck
 
 
@@ -21,11 +21,11 @@ class LoggingIndexTime(LoggingCheck):
 
     logging_namespace = "logging"
 
-    def run(self, tmp, task_vars):
+    def run(self):
         """Add log entry by making unique request to Kibana. Check for unique entry in the ElasticSearch pod logs."""
         try:
             log_index_timeout = int(
-                get_var(task_vars, "openshift_check_logging_index_timeout_seconds", default=ES_CMD_TIMEOUT_SECONDS)
+                self.get_var("openshift_check_logging_index_timeout_seconds", default=ES_CMD_TIMEOUT_SECONDS)
             )
         except ValueError:
             return {
@@ -37,11 +37,9 @@ class LoggingIndexTime(LoggingCheck):
         running_component_pods = dict()
 
         # get all component pods
-        self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default=self.logging_namespace)
+        self.logging_namespace = self.get_var("openshift_logging_namespace", default=self.logging_namespace)
         for component, name in (['kibana', 'Kibana'], ['es', 'Elasticsearch']):
-            pods, error = self.get_pods_for_component(
-                self.execute_module, self.logging_namespace, component, task_vars,
-            )
+            pods, error = self.get_pods_for_component(self.logging_namespace, component)
 
             if error:
                 msg = 'Unable to retrieve pods for the {} logging component: {}'
@@ -56,29 +54,29 @@ class LoggingIndexTime(LoggingCheck):
 
             running_component_pods[component] = running_pods
 
-        uuid = self.curl_kibana_with_uuid(running_component_pods["kibana"][0], task_vars)
-        self.wait_until_cmd_or_err(running_component_pods["es"][0], uuid, log_index_timeout, task_vars)
+        uuid = self.curl_kibana_with_uuid(running_component_pods["kibana"][0])
+        self.wait_until_cmd_or_err(running_component_pods["es"][0], uuid, log_index_timeout)
         return {}
 
-    def wait_until_cmd_or_err(self, es_pod, uuid, timeout_secs, task_vars):
+    def wait_until_cmd_or_err(self, es_pod, uuid, timeout_secs):
         """Retry an Elasticsearch query every second until query success, or a defined
         length of time has passed."""
         deadline = time.time() + timeout_secs
         interval = 1
-        while not self.query_es_from_es(es_pod, uuid, task_vars):
+        while not self.query_es_from_es(es_pod, uuid):
             if time.time() + interval > deadline:
                 msg = "expecting match in Elasticsearch for message with uuid {}, but no matches were found after {}s."
                 raise OpenShiftCheckException(msg.format(uuid, timeout_secs))
             time.sleep(interval)
 
-    def curl_kibana_with_uuid(self, kibana_pod, task_vars):
+    def curl_kibana_with_uuid(self, kibana_pod):
         """curl Kibana with a unique uuid."""
         uuid = self.generate_uuid()
         pod_name = kibana_pod["metadata"]["name"]
         exec_cmd = "exec {pod_name} -c kibana -- curl --max-time 30 -s http://localhost:5601/{uuid}"
         exec_cmd = exec_cmd.format(pod_name=pod_name, uuid=uuid)
 
-        error_str = self.exec_oc(self.execute_module, self.logging_namespace, exec_cmd, [], task_vars)
+        error_str = self.exec_oc(self.logging_namespace, exec_cmd, [])
 
         try:
             error_code = json.loads(error_str)["statusCode"]
@@ -97,7 +95,7 @@ class LoggingIndexTime(LoggingCheck):
 
         return uuid
 
-    def query_es_from_es(self, es_pod, uuid, task_vars):
+    def query_es_from_es(self, es_pod, uuid):
         """curl the Elasticsearch pod and look for a unique uuid in its logs."""
         pod_name = es_pod["metadata"]["name"]
         exec_cmd = (
@@ -108,7 +106,7 @@ class LoggingIndexTime(LoggingCheck):
             "https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}"
         )
         exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace, uuid=uuid)
-        result = self.exec_oc(self.execute_module, self.logging_namespace, exec_cmd, [], task_vars)
+        result = self.exec_oc(self.logging_namespace, exec_cmd, [])
 
         try:
             count = json.loads(result)["count"]

+ 9 - 10
roles/openshift_health_checker/openshift_checks/memory_availability.py

@@ -1,5 +1,5 @@
 """Check that recommended memory is available."""
-from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks import OpenShiftCheck
 
 MIB = 2**20
 GIB = 2**30
@@ -21,19 +21,18 @@ class MemoryAvailability(OpenShiftCheck):
     # https://access.redhat.com/solutions/3006511 physical RAM is partly reserved from memtotal
     memtotal_adjustment = 1 * GIB
 
-    @classmethod
-    def is_active(cls, task_vars):
+    def is_active(self):
         """Skip hosts that do not have recommended memory requirements."""
-        group_names = get_var(task_vars, "group_names", default=[])
-        has_memory_recommendation = bool(set(group_names).intersection(cls.recommended_memory_bytes))
-        return super(MemoryAvailability, cls).is_active(task_vars) and has_memory_recommendation
+        group_names = self.get_var("group_names", default=[])
+        has_memory_recommendation = bool(set(group_names).intersection(self.recommended_memory_bytes))
+        return super(MemoryAvailability, self).is_active() and has_memory_recommendation
 
-    def run(self, tmp, task_vars):
-        group_names = get_var(task_vars, "group_names")
-        total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * MIB
+    def run(self):
+        group_names = self.get_var("group_names")
+        total_memory_bytes = self.get_var("ansible_memtotal_mb") * MIB
 
         recommended_min = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
-        configured_min = float(get_var(task_vars, "openshift_check_min_host_memory_gb", default=0)) * GIB
+        configured_min = float(self.get_var("openshift_check_min_host_memory_gb", default=0)) * GIB
         min_memory_bytes = configured_min or recommended_min
 
         if total_memory_bytes + self.memtotal_adjustment < min_memory_bytes:

+ 10 - 15
roles/openshift_health_checker/openshift_checks/mixins.py

@@ -2,19 +2,16 @@
 Mixin classes meant to be used with subclasses of OpenShiftCheck.
 """
 
-from openshift_checks import get_var
-
 
 class NotContainerizedMixin(object):
     """Mixin for checks that are only active when not in containerized mode."""
     # permanent # pylint: disable=too-few-public-methods
     # Reason: The mixin is not intended to stand on its own as a class.
 
-    @classmethod
-    def is_active(cls, task_vars):
+    def is_active(self):
         """Only run on non-containerized hosts."""
-        is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
-        return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized
+        is_containerized = self.get_var("openshift", "common", "is_containerized")
+        return super(NotContainerizedMixin, self).is_active() and not is_containerized
 
 
 class DockerHostMixin(object):
@@ -22,28 +19,26 @@ class DockerHostMixin(object):
 
     dependencies = []
 
-    @classmethod
-    def is_active(cls, task_vars):
+    def is_active(self):
         """Only run on hosts that depend on Docker."""
-        is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
-        is_node = "nodes" in get_var(task_vars, "group_names", default=[])
-        return super(DockerHostMixin, cls).is_active(task_vars) and (is_containerized or is_node)
+        is_containerized = self.get_var("openshift", "common", "is_containerized")
+        is_node = "nodes" in self.get_var("group_names", default=[])
+        return super(DockerHostMixin, self).is_active() and (is_containerized or is_node)
 
-    def ensure_dependencies(self, task_vars):
+    def ensure_dependencies(self):
         """
         Ensure that docker-related packages exist, but not on atomic hosts
         (which would not be able to install but should already have them).
         Returns: msg, failed, changed
         """
-        if get_var(task_vars, "openshift", "common", "is_atomic"):
+        if self.get_var("openshift", "common", "is_atomic"):
            return "", False, False
 
         # NOTE: we would use the "package" module but it's actually an action plugin
         # and it's not clear how to invoke one of those. This is about the same anyway:
         result = self.execute_module(
-            get_var(task_vars, "ansible_pkg_mgr", default="yum"),
+            self.get_var("ansible_pkg_mgr", default="yum"),
             {"name": self.dependencies, "state": "present"},
-            task_vars=task_vars,
         )
         msg = result.get("msg", "")
         if result.get("failed"):

+ 11 - 12
roles/openshift_health_checker/openshift_checks/ovs_version.py

@@ -3,7 +3,7 @@ Ansible module for determining if an installed version of Open vSwitch is incomp
 currently installed version of OpenShift.
 """
 
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
 from openshift_checks.mixins import NotContainerizedMixin
 
 
@@ -27,27 +27,26 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
         "1": "3",
     }
 
-    @classmethod
-    def is_active(cls, task_vars):
+    def is_active(self):
         """Skip hosts that do not have package requirements."""
-        group_names = get_var(task_vars, "group_names", default=[])
+        group_names = self.get_var("group_names", default=[])
         master_or_node = 'masters' in group_names or 'nodes' in group_names
-        return super(OvsVersion, cls).is_active(task_vars) and master_or_node
+        return super(OvsVersion, self).is_active() and master_or_node
 
-    def run(self, tmp, task_vars):
+    def run(self):
         args = {
             "package_list": [
                 {
                     "name": "openvswitch",
-                    "version": self.get_required_ovs_version(task_vars),
+                    "version": self.get_required_ovs_version(),
                 },
             ],
         }
-        return self.execute_module("rpm_version", args, task_vars=task_vars)
+        return self.execute_module("rpm_version", args)
 
-    def get_required_ovs_version(self, task_vars):
+    def get_required_ovs_version(self):
         """Return the correct Open vSwitch version for the current OpenShift version"""
-        openshift_version = self._get_openshift_version(task_vars)
+        openshift_version = self._get_openshift_version()
 
         if float(openshift_version) < 3.5:
             return self.openshift_to_ovs_version["3.4"]
@@ -59,8 +58,8 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
         msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
         raise OpenShiftCheckException(msg.format(openshift_version))
 
-    def _get_openshift_version(self, task_vars):
-        openshift_version = get_var(task_vars, "openshift_image_tag")
+    def _get_openshift_version(self):
+        openshift_version = self.get_var("openshift_image_tag")
         if openshift_version and openshift_version[0] == 'v':
             openshift_version = openshift_version[1:]
 

+ 7 - 8
roles/openshift_health_checker/openshift_checks/package_availability.py

@@ -1,6 +1,6 @@
 """Check that required RPM packages are available."""
 
-from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks import OpenShiftCheck
 from openshift_checks.mixins import NotContainerizedMixin
 
 
@@ -10,14 +10,13 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
     name = "package_availability"
     tags = ["preflight"]
 
-    @classmethod
-    def is_active(cls, task_vars):
+    def is_active(self):
         """Run only when yum is the package manager as the code is specific to it."""
-        return super(PackageAvailability, cls).is_active(task_vars) and task_vars["ansible_pkg_mgr"] == "yum"
+        return super(PackageAvailability, self).is_active() and self.get_var("ansible_pkg_mgr") == "yum"
 
-    def run(self, tmp, task_vars):
-        rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
-        group_names = get_var(task_vars, "group_names", default=[])
+    def run(self):
+        rpm_prefix = self.get_var("openshift", "common", "service_type")
+        group_names = self.get_var("group_names", default=[])
 
         packages = set()
 
@@ -27,7 +26,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
             packages.update(self.node_packages(rpm_prefix))
 
         args = {"packages": sorted(set(packages))}
-        return self.execute_module("check_yum_update", args, tmp=tmp, task_vars=task_vars)
+        return self.execute_module("check_yum_update", args)
 
     @staticmethod
     def master_packages(rpm_prefix):

+ 2 - 2
roles/openshift_health_checker/openshift_checks/package_update.py

@@ -9,6 +9,6 @@ class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
     name = "package_update"
     tags = ["preflight"]
 
-    def run(self, tmp, task_vars):
+    def run(self):
         args = {"packages": []}
-        return self.execute_module("check_yum_update", args, tmp=tmp, task_vars=task_vars)
+        return self.execute_module("check_yum_update", args)

+ 17 - 18
roles/openshift_health_checker/openshift_checks/package_version.py

@@ -1,5 +1,5 @@
 """Check that available RPM packages match the required versions."""
-from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException
 from openshift_checks.mixins import NotContainerizedMixin
 
 
@@ -28,29 +28,28 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
         "1": "3",
     }
 
-    @classmethod
-    def is_active(cls, task_vars):
+    def is_active(self):
         """Skip hosts that do not have package requirements."""
-        group_names = get_var(task_vars, "group_names", default=[])
+        group_names = self.get_var("group_names", default=[])
         master_or_node = 'masters' in group_names or 'nodes' in group_names
-        return super(PackageVersion, cls).is_active(task_vars) and master_or_node
+        return super(PackageVersion, self).is_active() and master_or_node
 
-    def run(self, tmp, task_vars):
-        rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
-        openshift_release = get_var(task_vars, "openshift_release", default='')
-        deployment_type = get_var(task_vars, "openshift_deployment_type")
+    def run(self):
+        rpm_prefix = self.get_var("openshift", "common", "service_type")
+        openshift_release = self.get_var("openshift_release", default='')
+        deployment_type = self.get_var("openshift_deployment_type")
         check_multi_minor_release = deployment_type in ['openshift-enterprise']
 
         args = {
             "package_list": [
                 {
                     "name": "openvswitch",
-                    "version": self.get_required_ovs_version(task_vars),
+                    "version": self.get_required_ovs_version(),
                     "check_multi": False,
                 },
                 {
                     "name": "docker",
-                    "version": self.get_required_docker_version(task_vars),
+                    "version": self.get_required_docker_version(),
                     "check_multi": False,
                 },
                 {
@@ -71,13 +70,13 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
             ],
         }
 
-        return self.execute_module("aos_version", args, tmp=tmp, task_vars=task_vars)
+        return self.execute_module("aos_version", args)
 
-    def get_required_ovs_version(self, task_vars):
+    def get_required_ovs_version(self):
         """Return the correct Open vSwitch version for the current OpenShift version.
         If the current OpenShift version is >= 3.5, ensure Open vSwitch version 2.6,
         Else ensure Open vSwitch version 2.4"""
-        openshift_version = self.get_openshift_version(task_vars)
+        openshift_version = self.get_openshift_version()
 
         if float(openshift_version) < 3.5:
             return self.openshift_to_ovs_version["3.4"]
@@ -89,12 +88,12 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
         msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
         raise OpenShiftCheckException(msg.format(openshift_version))
 
-    def get_required_docker_version(self, task_vars):
+    def get_required_docker_version(self):
         """Return the correct Docker version for the current OpenShift version.
         If the OpenShift version is 3.1, ensure Docker version 1.8.
         If the OpenShift version is 3.2 or 3.3, ensure Docker version 1.10.
         If the current OpenShift version is >= 3.4, ensure Docker version 1.12."""
-        openshift_version = self.get_openshift_version(task_vars)
+        openshift_version = self.get_openshift_version()
 
         if float(openshift_version) >= 3.4:
             return self.openshift_to_docker_version["3.4"]
@@ -106,9 +105,9 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
         msg = "There is no recommended version of Docker for the current version of OpenShift: {}"
         raise OpenShiftCheckException(msg.format(openshift_version))
 
-    def get_openshift_version(self, task_vars):
+    def get_openshift_version(self):
         """Return received image tag as a normalized X.Y minor version string."""
-        openshift_version = get_var(task_vars, "openshift_image_tag")
+        openshift_version = self.get_var("openshift_image_tag")
         if openshift_version and openshift_version[0] == 'v':
             openshift_version = openshift_version[1:]
 

+ 8 - 9
roles/openshift_health_checker/test/action_plugin_test.py

@@ -15,14 +15,13 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
         name = _name
         tags = _tags or []
 
-        def __init__(self, execute_module=None):
+        def __init__(self, execute_module=None, task_vars=None, tmp=None):
            pass
 
-        @classmethod
-        def is_active(cls, task_vars):
+        def is_active(self):
            return is_active
 
-        def run(self, tmp, task_vars):
+        def run(self):
            if run_exception is not None:
                raise run_exception
            return run_return
@@ -124,7 +123,7 @@ def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
 def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
     check_return_value = {'ok': 'test'}
     check_class = fake_check(run_return=check_return_value)
-    monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
     monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
 
     result = plugin.run(tmp=None, task_vars=task_vars)
@@ -138,7 +137,7 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
 def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
     check_return_value = {'ok': 'test', 'changed': True}
     check_class = fake_check(run_return=check_return_value)
-    monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
     monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
 
     result = plugin.run(tmp=None, task_vars=task_vars)
@@ -152,7 +151,7 @@ def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
 def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
     check_return_value = {'failed': True}
     check_class = fake_check(run_return=check_return_value)
-    monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
     monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
 
     result = plugin.run(tmp=None, task_vars=task_vars)
@@ -167,7 +166,7 @@ def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
     exception_msg = 'fake check has an exception'
     run_exception = OpenShiftCheckException(exception_msg)
     check_class = fake_check(run_exception=run_exception)
-    monkeypatch.setattr(plugin, 'load_known_checks', lambda: {'fake_check': check_class()})
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
     monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
 
     result = plugin.run(tmp=None, task_vars=task_vars)
@@ -179,7 +178,7 @@ def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
 
 
 def test_action_plugin_resolve_checks_exception(plugin, task_vars, monkeypatch):
-    monkeypatch.setattr(plugin, 'load_known_checks', lambda: {})
+    monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {})
 
     result = plugin.run(tmp=None, task_vars=task_vars)
 

+ 4 - 7
roles/openshift_health_checker/test/disk_availability_test.py

@@ -17,7 +17,7 @@ def test_is_active(group_names, is_active):
     task_vars = dict(
         group_names=group_names,
     )
-    assert DiskAvailability.is_active(task_vars=task_vars) == is_active
+    assert DiskAvailability(None, task_vars).is_active() == is_active
 
 
 @pytest.mark.parametrize('ansible_mounts,extra_words', [
@@ -30,10 +30,9 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
         group_names=['masters'],
         ansible_mounts=ansible_mounts,
     )
-    check = DiskAvailability(execute_module=fake_execute_module)
 
     with pytest.raises(OpenShiftCheckException) as excinfo:
-        check.run(tmp=None, task_vars=task_vars)
+        DiskAvailability(fake_execute_module, task_vars).run()
 
     for word in 'determine disk availability'.split() + extra_words:
         assert word in str(excinfo.value)
@@ -93,8 +92,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
         ansible_mounts=ansible_mounts,
     )
 
-    check = DiskAvailability(execute_module=fake_execute_module)
-    result = check.run(tmp=None, task_vars=task_vars)
+    result = DiskAvailability(fake_execute_module, task_vars).run()
 
     assert not result.get('failed', False)
 
@@ -168,8 +166,7 @@ def test_fails_with_insufficient_disk_space(group_names, configured_min, ansible
         ansible_mounts=ansible_mounts,
     )
 
-    check = DiskAvailability(execute_module=fake_execute_module)
-    result = check.run(tmp=None, task_vars=task_vars)
+    result = DiskAvailability(fake_execute_module, task_vars).run()
 
     assert result['failed']
     for word in 'below recommended'.split() + extra_words:

+ 18 - 19
roles/openshift_health_checker/test/docker_image_availability_test.py

@@ -21,7 +21,7 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active
         openshift_deployment_type=deployment_type,
         openshift_deployment_type=deployment_type,
         group_names=group_names,
         group_names=group_names,
     )
     )
-    assert DockerImageAvailability.is_active(task_vars=task_vars) == expect_active
+    assert DockerImageAvailability(None, task_vars).is_active() == expect_active
 
 
 
 
 @pytest.mark.parametrize("is_containerized,is_atomic", [
 @pytest.mark.parametrize("is_containerized,is_atomic", [
@@ -31,7 +31,7 @@ def test_is_active(deployment_type, is_containerized, group_names, expect_active
     (False, True),
     (False, True),
 ])
 ])
 def test_all_images_available_locally(is_containerized, is_atomic):
 def test_all_images_available_locally(is_containerized, is_atomic):
-    def execute_module(module_name, module_args, task_vars):
+    def execute_module(module_name, module_args, *_):
         if module_name == "yum":
         if module_name == "yum":
             return {"changed": True}
             return {"changed": True}
 
 
@@ -42,7 +42,7 @@ def test_all_images_available_locally(is_containerized, is_atomic):
             'images': [module_args['name']],
             'images': [module_args['name']],
         }
         }
 
 
-    result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+    result = DockerImageAvailability(execute_module, task_vars=dict(
         openshift=dict(
         openshift=dict(
             common=dict(
             common=dict(
                 service_type='origin',
                 service_type='origin',
@@ -54,7 +54,7 @@ def test_all_images_available_locally(is_containerized, is_atomic):
         openshift_deployment_type='origin',
         openshift_deployment_type='origin',
         openshift_image_tag='3.4',
         openshift_image_tag='3.4',
         group_names=['nodes', 'masters'],
         group_names=['nodes', 'masters'],
-    ))
+    )).run()
 
 
     assert not result.get('failed', False)
     assert not result.get('failed', False)

@@ -64,12 +64,12 @@ def test_all_images_available_locally(is_containerized, is_atomic):
     True,
 ])
 def test_all_images_available_remotely(available_locally):
-    def execute_module(module_name, module_args, task_vars):
+    def execute_module(module_name, *_):
         if module_name == 'docker_image_facts':
             return {'images': [], 'failed': available_locally}
         return {'changed': False}

-    result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+    result = DockerImageAvailability(execute_module, task_vars=dict(
         openshift=dict(
             common=dict(
                 service_type='origin',
@@ -81,13 +81,13 @@ def test_all_images_available_remotely(available_locally):
         openshift_deployment_type='origin',
         openshift_image_tag='v3.4',
         group_names=['nodes', 'masters'],
-    ))
+    )).run()

     assert not result.get('failed', False)


 def test_all_images_unavailable():
-    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+    def execute_module(module_name=None, *_):
         if module_name == "command":
             return {
                 'failed': True,
@@ -97,8 +97,7 @@ def test_all_images_unavailable():
             'changed': False,
         }

-    check = DockerImageAvailability(execute_module=execute_module)
-    actual = check.run(tmp=None, task_vars=dict(
+    actual = DockerImageAvailability(execute_module, task_vars=dict(
         openshift=dict(
             common=dict(
                 service_type='origin',
@@ -110,7 +109,7 @@ def test_all_images_unavailable():
         openshift_deployment_type="openshift-enterprise",
         openshift_image_tag='latest',
         group_names=['nodes', 'masters'],
-    ))
+    )).run()

     assert actual['failed']
     assert "required Docker images are not available" in actual['msg']
@@ -127,7 +126,7 @@ def test_all_images_unavailable():
     ),
 ])
 def test_skopeo_update_failure(message, extra_words):
-    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+    def execute_module(module_name=None, *_):
         if module_name == "yum":
             return {
                 "failed": True,
@@ -137,7 +136,7 @@ def test_skopeo_update_failure(message, extra_words):

         return {'changed': False}

-    actual = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+    actual = DockerImageAvailability(execute_module, task_vars=dict(
         openshift=dict(
             common=dict(
                 service_type='origin',
@@ -149,7 +148,7 @@ def test_skopeo_update_failure(message, extra_words):
         openshift_deployment_type="openshift-enterprise",
         openshift_image_tag='',
         group_names=['nodes', 'masters'],
-    ))
+    )).run()

     assert actual["failed"]
     for word in extra_words:
@@ -162,12 +161,12 @@ def test_skopeo_update_failure(message, extra_words):
     ("openshift-enterprise", []),
 ])
 def test_registry_availability(deployment_type, registries):
-    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+    def execute_module(module_name=None, *_):
         return {
             'changed': False,
         }

-    actual = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+    actual = DockerImageAvailability(execute_module, task_vars=dict(
         openshift=dict(
             common=dict(
                 service_type='origin',
@@ -179,7 +178,7 @@ def test_registry_availability(deployment_type, registries):
         openshift_deployment_type=deployment_type,
         openshift_image_tag='',
         group_names=['nodes', 'masters'],
-    ))
+    )).run()

     assert not actual.get("failed", False)

@@ -258,7 +257,7 @@ def test_required_images(deployment_type, is_containerized, groups, oreg_url, ex
         openshift_image_tag='vtest',
     )

-    assert expected == DockerImageAvailability("DUMMY").required_images(task_vars)
+    assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()


 def test_containerized_etcd():
@@ -272,4 +271,4 @@ def test_containerized_etcd():
         group_names=['etcd'],
     )
     expected = set(['registry.access.redhat.com/rhel7/etcd'])
-    assert expected == DockerImageAvailability("DUMMY").required_images(task_vars)
+    assert expected == DockerImageAvailability("DUMMY", task_vars).required_images()
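
The docker_image_availability_test.py changes above all exercise the same new calling convention: a check is constructed with execute_module and task_vars, and run() is then called with no arguments instead of run(tmp=None, task_vars=...). A minimal standalone sketch of that convention (an illustrative stand-in class, not the project's actual OpenShiftCheck):

class DummyCheck(object):
    # Illustrative only: mirrors the constructor/run() convention exercised above.
    def __init__(self, execute_module=None, task_vars=None, tmp=None):
        self._execute_module = execute_module
        self.task_vars = task_vars or {}
        self.tmp = tmp

    def run(self):
        # tmp and task_vars now live on self rather than being passed to run().
        return {"changed": False}

result = DummyCheck(None, task_vars=dict(group_names=["nodes"])).run()
assert not result.get("failed", False)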

+ 15 - 21
roles/openshift_health_checker/test/docker_storage_test.py

@@ -4,12 +4,6 @@ from openshift_checks import OpenShiftCheckException
 from openshift_checks.docker_storage import DockerStorage


-def dummy_check(execute_module=None):
-    def dummy_exec(self, status, task_vars):
-        raise Exception("dummy executor called")
-    return DockerStorage(execute_module=execute_module or dummy_exec)
-
-
 @pytest.mark.parametrize('is_containerized, group_names, is_active', [
     (False, ["masters", "etcd"], False),
     (False, ["masters", "nodes"], True),
@@ -20,7 +14,7 @@ def test_is_active(is_containerized, group_names, is_active):
         openshift=dict(common=dict(is_containerized=is_containerized)),
         group_names=group_names,
     )
-    assert DockerStorage.is_active(task_vars=task_vars) == is_active
+    assert DockerStorage(None, task_vars).is_active() == is_active


 def non_atomic_task_vars():
@@ -99,17 +93,17 @@ def non_atomic_task_vars():
     ),
 ])
 def test_check_storage_driver(docker_info, failed, expect_msg):
-    def execute_module(module_name, module_args, tmp=None, task_vars=None):
+    def execute_module(module_name, *_):
         if module_name == "yum":
             return {}
         if module_name != "docker_info":
             raise ValueError("not expecting module " + module_name)
         return docker_info

-    check = dummy_check(execute_module=execute_module)
-    check.check_dm_usage = lambda status, task_vars: dict()  # stub out for this test
-    check.check_overlay_usage = lambda info, task_vars: dict()  # stub out for this test
-    result = check.run(tmp=None, task_vars=non_atomic_task_vars())
+    check = DockerStorage(execute_module, non_atomic_task_vars())
+    check.check_dm_usage = lambda status: dict()  # stub out for this test
+    check.check_overlay_usage = lambda info: dict()  # stub out for this test
+    result = check.run()

     if failed:
         assert result["failed"]
@@ -168,9 +162,9 @@ not_enough_space = {
     ),
 ])
 def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
-    check = dummy_check()
-    check.get_vg_free = lambda pool, task_vars: vg_free
-    result = check.check_dm_usage(driver_status, task_vars)
+    check = DockerStorage(None, task_vars)
+    check.get_vg_free = lambda pool: vg_free
+    result = check.check_dm_usage(driver_status)
     result_success = not result.get("failed")

     assert result_success is success
@@ -210,18 +204,18 @@ def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
     )
 ])
 def test_vg_free(pool, command_returns, raises, returns):
-    def execute_module(module_name, module_args, tmp=None, task_vars=None):
+    def execute_module(module_name, *_):
         if module_name != "command":
             raise ValueError("not expecting module " + module_name)
         return command_returns

-    check = dummy_check(execute_module=execute_module)
+    check = DockerStorage(execute_module)
     if raises:
         with pytest.raises(OpenShiftCheckException) as err:
-            check.get_vg_free(pool, {})
+            check.get_vg_free(pool)
         assert raises in str(err.value)
     else:
-        ret = check.get_vg_free(pool, {})
+        ret = check.get_vg_free(pool)
         assert ret == returns


@@ -298,13 +292,13 @@ ansible_mounts_zero_size = [{
     ),
 ])
 def test_overlay_usage(ansible_mounts, threshold, expect_fail, expect_msg):
-    check = dummy_check()
     task_vars = non_atomic_task_vars()
     task_vars["ansible_mounts"] = ansible_mounts
     if threshold is not None:
         task_vars["max_overlay_usage_percent"] = threshold
+    check = DockerStorage(None, task_vars)
     docker_info = dict(DockerRootDir="/var/lib/docker", Driver="overlay")
-    result = check.check_overlay_usage(docker_info, task_vars)
+    result = check.check_overlay_usage(docker_info)

     assert expect_fail == bool(result.get("failed"))
     for msg in expect_msg:

+ 13 - 13
roles/openshift_health_checker/test/elasticsearch_test.py

@@ -6,9 +6,9 @@ from openshift_checks.logging.elasticsearch import Elasticsearch
 task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))


-def canned_elasticsearch(exec_oc=None):
+def canned_elasticsearch(task_vars=None, exec_oc=None):
     """Create an Elasticsearch check object with canned exec_oc method"""
-    check = Elasticsearch("dummy")  # fails if a module is actually invoked
+    check = Elasticsearch("dummy", task_vars or {})  # fails if a module is actually invoked
     if exec_oc:
         check._exec_oc = exec_oc
     return check
@@ -50,10 +50,10 @@ split_es_pod = {


 def test_check_elasticsearch():
-    assert 'No logging Elasticsearch pods' in canned_elasticsearch().check_elasticsearch([], {})
+    assert 'No logging Elasticsearch pods' in canned_elasticsearch().check_elasticsearch([])

     # canned oc responses to match so all the checks pass
-    def _exec_oc(cmd, args, task_vars):
+    def _exec_oc(cmd, args):
         if '_cat/master' in cmd:
             return 'name logging-es'
         elif '/_nodes' in cmd:
@@ -65,7 +65,7 @@ def test_check_elasticsearch():
         else:
             raise Exception(cmd)

-    assert not canned_elasticsearch(_exec_oc).check_elasticsearch([plain_es_pod], {})
+    assert not canned_elasticsearch({}, _exec_oc).check_elasticsearch([plain_es_pod])


 def pods_by_name(pods):
@@ -88,9 +88,9 @@ def pods_by_name(pods):
 ])
 def test_check_elasticsearch_masters(pods, expect_error):
     test_pods = list(pods)
-    check = canned_elasticsearch(lambda cmd, args, task_vars: test_pods.pop(0)['_test_master_name_str'])
+    check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: test_pods.pop(0)['_test_master_name_str'])

-    errors = check._check_elasticsearch_masters(pods_by_name(pods), task_vars_config_base)
+    errors = check._check_elasticsearch_masters(pods_by_name(pods))
     assert_error(''.join(errors), expect_error)


@@ -124,9 +124,9 @@ es_node_list = {
     ),
 ])
 def test_check_elasticsearch_node_list(pods, node_list, expect_error):
-    check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(node_list))
+    check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(node_list))

-    errors = check._check_elasticsearch_node_list(pods_by_name(pods), task_vars_config_base)
+    errors = check._check_elasticsearch_node_list(pods_by_name(pods))
     assert_error(''.join(errors), expect_error)


@@ -149,9 +149,9 @@ def test_check_elasticsearch_node_list(pods, node_list, expect_error):
 ])
 def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
     test_health_data = list(health_data)
-    check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(test_health_data.pop(0)))
+    check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(test_health_data.pop(0)))

-    errors = check._check_es_cluster_health(pods_by_name(pods), task_vars_config_base)
+    errors = check._check_es_cluster_health(pods_by_name(pods))
     assert_error(''.join(errors), expect_error)


@@ -174,7 +174,7 @@ def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
     ),
 ])
 def test_check_elasticsearch_diskspace(disk_data, expect_error):
-    check = canned_elasticsearch(lambda cmd, args, task_vars: disk_data)
+    check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: disk_data)

-    errors = check._check_elasticsearch_diskspace(pods_by_name([plain_es_pod]), task_vars_config_base)
+    errors = check._check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
     assert_error(''.join(errors), expect_error)

+ 10 - 10
roles/openshift_health_checker/test/etcd_imagedata_size_test.py

@@ -51,10 +51,10 @@ def test_cannot_determine_available_mountpath(ansible_mounts, extra_words):
     task_vars = dict(
         ansible_mounts=ansible_mounts,
     )
-    check = EtcdImageDataSize(execute_module=fake_execute_module)
+    check = EtcdImageDataSize(fake_execute_module, task_vars)

     with pytest.raises(OpenShiftCheckException) as excinfo:
-        check.run(tmp=None, task_vars=task_vars)
+        check.run()

     for word in 'determine valid etcd mountpath'.split() + extra_words:
         assert word in str(excinfo.value)
@@ -111,14 +111,14 @@ def test_cannot_determine_available_mountpath(ansible_mounts, extra_words):
     )
 ])
 def test_check_etcd_key_size_calculates_correct_limit(ansible_mounts, tree, size_limit, should_fail, extra_words):
-    def execute_module(module_name, args, tmp=None, task_vars=None):
+    def execute_module(module_name, module_args, *_):
         if module_name != "etcdkeysize":
             return {
                 "changed": False,
             }

         client = fake_etcd_client(tree)
-        s, limit_exceeded = check_etcd_key_size(client, tree["key"], args["size_limit_bytes"])
+        s, limit_exceeded = check_etcd_key_size(client, tree["key"], module_args["size_limit_bytes"])

         return {"size_limit_exceeded": limit_exceeded}

@@ -133,7 +133,7 @@ def test_check_etcd_key_size_calculates_correct_limit(ansible_mounts, tree, size
     if size_limit is None:
         task_vars.pop("etcd_max_image_data_size_bytes")

-    check = EtcdImageDataSize(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+    check = EtcdImageDataSize(execute_module, task_vars).run()

     if should_fail:
         assert check["failed"]
@@ -267,14 +267,14 @@ def test_check_etcd_key_size_calculates_correct_limit(ansible_mounts, tree, size
     ),
 ])
 def test_etcd_key_size_check_calculates_correct_size(ansible_mounts, tree, root_path, expected_size, extra_words):
-    def execute_module(module_name, args, tmp=None, task_vars=None):
+    def execute_module(module_name, module_args, *_):
         if module_name != "etcdkeysize":
             return {
                 "changed": False,
             }

         client = fake_etcd_client(tree)
-        size, limit_exceeded = check_etcd_key_size(client, root_path, args["size_limit_bytes"])
+        size, limit_exceeded = check_etcd_key_size(client, root_path, module_args["size_limit_bytes"])

         assert size == expected_size
         return {
@@ -289,12 +289,12 @@ def test_etcd_key_size_check_calculates_correct_size(ansible_mounts, tree, root_
         )
     )

-    check = EtcdImageDataSize(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+    check = EtcdImageDataSize(execute_module, task_vars).run()
     assert not check.get("failed", False)


 def test_etcdkeysize_module_failure():
-    def execute_module(module_name, tmp=None, task_vars=None):
+    def execute_module(module_name, *_):
         if module_name != "etcdkeysize":
             return {
                 "changed": False,
@@ -317,7 +317,7 @@ def test_etcdkeysize_module_failure():
         )
     )

-    check = EtcdImageDataSize(execute_module=execute_module).run(tmp=None, task_vars=task_vars)
+    check = EtcdImageDataSize(execute_module, task_vars).run()

     assert check["failed"]
     for word in "Failed to retrieve stats":

+ 5 - 11
roles/openshift_health_checker/test/etcd_traffic_test.py

@@ -21,7 +21,7 @@ def test_is_active(group_names, version, is_active):
             common=dict(short_version=version),
         ),
     )
-    assert EtcdTraffic.is_active(task_vars=task_vars) == is_active
+    assert EtcdTraffic(task_vars=task_vars).is_active() == is_active


 @pytest.mark.parametrize('group_names,matched,failed,extra_words', [
@@ -30,7 +30,7 @@ def test_is_active(group_names, version, is_active):
     (["etcd"], False, False, []),
 ])
 def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words):
-    def execute_module(module_name, args, task_vars):
+    def execute_module(module_name, *_):
         return {
             "matched": matched,
             "failed": failed,
@@ -43,8 +43,7 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)
         )
     )

-    check = EtcdTraffic(execute_module=execute_module)
-    result = check.run(tmp=None, task_vars=task_vars)
+    result = EtcdTraffic(execute_module, task_vars).run()

     for word in extra_words:
         assert word in result.get("msg", "")
@@ -63,7 +62,7 @@ def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_va
         )
     )

-    def execute_module(module_name, args, task_vars):
+    def execute_module(module_name, args, *_):
         assert module_name == "search_journalctl"
         matchers = args["log_matchers"]

@@ -72,9 +71,4 @@ def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_va

         return {"failed": False}

-    check = EtcdTraffic(execute_module=execute_module)
-    check.run(tmp=None, task_vars=task_vars)
-
-
-def fake_execute_module(*args):
-    raise AssertionError('this function should not be called')
+    EtcdTraffic(execute_module, task_vars).run()

+ 3 - 6
roles/openshift_health_checker/test/etcd_volume_test.py

@@ -11,10 +11,9 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
     task_vars = dict(
         ansible_mounts=ansible_mounts,
     )
-    check = EtcdVolume(execute_module=fake_execute_module)

     with pytest.raises(OpenShiftCheckException) as excinfo:
-        check.run(tmp=None, task_vars=task_vars)
+        EtcdVolume(fake_execute_module, task_vars).run()

     for word in 'Unable to find etcd storage mount point'.split() + extra_words:
         assert word in str(excinfo.value)
@@ -76,8 +75,7 @@ def test_succeeds_with_recommended_disk_space(size_limit, ansible_mounts):
     if task_vars["etcd_device_usage_threshold_percent"] is None:
         task_vars.pop("etcd_device_usage_threshold_percent")

-    check = EtcdVolume(execute_module=fake_execute_module)
-    result = check.run(tmp=None, task_vars=task_vars)
+    result = EtcdVolume(fake_execute_module, task_vars).run()

     assert not result.get('failed', False)

@@ -137,8 +135,7 @@ def test_fails_with_insufficient_disk_space(size_limit_percent, ansible_mounts,
     if task_vars["etcd_device_usage_threshold_percent"] is None:
         task_vars.pop("etcd_device_usage_threshold_percent")

-    check = EtcdVolume(execute_module=fake_execute_module)
-    result = check.run(tmp=None, task_vars=task_vars)
+    result = EtcdVolume(fake_execute_module, task_vars).run()

     assert result['failed']
     for word in extra_words:

+ 2 - 2
roles/openshift_health_checker/test/fluentd_test.py

@@ -103,7 +103,7 @@ fluentd_node3_unlabeled = {
     ),
 ])
 def test_get_fluentd_pods(pods, nodes, expect_error):
-    check = canned_fluentd(lambda cmd, args, task_vars: json.dumps(dict(items=nodes)))
+    check = canned_fluentd(exec_oc=lambda cmd, args: json.dumps(dict(items=nodes)))

-    error = check.check_fluentd(pods, {})
+    error = check.check_fluentd(pods)
     assert_error(error, expect_error)

+ 9 - 9
roles/openshift_health_checker/test/kibana_test.py

@@ -13,7 +13,7 @@ from openshift_checks.logging.kibana import Kibana

 def canned_kibana(exec_oc=None):
     """Create a Kibana check object with canned exec_oc method"""
-    check = Kibana("dummy")  # fails if a module is actually invoked
+    check = Kibana()  # fails if a module is actually invoked
     if exec_oc:
         check._exec_oc = exec_oc
     return check
@@ -137,9 +137,9 @@ def test_check_kibana(pods, expect_error):
     ),
 ])
 def test_get_kibana_url(route, expect_url, expect_error):
-    check = canned_kibana(lambda cmd, args, task_vars: json.dumps(route) if route else "")
+    check = canned_kibana(exec_oc=lambda cmd, args: json.dumps(route) if route else "")

-    url, error = check._get_kibana_url({})
+    url, error = check._get_kibana_url()
     if expect_url:
         assert url == expect_url
     else:
@@ -169,10 +169,10 @@ def test_get_kibana_url(route, expect_url, expect_error):
     ),
 ])
 def test_verify_url_internal_failure(exec_result, expect):
-    check = Kibana(execute_module=lambda module_name, args, tmp, task_vars: dict(failed=True, msg=exec_result))
-    check._get_kibana_url = lambda task_vars: ('url', None)
+    check = Kibana(execute_module=lambda *_: dict(failed=True, msg=exec_result))
+    check._get_kibana_url = lambda: ('url', None)

-    error = check._check_kibana_route({})
+    error = check._check_kibana_route()
     assert_error(error, expect)


@@ -211,8 +211,8 @@ def test_verify_url_external_failure(lib_result, expect, monkeypatch):
     monkeypatch.setattr(urllib2, 'urlopen', urlopen)

     check = canned_kibana()
-    check._get_kibana_url = lambda task_vars: ('url', None)
-    check._verify_url_internal = lambda url, task_vars: None
+    check._get_kibana_url = lambda: ('url', None)
+    check._verify_url_internal = lambda url: None

-    error = check._check_kibana_route({})
+    error = check._check_kibana_route()
     assert_error(error, expect)

+ 6 - 8
roles/openshift_health_checker/test/logging_check_test.py

@@ -11,7 +11,7 @@ logging_namespace = "logging"

 def canned_loggingcheck(exec_oc=None):
     """Create a LoggingCheck object with canned exec_oc method"""
-    check = LoggingCheck("dummy")  # fails if a module is actually invoked
+    check = LoggingCheck()  # fails if a module is actually invoked
     check.logging_namespace = 'logging'
     if exec_oc:
         check.exec_oc = exec_oc
@@ -90,15 +90,15 @@ plain_curator_pod = {
     ("Permission denied", "Unexpected error using `oc`"),
 ])
 def test_oc_failure(problem, expect):
-    def execute_module(module_name, args, tmp, task_vars):
+    def execute_module(module_name, *_):
         if module_name == "ocutil":
             return dict(failed=True, result=problem)
         return dict(changed=False)

-    check = LoggingCheck({})
+    check = LoggingCheck(execute_module, task_vars_config_base)

     with pytest.raises(OpenShiftCheckException) as excinfo:
-        check.exec_oc(execute_module, logging_namespace, 'get foo', [], task_vars=task_vars_config_base)
+        check.exec_oc(logging_namespace, 'get foo', [])
     assert expect in str(excinfo)


@@ -121,7 +121,7 @@ def test_is_active(groups, logging_deployed, is_active):
         openshift_hosted_logging_deploy=logging_deployed,
     )

-    assert LoggingCheck.is_active(task_vars=task_vars) == is_active
+    assert LoggingCheck(None, task_vars).is_active() == is_active


 @pytest.mark.parametrize('pod_output, expect_pods, expect_error', [
@@ -137,12 +137,10 @@ def test_is_active(groups, logging_deployed, is_active):
     ),
 ])
 def test_get_pods_for_component(pod_output, expect_pods, expect_error):
-    check = canned_loggingcheck(lambda exec_module, namespace, cmd, args, task_vars: pod_output)
+    check = canned_loggingcheck(lambda namespace, cmd, args: pod_output)
     pods, error = check.get_pods_for_component(
-        lambda name, args, task_vars: {},
         logging_namespace,
         "es",
-        {}
     )
     assert_error(error, expect_error)


+ 10 - 22
roles/openshift_health_checker/test/logging_index_time_test.py

@@ -10,7 +10,7 @@ SAMPLE_UUID = "unique-test-uuid"

 def canned_loggingindextime(exec_oc=None):
     """Create a check object with a canned exec_oc method"""
-    check = LoggingIndexTime("dummy")  # fails if a module is actually invoked
+    check = LoggingIndexTime()  # fails if a module is actually invoked
     if exec_oc:
         check.exec_oc = exec_oc
     return check
@@ -64,7 +64,7 @@ not_running_kibana_pod = {
     )
 ])
 def test_check_running_pods(pods, expect_pods):
-    check = canned_loggingindextime(None)
+    check = canned_loggingindextime()
     pods = check.running_pods(pods)
     assert pods == expect_pods

@@ -81,11 +81,8 @@ def test_check_running_pods(pods, expect_pods):
     ),
 ], ids=lambda argval: argval[0])
 def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout, extra_words):
-    def exec_oc(execute_module, ns, exec_cmd, args, task_vars):
-        return json.dumps(json_response)
-
-    check = canned_loggingindextime(exec_oc)
-    check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout, None)
+    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
+    check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)


 @pytest.mark.parametrize('name, json_response, uuid, timeout, extra_words', [
@@ -116,12 +113,9 @@ def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout, extr
     )
 ], ids=lambda argval: argval[0])
 def test_wait_until_cmd_or_err(name, json_response, uuid, timeout, extra_words):
-    def exec_oc(execute_module, ns, exec_cmd, args, task_vars):
-        return json.dumps(json_response)
-
-    check = canned_loggingindextime(exec_oc)
+    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
     with pytest.raises(OpenShiftCheckException) as error:
-        check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout, None)
+        check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)

     for word in extra_words:
         assert word in str(error)
@@ -138,13 +132,10 @@ def test_wait_until_cmd_or_err(name, json_response, uuid, timeout, extra_words):
     ),
 ], ids=lambda argval: argval[0])
 def test_curl_kibana_with_uuid(name, json_response, uuid, extra_words):
-    def exec_oc(execute_module, ns, exec_cmd, args, task_vars):
-        return json.dumps(json_response)
-
-    check = canned_loggingindextime(exec_oc)
+    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
     check.generate_uuid = lambda: uuid

-    result = check.curl_kibana_with_uuid(plain_running_kibana_pod, None)
+    result = check.curl_kibana_with_uuid(plain_running_kibana_pod)

     for word in extra_words:
         assert word in result
@@ -169,14 +160,11 @@ def test_curl_kibana_with_uuid(name, json_response, uuid, extra_words):
     ),
 ], ids=lambda argval: argval[0])
 def test_failed_curl_kibana_with_uuid(name, json_response, uuid, extra_words):
-    def exec_oc(execute_module, ns, exec_cmd, args, task_vars):
-        return json.dumps(json_response)
-
-    check = canned_loggingindextime(exec_oc)
+    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
    check.generate_uuid = lambda: uuid

     with pytest.raises(OpenShiftCheckException) as error:
-        check.curl_kibana_with_uuid(plain_running_kibana_pod, None)
+        check.curl_kibana_with_uuid(plain_running_kibana_pod)

     for word in extra_words:
         assert word in str(error)

+ 3 - 5
roles/openshift_health_checker/test/memory_availability_test.py

@@ -17,7 +17,7 @@ def test_is_active(group_names, is_active):
     task_vars = dict(
         group_names=group_names,
     )
-    assert MemoryAvailability.is_active(task_vars=task_vars) == is_active
+    assert MemoryAvailability(None, task_vars).is_active() == is_active


 @pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb', [
@@ -59,8 +59,7 @@ def test_succeeds_with_recommended_memory(group_names, configured_min, ansible_m
         ansible_memtotal_mb=ansible_memtotal_mb,
     )

-    check = MemoryAvailability(execute_module=fake_execute_module)
-    result = check.run(tmp=None, task_vars=task_vars)
+    result = MemoryAvailability(fake_execute_module, task_vars).run()

     assert not result.get('failed', False)

@@ -117,8 +116,7 @@ def test_fails_with_insufficient_memory(group_names, configured_min, ansible_mem
         ansible_memtotal_mb=ansible_memtotal_mb,
     )

-    check = MemoryAvailability(execute_module=fake_execute_module)
-    result = check.run(tmp=None, task_vars=task_vars)
+    result = MemoryAvailability(fake_execute_module, task_vars).run()

     assert result.get('failed', False)
     for word in 'below recommended'.split() + extra_words:

+ 2 - 2
roles/openshift_health_checker/test/mixins_test.py

@@ -14,10 +14,10 @@ class NotContainerizedCheck(NotContainerizedMixin, OpenShiftCheck):
     (dict(openshift=dict(common=dict(is_containerized=True))), False),
 ])
 def test_is_active(task_vars, expected):
-    assert NotContainerizedCheck.is_active(task_vars) == expected
+    assert NotContainerizedCheck(None, task_vars).is_active() == expected


 def test_is_active_missing_task_vars():
     with pytest.raises(OpenShiftCheckException) as excinfo:
-        NotContainerizedCheck.is_active(task_vars={})
+        NotContainerizedCheck().is_active()
     assert 'is_containerized' in str(excinfo.value)

+ 19 - 8
roles/openshift_health_checker/test/openshift_check_test.py

@@ -1,7 +1,7 @@
 import pytest

 from openshift_checks import OpenShiftCheck, OpenShiftCheckException
-from openshift_checks import load_checks, get_var
+from openshift_checks import load_checks


 # Fixtures
@@ -28,8 +28,8 @@ def test_OpenShiftCheck_init():
         name = "test_check"
         run = NotImplemented

-    # initialization requires at least one argument (apart from self)
-    with pytest.raises(TypeError) as excinfo:
+    # execute_module required at init if it will be used
+    with pytest.raises(RuntimeError) as excinfo:
         TestCheck().execute_module("foo")
     assert 'execute_module' in str(excinfo.value)

@@ -37,11 +37,14 @@ def test_OpenShiftCheck_init():

     # initialize with positional argument
     check = TestCheck(execute_module)
-    assert check.execute_module == execute_module
+    assert check._execute_module == execute_module

     # initialize with keyword argument
     check = TestCheck(execute_module=execute_module)
-    assert check.execute_module == execute_module
+    assert check._execute_module == execute_module
+
+    assert check.task_vars == {}
+    assert check.tmp is None


 def test_subclasses():
@@ -67,19 +70,27 @@ def test_load_checks():
     assert modules


+def dummy_check(task_vars):
+    class TestCheck(OpenShiftCheck):
+        name = "dummy"
+        run = NotImplemented
+
+    return TestCheck(task_vars=task_vars)
+
+
 @pytest.mark.parametrize("keys,expected", [
     (("foo",), 42),
     (("bar", "baz"), "openshift"),
 ])
 def test_get_var_ok(task_vars, keys, expected):
-    assert get_var(task_vars, *keys) == expected
+    assert dummy_check(task_vars).get_var(*keys) == expected


 def test_get_var_error(task_vars, missing_keys):
     with pytest.raises(OpenShiftCheckException):
-        get_var(task_vars, *missing_keys)
+        dummy_check(task_vars).get_var(*missing_keys)


 def test_get_var_default(task_vars, missing_keys):
     default = object()
-    assert get_var(task_vars, *missing_keys, default=default) == default
+    assert dummy_check(task_vars).get_var(*missing_keys, default=default) == default
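
The openshift_check_test.py hunks above show get_var moving from a module-level helper to an instance method that reads self.task_vars. A rough, self-contained sketch of that nested lookup (hypothetical class name; the default-handling details are an assumption inferred from the tests above):

class CheckVarsSketch(object):
    # Illustrative holder for task_vars with a nested get_var lookup.
    def __init__(self, task_vars=None):
        self.task_vars = task_vars or {}

    def get_var(self, *keys, **kwargs):
        value = self.task_vars
        try:
            for key in keys:
                value = value[key]  # descend one nested mapping level per key
        except (KeyError, TypeError):
            if "default" in kwargs:
                return kwargs["default"]
            raise
        return value

sketch = CheckVarsSketch(dict(openshift=dict(common=dict(service_type="origin"))))
assert sketch.get_var("openshift", "common", "service_type") == "origin"
assert sketch.get_var("no", "such", "key", default=42) == 42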

+ 7 - 10
roles/openshift_health_checker/test/ovs_version_test.py

@@ -4,7 +4,7 @@ from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException


 def test_openshift_version_not_supported():
-    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+    def execute_module(*_):
         return {}

     openshift_release = '111.7.0'
@@ -16,15 +16,14 @@ def test_openshift_version_not_supported():
         openshift_deployment_type='origin',
     )

-    check = OvsVersion(execute_module=execute_module)
     with pytest.raises(OpenShiftCheckException) as excinfo:
-        check.run(tmp=None, task_vars=task_vars)
+        OvsVersion(execute_module, task_vars).run()

     assert "no recommended version of Open vSwitch" in str(excinfo.value)


 def test_invalid_openshift_release_format():
-    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+    def execute_module(*_):
         return {}

     task_vars = dict(
@@ -33,9 +32,8 @@ def test_invalid_openshift_release_format():
         openshift_deployment_type='origin',
     )

-    check = OvsVersion(execute_module=execute_module)
     with pytest.raises(OpenShiftCheckException) as excinfo:
-        check.run(tmp=None, task_vars=task_vars)
+        OvsVersion(execute_module, task_vars).run()
     assert "invalid version" in str(excinfo.value)


@@ -54,7 +52,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
     )
     return_value = object()

-    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+    def execute_module(module_name=None, module_args=None, *_):
         assert module_name == 'rpm_version'
         assert "package_list" in module_args

@@ -64,8 +62,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):

         return return_value

-    check = OvsVersion(execute_module=execute_module)
-    result = check.run(tmp=None, task_vars=task_vars)
+    result = OvsVersion(execute_module, task_vars).run()
     assert result is return_value


@@ -86,4 +83,4 @@ def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized
         group_names=group_names,
         openshift=dict(common=dict(is_containerized=is_containerized)),
     )
-    assert OvsVersion.is_active(task_vars=task_vars) == is_active
+    assert OvsVersion(None, task_vars).is_active() == is_active

+ 3 - 4
roles/openshift_health_checker/test/package_availability_test.py

@@ -14,7 +14,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
         ansible_pkg_mgr=pkg_mgr,
         openshift=dict(common=dict(is_containerized=is_containerized)),
     )
-    assert PackageAvailability.is_active(task_vars=task_vars) == is_active
+    assert PackageAvailability(None, task_vars).is_active() == is_active


 @pytest.mark.parametrize('task_vars,must_have_packages,must_not_have_packages', [
@@ -51,13 +51,12 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
 def test_package_availability(task_vars, must_have_packages, must_not_have_packages):
     return_value = object()

-    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+    def execute_module(module_name=None, module_args=None, *_):
         assert module_name == 'check_yum_update'
         assert 'packages' in module_args
         assert set(module_args['packages']).issuperset(must_have_packages)
         assert not set(module_args['packages']).intersection(must_not_have_packages)
         return return_value

-    check = PackageAvailability(execute_module=execute_module)
-    result = check.run(tmp=None, task_vars=task_vars)
+    result = PackageAvailability(execute_module, task_vars).run()
     assert result is return_value

+ 2 - 3
roles/openshift_health_checker/test/package_update_test.py

@@ -4,13 +4,12 @@ from openshift_checks.package_update import PackageUpdate
 def test_package_update():
     return_value = object()

-    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+    def execute_module(module_name=None, module_args=None, *_):
         assert module_name == 'check_yum_update'
         assert 'packages' in module_args
         # empty list of packages means "generic check if 'yum update' will work"
         assert module_args['packages'] == []
         return return_value

-    check = PackageUpdate(execute_module=execute_module)
-    result = check.run(tmp=None, task_vars=None)
+    result = PackageUpdate(execute_module).run()
     assert result is return_value

+ 13 - 13
roles/openshift_health_checker/test/package_version_test.py

@@ -8,7 +8,7 @@ from openshift_checks.package_version import PackageVersion, OpenShiftCheckExcep
     ('0.0.0', ["no recommended version of Docker"]),
     ('0.0.0', ["no recommended version of Docker"]),
 ])
 ])
 def test_openshift_version_not_supported(openshift_release, extra_words):
 def test_openshift_version_not_supported(openshift_release, extra_words):
-    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+    def execute_module(*_):
         return {}
         return {}
 
 
     task_vars = dict(
     task_vars = dict(
@@ -18,16 +18,16 @@ def test_openshift_version_not_supported(openshift_release, extra_words):
         openshift_deployment_type='origin',
         openshift_deployment_type='origin',
     )
     )
 
 
-    check = PackageVersion(execute_module=execute_module)
+    check = PackageVersion(execute_module, task_vars)
     with pytest.raises(OpenShiftCheckException) as excinfo:
     with pytest.raises(OpenShiftCheckException) as excinfo:
-        check.run(tmp=None, task_vars=task_vars)
+        check.run()
 
 
     for word in extra_words:
     for word in extra_words:
         assert word in str(excinfo.value)
         assert word in str(excinfo.value)
 
 
 
 
 def test_invalid_openshift_release_format():
 def test_invalid_openshift_release_format():
-    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+    def execute_module(*_):
         return {}
         return {}
 
 
     task_vars = dict(
     task_vars = dict(
@@ -36,9 +36,9 @@ def test_invalid_openshift_release_format():
         openshift_deployment_type='origin',
         openshift_deployment_type='origin',
     )
     )
 
 
-    check = PackageVersion(execute_module=execute_module)
+    check = PackageVersion(execute_module, task_vars)
     with pytest.raises(OpenShiftCheckException) as excinfo:
     with pytest.raises(OpenShiftCheckException) as excinfo:
-        check.run(tmp=None, task_vars=task_vars)
+        check.run()
     assert "invalid version" in str(excinfo.value)
     assert "invalid version" in str(excinfo.value)
 
 
 
 
@@ -57,7 +57,7 @@ def test_package_version(openshift_release):
     )
     )
     return_value = object()
     return_value = object()
 
 
-    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None, *_):
         assert module_name == 'aos_version'
         assert module_name == 'aos_version'
         assert "package_list" in module_args
         assert "package_list" in module_args
 
 
@@ -67,8 +67,8 @@ def test_package_version(openshift_release):
 
 
         return return_value
         return return_value
 
 
-    check = PackageVersion(execute_module=execute_module)
-    result = check.run(tmp=None, task_vars=task_vars)
+    check = PackageVersion(execute_module, task_vars)
+    result = check.run()
     assert result is return_value
     assert result is return_value
 
 
 
 
@@ -89,7 +89,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
     )
     )
     return_value = object()
     return_value = object()
 
 
-    def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+    def execute_module(module_name=None, module_args=None, *_):
         assert module_name == 'aos_version'
         assert module_name == 'aos_version'
         assert "package_list" in module_args
         assert "package_list" in module_args
 
 
@@ -99,8 +99,8 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
 
 
         return return_value
         return return_value
 
 
-    check = PackageVersion(execute_module=execute_module)
-    result = check.run(tmp=None, task_vars=task_vars)
+    check = PackageVersion(execute_module, task_vars)
+    result = check.run()
     assert result is return_value
     assert result is return_value
 
 
 
 
@@ -121,4 +121,4 @@ def test_package_version_skip_when_not_master_nor_node(group_names, is_container
         group_names=group_names,
         group_names=group_names,
         openshift=dict(common=dict(is_containerized=is_containerized)),
         openshift=dict(common=dict(is_containerized=is_containerized)),
     )
     )
-    assert PackageVersion.is_active(task_vars=task_vars) == is_active
+    assert PackageVersion(None, task_vars).is_active() == is_active