Pārlūkot izejas kodu

openshift_checks: add property to track 'changed'

Introduced a 'changed' property so that checks which can make changes
can record whether they actually did. Rather than each check's own logic
having to track this and include it in the result hash, the check simply
sets the property and the action plugin inserts "changed" into the
result hash after the check runs (even if it raised an exception).

This also allowed clearing out a lot of crufty "changed: false" entries
from the result hashes the checks return.
Luke Meyer 7 gadi atpakaļ
vecāks
revīzija
bf0828bc0f

+ 6 - 4
roles/openshift_health_checker/action_plugins/openshift_health_check.py

@@ -68,13 +68,15 @@ class ActionModule(ActionBase):
                         msg=str(e),
                     )
 
+            if check.changed:
+                r["changed"] = True
             check_results[check_name] = r
 
-            if r.get("failed", False):
-                result["failed"] = True
-                result["msg"] = "One or more checks failed"
+        result["changed"] = any(r.get("changed") for r in check_results.values())
+        if any(r.get("failed") for r in check_results.values()):
+            result["failed"] = True
+            result["msg"] = "One or more checks failed"
 
-        result["changed"] = any(r.get("changed", False) for r in check_results.values())
         return result
 
     def load_known_checks(self, tmp, task_vars):

+ 2 - 0
roles/openshift_health_checker/openshift_checks/__init__.py

@@ -34,6 +34,8 @@ class OpenShiftCheck(object):
         self._execute_module = execute_module
         self.task_vars = task_vars or {}
         self.tmp = tmp
+        # set True when the check makes a change to the host so it can be reported to the user:
+        self.changed = False
 
     @abstractproperty
     def name(self):

+ 4 - 6
roles/openshift_health_checker/openshift_checks/docker_image_availability.py

@@ -41,11 +41,10 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
         return super(DockerImageAvailability, self).is_active() and has_valid_deployment_type
 
     def run(self):
-        msg, failed, changed = self.ensure_dependencies()
+        msg, failed = self.ensure_dependencies()
         if failed:
             return {
                 "failed": True,
-                "changed": changed,
                 "msg": "Some dependencies are required in order to check Docker image availability.\n" + msg
             }
 
@@ -54,11 +53,11 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
 
         # exit early if all images were found locally
         if not missing_images:
-            return {"changed": changed}
+            return {}
 
         registries = self.known_docker_registries()
         if not registries:
-            return {"failed": True, "msg": "Unable to retrieve any docker registries.", "changed": changed}
+            return {"failed": True, "msg": "Unable to retrieve any docker registries."}
 
         available_images = self.available_images(missing_images, registries)
         unavailable_images = set(missing_images) - set(available_images)
@@ -70,10 +69,9 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
                     "One or more required Docker images are not available:\n    {}\n"
                     "Configured registries: {}"
                 ).format(",\n    ".join(sorted(unavailable_images)), ", ".join(registries)),
-                "changed": changed,
             }
 
-        return {"changed": changed}
+        return {}
 
     def required_images(self):
         """

+ 4 - 6
roles/openshift_health_checker/openshift_checks/docker_storage.py

@@ -43,21 +43,20 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
     ]
 
     def run(self):
-        msg, failed, changed = self.ensure_dependencies()
+        msg, failed = self.ensure_dependencies()
         if failed:
             return {
                 "failed": True,
-                "changed": changed,
                 "msg": "Some dependencies are required in order to query docker storage on host:\n" + msg
             }
 
         # attempt to get the docker info hash from the API
         docker_info = self.execute_module("docker_info", {})
         if docker_info.get("failed"):
-            return {"failed": True, "changed": changed,
+            return {"failed": True,
                     "msg": "Failed to query Docker API. Is docker running on this host?"}
         if not docker_info.get("info"):  # this would be very strange
-            return {"failed": True, "changed": changed,
+            return {"failed": True,
                     "msg": "Docker API query missing info:\n{}".format(json.dumps(docker_info))}
         docker_info = docker_info["info"]
 
@@ -68,7 +67,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
                 "Detected unsupported Docker storage driver '{driver}'.\n"
                 "Supported storage drivers are: {drivers}"
             ).format(driver=driver, drivers=', '.join(self.storage_drivers))
-            return {"failed": True, "changed": changed, "msg": msg}
+            return {"failed": True, "msg": msg}
 
         # driver status info is a list of tuples; convert to dict and validate based on driver
         driver_status = {item[0]: item[1] for item in docker_info.get("DriverStatus", [])}
@@ -81,7 +80,6 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
         if driver in ['overlay', 'overlay2']:
             result = self.check_overlay_support(docker_info, driver_status)
 
-        result['changed'] = result.get('changed', False) or changed
         return result
 
     def check_devicemapper_support(self, driver_status):

+ 2 - 2
roles/openshift_health_checker/openshift_checks/etcd_imagedata_size.py

@@ -56,7 +56,7 @@ class EtcdImageDataSize(OpenShiftCheck):
                     reason = etcdkeysize["module_stderr"]
 
                 msg = msg.format(host=etcd_host, reason=reason)
-                return {"failed": True, "changed": False, "msg": msg}
+                return {"failed": True, "msg": msg}
 
             if etcdkeysize["size_limit_exceeded"]:
                 limit = self._to_gigabytes(etcd_imagedata_size_limit)
@@ -65,7 +65,7 @@ class EtcdImageDataSize(OpenShiftCheck):
                        "Use the `oadm prune images` command to cleanup unused Docker images.")
                 return {"failed": True, "msg": msg.format(host=etcd_host, limit=limit)}
 
-        return {"changed": False}
+        return {}
 
     @staticmethod
     def _get_etcd_mountpath(ansible_mounts):

+ 1 - 1
roles/openshift_health_checker/openshift_checks/etcd_volume.py

@@ -40,7 +40,7 @@ class EtcdVolume(OpenShiftCheck):
             )
             return {"failed": True, "msg": msg}
 
-        return {"changed": False}
+        return {}
 
     def _etcd_mount_info(self):
         ansible_mounts = self.get_var("ansible_mounts")

+ 3 - 3
roles/openshift_health_checker/openshift_checks/logging/curator.py

@@ -18,16 +18,16 @@ class Curator(LoggingCheck):
             "curator",
         )
         if error:
-            return {"failed": True, "changed": False, "msg": error}
+            return {"failed": True, "msg": error}
         check_error = self.check_curator(curator_pods)
 
         if check_error:
             msg = ("The following Curator deployment issue was found:"
                    "{}".format(check_error))
-            return {"failed": True, "changed": False, "msg": msg}
+            return {"failed": True, "msg": msg}
 
         # TODO(lmeyer): run it all again for the ops cluster
-        return {"failed": False, "changed": False, "msg": 'No problems found with Curator deployment.'}
+        return {"failed": False, "msg": 'No problems found with Curator deployment.'}
 
     def check_curator(self, pods):
         """Check to see if curator is up and working. Returns: error string"""

+ 3 - 3
roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py

@@ -21,16 +21,16 @@ class Elasticsearch(LoggingCheck):
             "es",
         )
         if error:
-            return {"failed": True, "changed": False, "msg": error}
+            return {"failed": True, "msg": error}
         check_error = self.check_elasticsearch(es_pods)
 
         if check_error:
             msg = ("The following Elasticsearch deployment issue was found:"
                    "{}".format(check_error))
-            return {"failed": True, "changed": False, "msg": msg}
+            return {"failed": True, "msg": msg}
 
         # TODO(lmeyer): run it all again for the ops cluster
-        return {"failed": False, "changed": False, "msg": 'No problems found with Elasticsearch deployment.'}
+        return {"failed": False, "msg": 'No problems found with Elasticsearch deployment.'}
 
     def _not_running_elasticsearch_pods(self, es_pods):
         """Returns: list of pods that are not running, list of errors about non-running pods"""

+ 3 - 3
roles/openshift_health_checker/openshift_checks/logging/fluentd.py

@@ -20,16 +20,16 @@ class Fluentd(LoggingCheck):
             "fluentd",
         )
         if error:
-            return {"failed": True, "changed": False, "msg": error}
+            return {"failed": True, "msg": error}
         check_error = self.check_fluentd(fluentd_pods)
 
         if check_error:
             msg = ("The following Fluentd deployment issue was found:"
                    "{}".format(check_error))
-            return {"failed": True, "changed": False, "msg": msg}
+            return {"failed": True, "msg": msg}
 
         # TODO(lmeyer): run it all again for the ops cluster
-        return {"failed": False, "changed": False, "msg": 'No problems found with Fluentd deployment.'}
+        return {"failed": False, "msg": 'No problems found with Fluentd deployment.'}
 
     @staticmethod
     def _filter_fluentd_labeled_nodes(nodes_by_name, node_selector):

+ 3 - 3
roles/openshift_health_checker/openshift_checks/logging/kibana.py

@@ -30,7 +30,7 @@ class Kibana(LoggingCheck):
             "kibana",
         )
         if error:
-            return {"failed": True, "changed": False, "msg": error}
+            return {"failed": True, "msg": error}
         check_error = self.check_kibana(kibana_pods)
 
         if not check_error:
@@ -39,10 +39,10 @@ class Kibana(LoggingCheck):
         if check_error:
             msg = ("The following Kibana deployment issue was found:"
                    "{}".format(check_error))
-            return {"failed": True, "changed": False, "msg": msg}
+            return {"failed": True, "msg": msg}
 
         # TODO(lmeyer): run it all again for the ops cluster
-        return {"failed": False, "changed": False, "msg": 'No problems found with Kibana deployment.'}
+        return {"failed": False, "msg": 'No problems found with Kibana deployment.'}
 
     def _verify_url_internal(self, url):
         """

+ 4 - 4
roles/openshift_health_checker/openshift_checks/mixins.py

@@ -29,10 +29,10 @@ class DockerHostMixin(object):
         """
         Ensure that docker-related packages exist, but not on atomic hosts
         (which would not be able to install but should already have them).
-        Returns: msg, failed, changed
+        Returns: msg, failed
         """
         if self.get_var("openshift", "common", "is_atomic"):
-            return "", False, False
+            return "", False
 
         # NOTE: we would use the "package" module but it's actually an action plugin
         # and it's not clear how to invoke one of those. This is about the same anyway:
@@ -49,5 +49,5 @@ class DockerHostMixin(object):
                 "    {deps}\n{msg}"
             ).format(deps=',\n    '.join(self.dependencies), msg=msg)
         failed = result.get("failed", False) or result.get("rc", 0) != 0
-        changed = result.get("changed", False)
-        return msg, failed, changed
+        self.changed = result.get("changed", False)
+        return msg, failed

+ 9 - 5
roles/openshift_health_checker/test/action_plugin_test.py

@@ -6,7 +6,7 @@ from openshift_health_check import ActionModule, resolve_checks
 from openshift_checks import OpenShiftCheckException
 
 
-def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None):
+def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, run_exception=None, changed=False):
     """Returns a new class that is compatible with OpenShiftCheck for testing."""
 
     _name, _tags = name, tags
@@ -14,6 +14,7 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
     class FakeCheck(object):
         name = _name
         tags = _tags or []
+        changed = False
 
         def __init__(self, execute_module=None, task_vars=None, tmp=None):
             pass
@@ -22,6 +23,7 @@ def fake_check(name='fake_check', tags=None, is_active=True, run_return=None, ru
             return is_active
 
         def run(self):
+            self.changed = changed
             if run_exception is not None:
                 raise run_exception
             return run_return
@@ -135,14 +137,15 @@ def test_action_plugin_run_check_ok(plugin, task_vars, monkeypatch):
 
 
 def test_action_plugin_run_check_changed(plugin, task_vars, monkeypatch):
-    check_return_value = {'ok': 'test', 'changed': True}
-    check_class = fake_check(run_return=check_return_value)
+    check_return_value = {'ok': 'test'}
+    check_class = fake_check(run_return=check_return_value, changed=True)
     monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
     monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
 
     result = plugin.run(tmp=None, task_vars=task_vars)
 
     assert result['checks']['fake_check'] == check_return_value
+    assert changed(result['checks']['fake_check'])
     assert not failed(result)
     assert changed(result)
     assert not skipped(result)
@@ -165,7 +168,7 @@ def test_action_plugin_run_check_fail(plugin, task_vars, monkeypatch):
 def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
     exception_msg = 'fake check has an exception'
     run_exception = OpenShiftCheckException(exception_msg)
-    check_class = fake_check(run_exception=run_exception)
+    check_class = fake_check(run_exception=run_exception, changed=True)
     monkeypatch.setattr(plugin, 'load_known_checks', lambda tmp, task_vars: {'fake_check': check_class()})
     monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
 
@@ -173,7 +176,8 @@ def test_action_plugin_run_check_exception(plugin, task_vars, monkeypatch):
 
     assert failed(result['checks']['fake_check'], msg_has=exception_msg)
     assert failed(result, msg_has=['failed'])
-    assert not changed(result)
+    assert changed(result['checks']['fake_check'])
+    assert changed(result)
     assert not skipped(result)