
add fluentd logging driver config check

juanvallejo · 7 years ago
commit 04154a2123

+ 23 - 0
roles/openshift_health_checker/openshift_checks/__init__.py

@@ -105,6 +105,29 @@ class OpenShiftCheck(object):
             raise OpenShiftCheckException("'{}' is undefined".format(".".join(map(str, keys))))
         return value
 
+    @staticmethod
+    def get_major_minor_version(openshift_image_tag):
+        """Parse and return the deployed version of OpenShift as a tuple."""
+        if openshift_image_tag and openshift_image_tag[0] == 'v':
+            openshift_image_tag = openshift_image_tag[1:]
+
+        # map major release versions across releases
+        # to a common major version
+        openshift_major_release_version = {
+            "1": "3",
+        }
+
+        components = openshift_image_tag.split(".")
+        if not components or len(components) < 2:
+            msg = "An invalid version of OpenShift was found for this host: {}"
+            raise OpenShiftCheckException(msg.format(openshift_image_tag))
+
+        if components[0] in openshift_major_release_version:
+            components[0] = openshift_major_release_version[components[0]]
+
+        components = tuple(int(x) for x in components[:2])
+        return components
+
 
 LOADER_EXCLUDES = (
     "__init__.py",

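A quick sketch of the shared helper's behavior (the import path follows this role's layout; the tags are illustrative):

    from openshift_checks import OpenShiftCheck

    # the leading "v" is stripped and only major.minor are kept, as ints
    assert OpenShiftCheck.get_major_minor_version("v3.6.173.0.5") == (3, 6)
    # Origin 1.x tags are normalized to the common 3.x major version
    assert OpenShiftCheck.get_major_minor_version("1.5.1") == (3, 5)
    # tuples compare lexicographically, which is how callers gate on version
    assert OpenShiftCheck.get_major_minor_version("v3.5.5") < (3, 6)
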
+ 5 - 6
roles/openshift_health_checker/openshift_checks/logging/curator.py

@@ -9,11 +9,11 @@ class Curator(LoggingCheck):
     name = "curator"
     tags = ["health", "logging"]
 
-    logging_namespace = None
-
     def run(self):
+        """Check various things and gather errors. Returns: result as hash"""
+
         self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
-        curator_pods, error = super(Curator, self).get_pods_for_component(
+        curator_pods, error = self.get_pods_for_component(
             self.logging_namespace,
             "curator",
         )
@@ -23,7 +23,6 @@ class Curator(LoggingCheck):
 
         if check_error:
             msg = ("The following Curator deployment issue was found:"
-                   "\n-------\n"
                    "{}".format(check_error))
             return {"failed": True, "changed": False, "msg": msg}
 
@@ -39,7 +38,7 @@ class Curator(LoggingCheck):
                 "Is Curator correctly deployed?"
             )
 
-        not_running = super(Curator, self).not_running_pods(pods)
+        not_running = self.not_running_pods(pods)
         if len(not_running) == len(pods):
             return (
                 "The Curator pod is not currently in a running state,\n"

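The super(Curator, self) calls above were replaced with plain self lookups because these checks do not override the inherited helpers; super() only matters when a subclass shadows the method it wants to reach. A minimal illustration with hypothetical classes:

    class LoggingCheckBase(object):
        def not_running_pods(self, pods):
            return [p for p in pods if p.get("status", {}).get("phase") != "Running"]

    class CuratorLike(LoggingCheckBase):
        def run(self, pods):
            # no override exists, so ordinary attribute lookup already
            # resolves to the base class method via the MRO
            return self.not_running_pods(pods)

    assert CuratorLike().run([{"status": {"phase": "Error"}}]) != []
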
+ 7 - 17
roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py

@@ -12,13 +12,11 @@ class Elasticsearch(LoggingCheck):
     name = "elasticsearch"
     tags = ["health", "logging"]
 
-    logging_namespace = None
-
     def run(self):
         """Check various things and gather errors. Returns: result as hash"""
 
         self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
-        es_pods, error = super(Elasticsearch, self).get_pods_for_component(
+        es_pods, error = self.get_pods_for_component(
             self.logging_namespace,
             "es",
         )
@@ -28,7 +26,6 @@ class Elasticsearch(LoggingCheck):
 
         if check_error:
             msg = ("The following Elasticsearch deployment issue was found:"
-                   "\n-------\n"
                    "{}".format(check_error))
             return {"failed": True, "changed": False, "msg": msg}
 
@@ -37,7 +34,7 @@ class Elasticsearch(LoggingCheck):
 
     def _not_running_elasticsearch_pods(self, es_pods):
         """Returns: list of pods that are not running, list of errors about non-running pods"""
-        not_running = super(Elasticsearch, self).not_running_pods(es_pods)
+        not_running = self.not_running_pods(es_pods)
         if not_running:
             return not_running, [(
                 'The following Elasticsearch pods are not running:\n'
@@ -78,7 +75,7 @@ class Elasticsearch(LoggingCheck):
         for pod_name in pods_by_name.keys():
             # Compare what each ES node reports as master and compare for split brain
             get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
-            master_name_str = self._exec_oc(get_master_cmd, [])
+            master_name_str = self.exec_oc(self.logging_namespace, get_master_cmd, [])
             master_names = (master_name_str or '').split(' ')
             if len(master_names) > 1:
                 es_master_names.add(master_names[1])
@@ -111,7 +108,7 @@ class Elasticsearch(LoggingCheck):
 
         # get ES cluster nodes
         node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
-        cluster_node_data = self._exec_oc(node_cmd, [])
+        cluster_node_data = self.exec_oc(self.logging_namespace, node_cmd, [])
         try:
             cluster_nodes = json.loads(cluster_node_data)['nodes']
         except (ValueError, KeyError):
@@ -138,7 +135,7 @@ class Elasticsearch(LoggingCheck):
         error_msgs = []
         for pod_name in pods_by_name.keys():
             cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
-            cluster_health_data = self._exec_oc(cluster_health_cmd, [])
+            cluster_health_data = self.exec_oc(self.logging_namespace, cluster_health_cmd, [])
             try:
                 health_res = json.loads(cluster_health_data)
                 if not health_res or not health_res.get('status'):
@@ -165,7 +162,7 @@ class Elasticsearch(LoggingCheck):
         error_msgs = []
         for pod_name in pods_by_name.keys():
             df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
-            disk_output = self._exec_oc(df_cmd, [])
+            disk_output = self.exec_oc(self.logging_namespace, df_cmd, [])
             lines = disk_output.splitlines()
             # expecting one header looking like 'IUse% Use%' and one body line
             body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
@@ -201,10 +198,3 @@ class Elasticsearch(LoggingCheck):
                     ))
 
         return error_msgs
-
-    def _exec_oc(self, cmd_str, extra_args):
-        return super(Elasticsearch, self).exec_oc(
-            self.logging_namespace,
-            cmd_str,
-            extra_args,
-        )

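With the per-class _exec_oc wrappers removed, every call site passes the namespace to the shared LoggingCheck.exec_oc(namespace, cmd_str, extra_args) entry point, which also makes the dependency trivial to stub. A sketch of the new call shape (the canned output mirrors the "_cat/master" response used in the tests further down):

    from openshift_checks.logging.elasticsearch import Elasticsearch

    check = Elasticsearch(None, {})
    check.exec_oc = lambda ns, cmd, args: "name logging-es"
    master_name_str = check.exec_oc("logging", "exec es-pod -- curl .../_cat/master", [])
    assert master_name_str.split(" ")[1] == "logging-es"
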
+ 6 - 12
roles/openshift_health_checker/openshift_checks/logging/fluentd.py

@@ -11,8 +11,6 @@ class Fluentd(LoggingCheck):
     name = "fluentd"
     tags = ["health", "logging"]
 
-    logging_namespace = None
-
     def run(self):
         """Check various things and gather errors. Returns: result as hash"""
 
@@ -27,7 +25,6 @@ class Fluentd(LoggingCheck):
 
         if check_error:
             msg = ("The following Fluentd deployment issue was found:"
-                   "\n-------\n"
                    "{}".format(check_error))
             return {"failed": True, "changed": False, "msg": msg}
 
@@ -147,7 +144,11 @@ class Fluentd(LoggingCheck):
 
     def get_nodes_by_name(self):
         """Retrieve all the node definitions. Returns: dict(name: node), error string"""
-        nodes_json = self._exec_oc("get nodes -o json", [])
+        nodes_json = self.exec_oc(
+            self.logging_namespace,
+            "get nodes -o json",
+            []
+        )
         try:
             nodes = json.loads(nodes_json)
         except ValueError:  # no valid json - should not happen
@@ -158,10 +159,3 @@ class Fluentd(LoggingCheck):
             node['metadata']['name']: node
             for node in nodes['items']
         }, None
-
-    def _exec_oc(self, cmd_str, extra_args):
-        return super(Fluentd, self).exec_oc(
-            self.logging_namespace,
-            cmd_str,
-            extra_args,
-        )

+ 137 - 0
roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py

@@ -0,0 +1,137 @@
+"""
+Module for performing checks on a Fluentd logging deployment configuration
+"""
+
+from openshift_checks import OpenShiftCheckException
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class FluentdConfig(LoggingCheck):
+    """Module that checks logging configuration of an integrated logging Fluentd deployment"""
+    name = "fluentd_config"
+    tags = ["health"]
+
+    def is_active(self):
+        logging_deployed = self.get_var("openshift_hosted_logging_deploy", default=False)
+
+        try:
+            version = self.get_major_minor_version(self.get_var("openshift_image_tag"))
+        except (ValueError, OpenShiftCheckException):
+            # if the OpenShift version cannot be parsed, perform the check anyway (if logging is enabled)
+            return logging_deployed
+
+        return logging_deployed and version < (3, 6)
+
+    def run(self):
+        """Check that Fluentd has running pods, and that its logging config matches Docker's logging config."""
+        self.logging_namespace = self.get_var("openshift_logging_namespace", default=self.logging_namespace)
+        config_error = self.check_logging_config()
+        if config_error:
+            msg = ("The following Fluentd logging configuration problem was found:"
+                   "\n{}".format(config_error))
+            return {"failed": True, "msg": msg}
+
+        return {}
+
+    def check_logging_config(self):
+        """Ensure that the configured Docker logging driver matches fluentd settings.
+        This means that, at least for now, if the following condition is met:
+
+            openshift_logging_fluentd_use_journal == True
+
+        then the value of the configured Docker logging driver should be "journald".
+        Otherwise, the value of the Docker logging driver should be "json-file".
+        Returns an error string if the above condition is not met, or None otherwise."""
+        use_journald = self.get_var("openshift_logging_fluentd_use_journal", default=True)
+
+        # if check is running on a master, retrieve all running pods
+        # and check any pod's container for the env var "USE_JOURNAL"
+        group_names = self.get_var("group_names")
+        if "masters" in group_names:
+            use_journald = self.check_fluentd_env_var()
+
+        docker_info = self.execute_module("docker_info", {})
+        try:
+            logging_driver = docker_info["info"]["LoggingDriver"]
+        except KeyError:
+            return "Unable to determine Docker logging driver."
+
+        recommended_logging_driver = "journald"
+        error = None
+
+        # If fluentd is set to use journald but Docker is not, recommend setting the `--log-driver`
+        # option as an inventory file variable, or adding the log driver value as part of the
+        # Docker configuration in /etc/docker/daemon.json. There is no global --log-driver flag that
+        # can be passed to the Docker binary; the only other recommendation that can be made would be
+        # to pass the `--log-driver` flag to the "run" sub-command of the `docker` binary when running
+        # individual containers.
+        if use_journald and logging_driver != "journald":
+            error = ('Your Fluentd configuration is set to aggregate Docker container logs from "journald".\n'
+                     'This differs from your Docker configuration, which has been set to use "{driver}" '
+                     'as the default method of storing logs.\n'
+                     'This discrepancy in configuration will prevent Fluentd from receiving any logs '
+                     'from your Docker containers.').format(driver=logging_driver)
+        elif not use_journald and logging_driver != "json-file":
+            recommended_logging_driver = "json-file"
+            error = ('Your Fluentd configuration is set to aggregate Docker container logs from '
+                     'individual json log files per container.\n'
+                     'This differs from your Docker configuration, which has been set to use '
+                     '"{driver}" as the default method of storing logs.\n'
+                     'This discrepancy in configuration will prevent Fluentd from receiving any logs '
+                     'from your Docker containers.').format(driver=logging_driver)
+
+        if error:
+            error += ('\nTo resolve this issue, add the following variable to your Ansible inventory file:\n\n'
+                      '  openshift_docker_options="--log-driver={driver}"\n\n'
+                      'Alternatively, you can add the following option to your Docker configuration, located in '
+                      '"/etc/docker/daemon.json":\n\n'
+                      '{{ "log-driver": "{driver}" }}\n\n'
+                      'See https://docs.docker.com/engine/admin/logging/json-file '
+                      'for more information.').format(driver=recommended_logging_driver)
+
+        return error
+
+    def check_fluentd_env_var(self):
+        """Read and return the value of the 'USE_JOURNAL' environment variable on a fluentd pod."""
+        running_pods = self.running_fluentd_pods()
+
+        try:
+            pod_containers = running_pods[0]["spec"]["containers"]
+        except KeyError:
+            return "Unable to detect running containers on selected Fluentd pod."
+
+        if not pod_containers:
+            msg = ('There are no running containers on the selected Fluentd pod "{}".\n'
+                   'Unable to calculate expected logging driver.').format(running_pods[0]["metadata"].get("name", ""))
+            raise OpenShiftCheckException(msg)
+
+        pod_env = pod_containers[0].get("env")
+        if not pod_env:
+            msg = ('There are no environment variables set on the Fluentd container "{}".\n'
+                   'Unable to calculate expected logging driver.').format(pod_containers[0].get("name"))
+            raise OpenShiftCheckException(msg)
+
+        for env in pod_env:
+            if env["name"] == "USE_JOURNAL":
+                return env.get("value", "false") != "false"
+
+        return False
+
+    def running_fluentd_pods(self):
+        """Return a list of running fluentd pods."""
+        fluentd_pods, error = self.get_pods_for_component(
+            self.logging_namespace,
+            "fluentd",
+        )
+        if error:
+            msg = 'Unable to retrieve any pods for the "fluentd" logging component: {}'.format(error)
+            raise OpenShiftCheckException(msg)
+
+        running_fluentd_pods = [pod for pod in fluentd_pods if pod['status']['phase'] == 'Running']
+        if not running_fluentd_pods:
+            msg = ('No Fluentd pods were found to be in the "Running" state. '
+                   'At least one Fluentd pod is required in order to perform this check.')
+
+            raise OpenShiftCheckException(msg)
+
+        return running_fluentd_pods

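The rule this new check enforces can be stated on its own: Fluentd consuming journald requires Docker's "journald" log driver, while Fluentd tailing per-container json log files requires "json-file". A standalone restatement (function names here are illustrative, not part of the module):

    def expected_docker_log_driver(use_journald):
        return "journald" if use_journald else "json-file"

    def drivers_mismatch(use_journald, docker_logging_driver):
        return docker_logging_driver != expected_docker_log_driver(use_journald)

    assert not drivers_mismatch(True, "journald")
    assert drivers_mismatch(True, "json-file")   # journald expected
    assert drivers_mismatch(False, "journald")   # json-file expected

Note also that on masters the USE_JOURNAL value comes out of the pod spec as a string, which is why check_fluentd_env_var treats any value other than "false" as true.
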
+ 7 - 13
roles/openshift_health_checker/openshift_checks/logging/kibana.py

@@ -21,13 +21,11 @@ class Kibana(LoggingCheck):
     name = "kibana"
     tags = ["health", "logging"]
 
-    logging_namespace = None
-
     def run(self):
         """Check various things and gather errors. Returns: result as hash"""
 
         self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
-        kibana_pods, error = super(Kibana, self).get_pods_for_component(
+        kibana_pods, error = self.get_pods_for_component(
             self.logging_namespace,
             "kibana",
         )
@@ -40,7 +38,6 @@ class Kibana(LoggingCheck):
 
         if check_error:
             msg = ("The following Kibana deployment issue was found:"
-                   "\n-------\n"
                    "{}".format(check_error))
             return {"failed": True, "changed": False, "msg": msg}
 
@@ -118,7 +115,11 @@ class Kibana(LoggingCheck):
         """
 
         # Get logging url
-        get_route = self._exec_oc("get route logging-kibana -o json", [])
+        get_route = self.exec_oc(
+            self.logging_namespace,
+            "get route logging-kibana -o json",
+            [],
+        )
         if not get_route:
             return None, 'no_route_exists'
 
@@ -217,10 +218,3 @@ class Kibana(LoggingCheck):
             ).format(error=error)
             return error
         return None
-
-    def _exec_oc(self, cmd_str, extra_args):
-        return super(Kibana, self).exec_oc(
-            self.logging_namespace,
-            cmd_str,
-            extra_args,
-        )

+ 5 - 28
roles/openshift_health_checker/openshift_checks/ovs_version.py

@@ -21,12 +21,6 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
         "3.4": "2.4",
     }
 
-    # map major release versions across releases
-    # to a common major version
-    openshift_major_release_version = {
-        "1": "3",
-    }
-
     def is_active(self):
         """Skip hosts that do not have package requirements."""
         group_names = self.get_var("group_names", default=[])
@@ -46,32 +40,15 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
 
     def get_required_ovs_version(self):
         """Return the correct Open vSwitch version for the current OpenShift version"""
-        openshift_version = self._get_openshift_version()
+        openshift_version_tuple = self.get_major_minor_version(self.get_var("openshift_image_tag"))
 
-        if float(openshift_version) < 3.5:
+        if openshift_version_tuple < (3, 5):
             return self.openshift_to_ovs_version["3.4"]
 
-        ovs_version = self.openshift_to_ovs_version.get(str(openshift_version))
+        openshift_version = ".".join(str(x) for x in openshift_version_tuple)
+        ovs_version = self.openshift_to_ovs_version.get(openshift_version)
         if ovs_version:
-            return self.openshift_to_ovs_version[str(openshift_version)]
+            return self.openshift_to_ovs_version[openshift_version]
 
         msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
         raise OpenShiftCheckException(msg.format(openshift_version))
-
-    def _get_openshift_version(self):
-        openshift_version = self.get_var("openshift_image_tag")
-        if openshift_version and openshift_version[0] == 'v':
-            openshift_version = openshift_version[1:]
-
-        return self._parse_version(openshift_version)
-
-    def _parse_version(self, version):
-        components = version.split(".")
-        if not components or len(components) < 2:
-            msg = "An invalid version of OpenShift was found for this host: {}"
-            raise OpenShiftCheckException(msg.format(version))
-
-        if components[0] in self.openshift_major_release_version:
-            components[0] = self.openshift_major_release_version[components[0]]
-
-        return '.'.join(components[:2])

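Moving from float() to the shared tuple comparison is more than a refactor: lexicographic integer comparison stays correct once minor versions reach two digits, where float parsing silently misorders.

    assert (3, 10) > (3, 5)                 # tuple ordering is correct
    assert float("3.10") < float("3.5")     # parses as 3.1 < 3.5 -- the float pitfall
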
+ 13 - 16
roles/openshift_health_checker/test/elasticsearch_test.py

@@ -6,14 +6,6 @@ from openshift_checks.logging.elasticsearch import Elasticsearch
 task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
 
 
-def canned_elasticsearch(task_vars=None, exec_oc=None):
-    """Create an Elasticsearch check object with canned exec_oc method"""
-    check = Elasticsearch("dummy", task_vars or {})  # fails if a module is actually invoked
-    if exec_oc:
-        check._exec_oc = exec_oc
-    return check
-
-
 def assert_error(error, expect_error):
     if expect_error:
         assert error
@@ -50,10 +42,10 @@ split_es_pod = {
 
 
 def test_check_elasticsearch():
-    assert 'No logging Elasticsearch pods' in canned_elasticsearch().check_elasticsearch([])
+    assert 'No logging Elasticsearch pods' in Elasticsearch().check_elasticsearch([])
 
     # canned oc responses to match so all the checks pass
-    def _exec_oc(cmd, args):
+    def _exec_oc(ns, cmd, args):
         if '_cat/master' in cmd:
             return 'name logging-es'
         elif '/_nodes' in cmd:
@@ -65,7 +57,9 @@ def test_check_elasticsearch():
         else:
             raise Exception(cmd)
 
-    assert not canned_elasticsearch({}, _exec_oc).check_elasticsearch([plain_es_pod])
+    check = Elasticsearch(None, {})
+    check.exec_oc = _exec_oc
+    assert not check.check_elasticsearch([plain_es_pod])
 
 
 def pods_by_name(pods):
@@ -88,8 +82,8 @@ def pods_by_name(pods):
 ])
 def test_check_elasticsearch_masters(pods, expect_error):
     test_pods = list(pods)
-    check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: test_pods.pop(0)['_test_master_name_str'])
-
+    check = Elasticsearch(None, task_vars_config_base)
+    check.execute_module = lambda cmd, args: {'result': test_pods.pop(0)['_test_master_name_str']}
     errors = check._check_elasticsearch_masters(pods_by_name(pods))
     assert_error(''.join(errors), expect_error)
 
@@ -124,7 +118,8 @@ es_node_list = {
     ),
 ])
 def test_check_elasticsearch_node_list(pods, node_list, expect_error):
-    check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(node_list))
+    check = Elasticsearch(None, task_vars_config_base)
+    check.execute_module = lambda cmd, args: {'result': json.dumps(node_list)}
 
     errors = check._check_elasticsearch_node_list(pods_by_name(pods))
     assert_error(''.join(errors), expect_error)
@@ -149,7 +144,8 @@ def test_check_elasticsearch_node_list(pods, node_list, expect_error):
 ])
 def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
     test_health_data = list(health_data)
-    check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: json.dumps(test_health_data.pop(0)))
+    check = Elasticsearch(None, task_vars_config_base)
+    check.execute_module = lambda cmd, args: {'result': json.dumps(test_health_data.pop(0))}
 
     errors = check._check_es_cluster_health(pods_by_name(pods))
     assert_error(''.join(errors), expect_error)
@@ -174,7 +170,8 @@ def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
     ),
 ])
 def test_check_elasticsearch_diskspace(disk_data, expect_error):
-    check = canned_elasticsearch(task_vars_config_base, lambda cmd, args: disk_data)
+    check = Elasticsearch(None, task_vars_config_base)
+    check.execute_module = lambda cmd, args: {'result': disk_data}
 
     errors = check._check_elasticsearch_diskspace(pods_by_name([plain_es_pod]))
     assert_error(''.join(errors), expect_error)

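The canned_elasticsearch helper is gone because the check's collaborators can now be stubbed by plain attribute assignment. The stubs above also hint that exec_oc shells out through execute_module and reads its 'result' field, which is why they return a dict rather than a bare string (an inference from the stubs, not verified against the LoggingCheck base class here):

    check = Elasticsearch(None, {})
    check.execute_module = lambda cmd, args: {'result': '{"status": "green"}'}
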
+ 357 - 0
roles/openshift_health_checker/test/fluentd_config_test.py

@@ -0,0 +1,357 @@
+import pytest
+
+from openshift_checks.logging.fluentd_config import FluentdConfig, OpenShiftCheckException
+
+
+def canned_fluentd_pod(containers):
+    return {
+        "metadata": {
+            "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+            "name": "logging-fluentd-1",
+        },
+        "spec": {
+            "host": "node1",
+            "nodeName": "node1",
+            "containers": containers,
+        },
+        "status": {
+            "phase": "Running",
+            "containerStatuses": [{"ready": True}],
+            "conditions": [{"status": "True", "type": "Ready"}],
+        }
+    }
+
+
+fluentd_pod = {
+    "metadata": {
+        "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+        "name": "logging-fluentd-1",
+    },
+    "spec": {
+        "host": "node1",
+        "nodeName": "node1",
+        "containers": [
+            {
+                "name": "container1",
+                "env": [
+                    {
+                        "name": "USE_JOURNAL",
+                        "value": "true",
+                    }
+                ],
+            }
+        ],
+    },
+    "status": {
+        "phase": "Running",
+        "containerStatuses": [{"ready": True}],
+        "conditions": [{"status": "True", "type": "Ready"}],
+    }
+}
+
+not_running_fluentd_pod = {
+    "metadata": {
+        "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+        "name": "logging-fluentd-2",
+    },
+    "status": {
+        "phase": "Unknown",
+        "containerStatuses": [{"ready": True}, {"ready": False}],
+        "conditions": [{"status": "True", "type": "Ready"}],
+    }
+}
+
+
+@pytest.mark.parametrize('name, use_journald, logging_driver, extra_words', [
+    (
+        'test success with use_journald=false, and docker config set to use "json-file"',
+        False,
+        "json-file",
+        [],
+    ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_non_master(name, use_journald, logging_driver, extra_words):
+    def execute_module(module_name, args):
+        if module_name == "docker_info":
+            return {
+                "info": {
+                    "LoggingDriver": logging_driver,
+                }
+            }
+
+        return {}
+
+    task_vars = dict(
+        group_names=["nodes", "etcd"],
+        openshift_logging_fluentd_use_journal=use_journald,
+        openshift=dict(
+            common=dict(config_base=""),
+        ),
+    )
+
+    check = FluentdConfig(execute_module, task_vars)
+    check.execute_module = execute_module
+    error = check.check_logging_config()
+
+    assert error is None
+
+
+@pytest.mark.parametrize('name, use_journald, logging_driver, words', [
+    (
+        'test failure with use_journald=false, but docker config set to use "journald"',
+        False,
+        "journald",
+        ['json log files', 'has been set to use "journald"'],
+    ),
+    (
+        'test failure with use_journald=false, but docker config set to use an "unsupported" driver',
+        False,
+        "unsupported",
+        ["json log files", 'has been set to use "unsupported"'],
+    ),
+    (
+        'test failure with use_journald=true, but docker config set to use "json-file"',
+        True,
+        "json-file",
+        ['logs from "journald"', 'has been set to use "json-file"'],
+    ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_non_master_failed(name, use_journald, logging_driver, words):
+    def execute_module(module_name, args):
+        if module_name == "docker_info":
+            return {
+                "info": {
+                    "LoggingDriver": logging_driver,
+                }
+            }
+
+        return {}
+
+    task_vars = dict(
+        group_names=["nodes", "etcd"],
+        openshift_logging_fluentd_use_journal=use_journald,
+        openshift=dict(
+            common=dict(config_base=""),
+        ),
+    )
+
+    check = FluentdConfig(execute_module, task_vars)
+    check.execute_module = execute_module
+    error = check.check_logging_config()
+
+    assert error is not None
+    for word in words:
+        assert word in error
+
+
+@pytest.mark.parametrize('name, pods, logging_driver, extra_words', [
+    # use_journald returns false (not using journald), but check succeeds
+    # since docker is set to use json-file
+    (
+        'test success with use_journald=false, and docker config set to use default driver "json-file"',
+        [canned_fluentd_pod(
+            [
+                {
+                    "name": "container1",
+                    "env": [{
+                        "name": "USE_JOURNAL",
+                        "value": "false",
+                    }],
+                },
+            ]
+        )],
+        "json-file",
+        [],
+    ),
+    (
+        'test success with USE_JOURNAL env var missing and docker config set to use default driver "json-file"',
+        [canned_fluentd_pod(
+            [
+                {
+                    "name": "container1",
+                    "env": [{
+                        "name": "RANDOM",
+                        "value": "value",
+                    }],
+                },
+            ]
+        )],
+        "json-file",
+        [],
+    ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_master(name, pods, logging_driver, extra_words):
+    def execute_module(module_name, args):
+        if module_name == "docker_info":
+            return {
+                "info": {
+                    "LoggingDriver": logging_driver,
+                }
+            }
+
+        return {}
+
+    task_vars = dict(
+        group_names=["masters"],
+        openshift=dict(
+            common=dict(config_base=""),
+        ),
+    )
+
+    def get_pods(namespace, logging_component):
+        return pods, None
+
+    check = FluentdConfig(execute_module, task_vars)
+    check.execute_module = execute_module
+    check.get_pods_for_component = get_pods
+    error = check.check_logging_config()
+
+    assert error is None
+
+
+@pytest.mark.parametrize('name, pods, logging_driver, words', [
+    (
+        'test failure with use_journald=false, but docker config set to use "journald"',
+        [canned_fluentd_pod(
+            [
+                {
+                    "name": "container1",
+                    "env": [{
+                        "name": "USE_JOURNAL",
+                        "value": "false",
+                    }],
+                },
+            ]
+        )],
+        "journald",
+        ['json log files', 'has been set to use "journald"'],
+    ),
+    (
+        'test failure with use_journald=true, but docker config set to use "json-file"',
+        [fluentd_pod],
+        "json-file",
+        ['logs from "journald"', 'has been set to use "json-file"'],
+    ),
+    (
+        'test failure with use_journald=false, but docker set to use an "unsupported" driver',
+        [canned_fluentd_pod(
+            [
+                {
+                    "name": "container1",
+                    "env": [{
+                        "name": "USE_JOURNAL",
+                        "value": "false",
+                    }],
+                },
+            ]
+        )],
+        "unsupported",
+        ["json log files", 'has been set to use "unsupported"'],
+    ),
+    (
+        'test failure with USE_JOURNAL env var missing and docker config set to use "journald"',
+        [canned_fluentd_pod(
+            [
+                {
+                    "name": "container1",
+                    "env": [{
+                        "name": "RANDOM",
+                        "value": "value",
+                    }],
+                },
+            ]
+        )],
+        "journald",
+        ["configuration is set to", "json log files"],
+    ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_master_failed(name, pods, logging_driver, words):
+    def execute_module(module_name, args):
+        if module_name == "docker_info":
+            return {
+                "info": {
+                    "LoggingDriver": logging_driver,
+                }
+            }
+
+        return {}
+
+    task_vars = dict(
+        group_names=["masters"],
+        openshift=dict(
+            common=dict(config_base=""),
+        ),
+    )
+
+    def get_pods(namespace, logging_component):
+        return pods, None
+
+    check = FluentdConfig(execute_module, task_vars)
+    check.execute_module = execute_module
+    check.get_pods_for_component = get_pods
+    error = check.check_logging_config()
+
+    assert error is not None
+    for word in words:
+        assert word in error
+
+
+@pytest.mark.parametrize('name, pods, response, logging_driver, extra_words', [
+    (
+        'test OpenShiftCheckException with no running containers',
+        [canned_fluentd_pod([])],
+        {
+            "failed": True,
+            "result": "unexpected",
+        },
+        "json-file",
+        ['no running containers'],
+    ),
+    (
+        'test OpenShiftCheckException one container and no env vars set',
+        [canned_fluentd_pod(
+            [
+                {
+                    "name": "container1",
+                    "env": [],
+                },
+            ]
+        )],
+        {
+            "failed": True,
+            "result": "unexpected",
+        },
+        "json-file",
+        ['no environment variables'],
+    ),
+], ids=lambda argvals: argvals[0])
+def test_check_logging_config_master_fails_on_unscheduled_deployment(name, pods, response, logging_driver, extra_words):
+    def execute_module(module_name, args):
+        if module_name == "docker_info":
+            return {
+                "info": {
+                    "LoggingDriver": logging_driver,
+                }
+            }
+
+        return {}
+
+    task_vars = dict(
+        group_names=["masters"],
+        openshift=dict(
+            common=dict(config_base=""),
+        ),
+    )
+
+    def get_pods(namespace, logging_component):
+        return pods, None
+
+    check = FluentdConfig(execute_module, task_vars)
+    check.get_pods_for_component = get_pods
+
+    with pytest.raises(OpenShiftCheckException) as error:
+        check.check_logging_config()
+
+    assert error is not None
+    for word in extra_words:
+        assert word in str(error)

+ 2 - 10
roles/openshift_health_checker/test/fluentd_test.py

@@ -4,14 +4,6 @@ import json
 from openshift_checks.logging.fluentd import Fluentd
 
 
-def canned_fluentd(exec_oc=None):
-    """Create a Fluentd check object with canned exec_oc method"""
-    check = Fluentd("dummy")  # fails if a module is actually invoked
-    if exec_oc:
-        check._exec_oc = exec_oc
-    return check
-
-
 def assert_error(error, expect_error):
     if expect_error:
         assert error
@@ -103,7 +95,7 @@ fluentd_node3_unlabeled = {
     ),
 ])
 def test_get_fluentd_pods(pods, nodes, expect_error):
-    check = canned_fluentd(exec_oc=lambda cmd, args: json.dumps(dict(items=nodes)))
-
+    check = Fluentd()
+    check.exec_oc = lambda ns, cmd, args: json.dumps(dict(items=nodes))
     error = check.check_fluentd(pods)
     assert_error(error, expect_error)

+ 4 - 11
roles/openshift_health_checker/test/kibana_test.py

@@ -11,14 +11,6 @@ except ImportError:
 from openshift_checks.logging.kibana import Kibana
 
 
-def canned_kibana(exec_oc=None):
-    """Create a Kibana check object with canned exec_oc method"""
-    check = Kibana()  # fails if a module is actually invoked
-    if exec_oc:
-        check._exec_oc = exec_oc
-    return check
-
-
 def assert_error(error, expect_error):
     if expect_error:
         assert error
@@ -68,7 +60,7 @@ not_running_kibana_pod = {
     ),
 ])
 def test_check_kibana(pods, expect_error):
-    check = canned_kibana()
+    check = Kibana()
     error = check.check_kibana(pods)
     assert_error(error, expect_error)
 
@@ -137,7 +129,8 @@ def test_check_kibana(pods, expect_error):
     ),
 ])
 def test_get_kibana_url(route, expect_url, expect_error):
-    check = canned_kibana(exec_oc=lambda cmd, args: json.dumps(route) if route else "")
+    check = Kibana()
+    check.exec_oc = lambda ns, cmd, args: json.dumps(route) if route else ""
 
     url, error = check._get_kibana_url()
     if expect_url:
@@ -210,7 +203,7 @@ def test_verify_url_external_failure(lib_result, expect, monkeypatch):
         raise lib_result
     monkeypatch.setattr(urllib2, 'urlopen', urlopen)
 
-    check = canned_kibana()
+    check = Kibana()
     check._get_kibana_url = lambda: ('url', None)
     check._verify_url_internal = lambda url: None