Переглянути джерело

openshift_checks: improve comments/names

Luke Meyer 7 роки тому
батько
коміт
3f3f87b798

+ 21 - 17
roles/openshift_health_checker/callback_plugins/zz_failure_summary.py

@@ -1,6 +1,6 @@
-'''
-Ansible callback plugin.
-'''
+"""
+Ansible callback plugin to give a nicely formatted summary of failures.
+"""
 
 # Reason: In several locations below we disable pylint protected-access
 #         for Ansible objects that do not give us any public way
@@ -16,11 +16,11 @@ from ansible.utils.color import stringc
 
 
 class CallbackModule(CallbackBase):
-    '''
+    """
     This callback plugin stores task results and summarizes failures.
     The file name is prefixed with `zz_` to make this plugin be loaded last by
     Ansible, thus making its output the last thing that users see.
-    '''
+    """
 
     CALLBACK_VERSION = 2.0
     CALLBACK_TYPE = 'aggregate'
@@ -48,7 +48,7 @@ class CallbackModule(CallbackBase):
             self._print_failure_details(self.__failures)
 
     def _print_failure_details(self, failures):
-        '''Print a summary of failed tasks or checks.'''
+        """Print a summary of failed tasks or checks."""
         self._display.display(u'\nFailure summary:\n')
 
         width = len(str(len(failures)))
@@ -69,7 +69,9 @@ class CallbackModule(CallbackBase):
         playbook_context = None
         # re: result attrs see top comment  # pylint: disable=protected-access
         for failure in failures:
-            # get context from check task result since callback plugins cannot access task vars
+            # Get context from check task result since callback plugins cannot access task vars.
+            # NOTE: thus, context is not known unless checks run. Failures prior to checks running
+            # don't have playbook_context in the results. But we only use it now when checks fail.
             playbook_context = playbook_context or failure['result']._result.get('playbook_context')
             failed_checks.update(
                 name
@@ -81,8 +83,11 @@ class CallbackModule(CallbackBase):
 
     def _print_check_failure_summary(self, failed_checks, context):
         checks = ','.join(sorted(failed_checks))
-        # NOTE: context is not set if all failures occurred prior to checks task
-        summary = (
+        # The purpose of specifying context is to vary the output depending on what the user was
+        # expecting to happen (based on which playbook they ran). The only use currently is to
+        # vary the message depending on whether the user was deliberately running checks or was
+        # trying to install/upgrade and checks are just included. Other use cases may arise.
+        summary = (  # default to explaining what checks are in the first place
             '\n'
             'The execution of "{playbook}"\n'
             'includes checks designed to fail early if the requirements\n'
@@ -94,27 +99,26 @@ class CallbackModule(CallbackBase):
             'Some checks may be configurable by variables if your requirements\n'
             'are different from the defaults; consult check documentation.\n'
             'Variables can be set in the inventory or passed on the\n'
-            'command line using the -e flag to ansible-playbook.\n'
+            'command line using the -e flag to ansible-playbook.\n\n'
         ).format(playbook=self._playbook_file, checks=checks)
         if context in ['pre-install', 'health']:
-            summary = (
+            summary = (  # user was expecting to run checks, less explanation needed
                 '\n'
                 'You may choose to configure or disable failing checks by\n'
                 'setting Ansible variables. To disable those above:\n\n'
                 '    openshift_disable_check={checks}\n\n'
                 'Consult check documentation for configurable variables.\n'
                 'Variables can be set in the inventory or passed on the\n'
-                'command line using the -e flag to ansible-playbook.\n'
+                'command line using the -e flag to ansible-playbook.\n\n'
             ).format(checks=checks)
-        # other expected contexts: install, upgrade
         self._display.display(summary)
 
 
 # re: result attrs see top comment  # pylint: disable=protected-access
 def _format_failure(failure):
-    '''Return a list of pretty-formatted text entries describing a failure, including
+    """Return a list of pretty-formatted text entries describing a failure, including
     relevant information about it. Expect that the list of text entries will be joined
-    by a newline separator when output to the user.'''
+    by a newline separator when output to the user."""
     result = failure['result']
     host = result._host.get_name()
     play = _get_play(result._task)
@@ -135,7 +139,7 @@ def _format_failure(failure):
 
 
 def _format_failed_checks(checks):
-    '''Return pretty-formatted text describing checks that failed.'''
+    """Return pretty-formatted text describing checks that failed."""
     failed_check_msgs = []
     for check, body in checks.items():
         if body.get('failed', False):   # only show the failed checks
@@ -150,7 +154,7 @@ def _format_failed_checks(checks):
 # This is inspired by ansible.playbook.base.Base.dump_me.
 # re: play/task/block attrs see top comment  # pylint: disable=protected-access
 def _get_play(obj):
-    '''Given a task or block, recursively tries to find its parent play.'''
+    """Given a task or block, recursively try to find its parent play."""
     if hasattr(obj, '_play'):
         return obj._play
     if getattr(obj, '_parent'):

+ 3 - 3
roles/openshift_health_checker/library/aos_version.py

@@ -1,5 +1,5 @@
 #!/usr/bin/python
-'''
+"""
 Ansible module for yum-based systems determining if multiple releases
 of an OpenShift package are available, and if the release requested
 (if any) is available down to the given precision.
@@ -16,7 +16,7 @@ of release availability already. Without duplicating all that, we would
 like the user to have a helpful error message if we detect things will
 not work out right. Note that if openshift_release is not specified in
 the inventory, the version comparison checks just pass.
-'''
+"""
 
 from ansible.module_utils.basic import AnsibleModule
 # NOTE: because of the dependency on yum (Python 2-only), this module does not
@@ -32,7 +32,7 @@ except ImportError as err:
 
 
 class AosVersionException(Exception):
-    '''Base exception class for package version problems'''
+    """Base exception class for package version problems"""
     def __init__(self, message, problem_pkgs=None):
         Exception.__init__(self, message)
         self.problem_pkgs = problem_pkgs

+ 0 - 0
roles/openshift_health_checker/library/check_yum_update.py


+ 1 - 1
roles/openshift_health_checker/library/docker_info.py

@@ -1,4 +1,3 @@
-# pylint: disable=missing-docstring
 """
 Ansible module for determining information about the docker host.
 
@@ -13,6 +12,7 @@ from ansible.module_utils.docker_common import AnsibleDockerClient
 
 
 def main():
+    """Entrypoint for running an Ansible module."""
     client = AnsibleDockerClient()
 
     client.module.exit_json(

+ 12 - 9
roles/openshift_health_checker/openshift_checks/docker_image_availability.py

@@ -22,14 +22,16 @@ DEPLOYMENT_IMAGE_INFO = {
 class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
     """Check that required Docker images are available.
 
-    This check attempts to ensure that required docker images are
-    either present locally, or able to be pulled down from available
-    registries defined in a host machine.
+    Determine docker images that an install would require and check that they
+    are either present in the host's docker index, or available for the host to pull
+    with known registries as defined in our inventory file (or defaults).
     """
 
     name = "docker_image_availability"
     tags = ["preflight"]
-    dependencies = ["skopeo", "python-docker-py"]
+    # we use python-docker-py to check local docker for images, and skopeo
+    # to look for images available remotely without waiting to pull them.
+    dependencies = ["python-docker-py", "skopeo"]
 
     @classmethod
     def is_active(cls, task_vars):
@@ -154,17 +156,18 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
 
         return list(regs)
 
-    def available_images(self, images, registries, task_vars):
-        """Inspect existing images using Skopeo and return all images successfully inspected."""
+    def available_images(self, images, default_registries, task_vars):
+        """Search remotely for images. Returns: list of images found."""
         return [
             image for image in images
-            if self.is_available_skopeo_image(image, registries, task_vars)
+            if self.is_available_skopeo_image(image, default_registries, task_vars)
         ]
 
-    def is_available_skopeo_image(self, image, registries, task_vars):
+    def is_available_skopeo_image(self, image, default_registries, task_vars):
         """Use Skopeo to determine if required image exists in known registry(s)."""
+        registries = default_registries
 
-        # if image does already includes a registry, just use that
+        # if image already includes a registry, only use that
         if image.count("/") > 1:
             registry, image = image.split("/", 1)
             registries = [registry]

+ 2 - 6
roles/openshift_health_checker/openshift_checks/logging/curator.py

@@ -1,13 +1,11 @@
-"""
-Module for performing checks on an Curator logging deployment
-"""
+"""Check for an aggregated logging Curator deployment"""
 
 from openshift_checks import get_var
 from openshift_checks.logging.logging import LoggingCheck
 
 
 class Curator(LoggingCheck):
-    """Module that checks an integrated logging Curator deployment"""
+    """Check for an aggregated logging Curator deployment"""
 
     name = "curator"
     tags = ["health", "logging"]
@@ -15,8 +13,6 @@ class Curator(LoggingCheck):
     logging_namespace = None
 
     def run(self, tmp, task_vars):
-        """Check various things and gather errors. Returns: result as hash"""
-
         self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
         curator_pods, error = super(Curator, self).get_pods_for_component(
             self.module_executor,

+ 3 - 5
roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py

@@ -1,6 +1,4 @@
-"""
-Module for performing checks on an Elasticsearch logging deployment
-"""
+"""Check for an aggregated logging Elasticsearch deployment"""
 
 import json
 import re
@@ -10,7 +8,7 @@ from openshift_checks.logging.logging import LoggingCheck
 
 
 class Elasticsearch(LoggingCheck):
-    """Module that checks an integrated logging Elasticsearch deployment"""
+    """Check for an aggregated logging Elasticsearch deployment"""
 
     name = "elasticsearch"
     tags = ["health", "logging"]
@@ -41,7 +39,7 @@ class Elasticsearch(LoggingCheck):
         return {"failed": False, "changed": False, "msg": 'No problems found with Elasticsearch deployment.'}
 
     def _not_running_elasticsearch_pods(self, es_pods):
-        """Returns: list of running pods, list of errors about non-running pods"""
+        """Returns: list of pods that are not running, list of errors about non-running pods"""
         not_running = super(Elasticsearch, self).not_running_pods(es_pods)
         if not_running:
             return not_running, [(

+ 3 - 4
roles/openshift_health_checker/openshift_checks/logging/fluentd.py

@@ -1,6 +1,4 @@
-"""
-Module for performing checks on an Fluentd logging deployment
-"""
+"""Check for an aggregated logging Fluentd deployment"""
 
 import json
 
@@ -9,7 +7,8 @@ from openshift_checks.logging.logging import LoggingCheck
 
 
 class Fluentd(LoggingCheck):
-    """Module that checks an integrated logging Fluentd deployment"""
+    """Check for an aggregated logging Fluentd deployment"""
+
     name = "fluentd"
     tags = ["health", "logging"]
 

+ 1 - 1
roles/openshift_health_checker/openshift_checks/logging/logging.py

@@ -9,7 +9,7 @@ from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
 
 
 class LoggingCheck(OpenShiftCheck):
-    """Base class for logging component checks"""
+    """Base class for OpenShift aggregated logging component checks"""
 
     name = "logging"
     logging_namespace = "logging"

+ 1 - 1
roles/openshift_health_checker/openshift_checks/memory_availability.py

@@ -1,4 +1,4 @@
-# pylint: disable=missing-docstring
+"""Check that recommended memory is available."""
 from openshift_checks import OpenShiftCheck, get_var
 
 MIB = 2**20

+ 5 - 1
roles/openshift_health_checker/openshift_checks/package_availability.py

@@ -1,4 +1,5 @@
-# pylint: disable=missing-docstring
+"""Check that required RPM packages are available."""
+
 from openshift_checks import OpenShiftCheck, get_var
 from openshift_checks.mixins import NotContainerizedMixin
 
@@ -11,6 +12,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
 
     @classmethod
     def is_active(cls, task_vars):
+        """Run only when yum is the package manager as the code is specific to it."""
         return super(PackageAvailability, cls).is_active(task_vars) and task_vars["ansible_pkg_mgr"] == "yum"
 
     def run(self, tmp, task_vars):
@@ -29,6 +31,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
 
     @staticmethod
     def master_packages(rpm_prefix):
+        """Return a list of RPMs that we expect a master install to have available."""
         return [
             "{rpm_prefix}".format(rpm_prefix=rpm_prefix),
             "{rpm_prefix}-clients".format(rpm_prefix=rpm_prefix),
@@ -44,6 +47,7 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
 
     @staticmethod
     def node_packages(rpm_prefix):
+        """Return a list of RPMs that we expect a node install to have available."""
         return [
             "{rpm_prefix}".format(rpm_prefix=rpm_prefix),
             "{rpm_prefix}-node".format(rpm_prefix=rpm_prefix),

+ 2 - 2
roles/openshift_health_checker/openshift_checks/package_update.py

@@ -1,10 +1,10 @@
-# pylint: disable=missing-docstring
+"""Check that a yum update would not run into conflicts with available packages."""
 from openshift_checks import OpenShiftCheck
 from openshift_checks.mixins import NotContainerizedMixin
 
 
 class PackageUpdate(NotContainerizedMixin, OpenShiftCheck):
-    """Check that there are no conflicts in RPM packages."""
+    """Check that a yum update would not run into conflicts with available packages."""
 
     name = "package_update"
     tags = ["preflight"]

+ 3 - 1
roles/openshift_health_checker/openshift_checks/package_version.py

@@ -1,4 +1,4 @@
-# pylint: disable=missing-docstring
+"""Check that available RPM packages match the required versions."""
 from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
 from openshift_checks.mixins import NotContainerizedMixin
 
@@ -107,6 +107,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
         raise OpenShiftCheckException(msg.format(openshift_version))
 
     def get_openshift_version(self, task_vars):
+        """Return the received image tag as a normalized X.Y minor version string."""
         openshift_version = get_var(task_vars, "openshift_image_tag")
         if openshift_version and openshift_version[0] == 'v':
             openshift_version = openshift_version[1:]
@@ -114,6 +115,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
         return self.parse_version(openshift_version)
 
     def parse_version(self, version):
+        """Return a normalized X.Y minor version string."""
         components = version.split(".")
         if not components or len(components) < 2:
             msg = "An invalid version of OpenShift was found for this host: {}"