"""Check for an aggregated logging Fluentd deployment"""
import json

from openshift_checks import get_var
from openshift_checks.logging.logging import LoggingCheck
  5. class Fluentd(LoggingCheck):
  6. """Check for an aggregated logging Fluentd deployment"""
  7. name = "fluentd"
  8. tags = ["health", "logging"]
  9. logging_namespace = None
  10. def run(self, tmp, task_vars):
  11. """Check various things and gather errors. Returns: result as hash"""
  12. self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
  13. fluentd_pods, error = super(Fluentd, self).get_pods_for_component(
  14. self.execute_module,
  15. self.logging_namespace,
  16. "fluentd",
  17. task_vars,
  18. )
  19. if error:
  20. return {"failed": True, "changed": False, "msg": error}
  21. check_error = self.check_fluentd(fluentd_pods, task_vars)
  22. if check_error:
  23. msg = ("The following Fluentd deployment issue was found:"
  24. "\n-------\n"
  25. "{}".format(check_error))
  26. return {"failed": True, "changed": False, "msg": msg}
  27. # TODO(lmeyer): run it all again for the ops cluster
  28. return {"failed": False, "changed": False, "msg": 'No problems found with Fluentd deployment.'}
  29. @staticmethod
  30. def _filter_fluentd_labeled_nodes(nodes_by_name, node_selector):
  31. """Filter to all nodes with fluentd label. Returns dict(name: node), error string"""
  32. label, value = node_selector.split('=', 1)
  33. fluentd_nodes = {
  34. name: node for name, node in nodes_by_name.items()
  35. if node['metadata']['labels'].get(label) == value
  36. }
  37. if not fluentd_nodes:
  38. return None, (
  39. 'There are no nodes with the fluentd label {label}.\n'
  40. 'This means no logs will be aggregated from the nodes.'
  41. ).format(label=node_selector)
  42. return fluentd_nodes, None
  43. @staticmethod
  44. def _check_node_labeling(nodes_by_name, fluentd_nodes, node_selector, task_vars):
  45. """Note if nodes are not labeled as expected. Returns: error string"""
  46. intended_nodes = get_var(task_vars, 'openshift_logging_fluentd_hosts', default=['--all'])
  47. if not intended_nodes or '--all' in intended_nodes:
  48. intended_nodes = nodes_by_name.keys()
  49. nodes_missing_labels = set(intended_nodes) - set(fluentd_nodes.keys())
  50. if nodes_missing_labels:
  51. return (
  52. 'The following nodes are supposed to be labeled with {label} but are not:\n'
  53. ' {nodes}\n'
  54. 'Fluentd will not aggregate logs from these nodes.'
  55. ).format(label=node_selector, nodes=', '.join(nodes_missing_labels))
  56. return None
  57. @staticmethod
  58. def _check_nodes_have_fluentd(pods, fluentd_nodes):
  59. """Make sure fluentd is on all the labeled nodes. Returns: error string"""
  60. unmatched_nodes = fluentd_nodes.copy()
  61. node_names_by_label = {
  62. node['metadata']['labels']['kubernetes.io/hostname']: name
  63. for name, node in fluentd_nodes.items()
  64. }
  65. node_names_by_internal_ip = {
  66. address['address']: name
  67. for name, node in fluentd_nodes.items()
  68. for address in node['status']['addresses']
  69. if address['type'] == "InternalIP"
  70. }
  71. for pod in pods:
  72. for name in [
  73. pod['spec']['nodeName'],
  74. node_names_by_internal_ip.get(pod['spec']['nodeName']),
  75. node_names_by_label.get(pod.get('spec', {}).get('host')),
  76. ]:
  77. unmatched_nodes.pop(name, None)
  78. if unmatched_nodes:
  79. return (
  80. 'The following nodes are supposed to have a Fluentd pod but do not:\n'
  81. '{nodes}'
  82. 'These nodes will not have their logs aggregated.'
  83. ).format(nodes=''.join(
  84. " {}\n".format(name)
  85. for name in unmatched_nodes.keys()
  86. ))
  87. return None
  88. def _check_fluentd_pods_running(self, pods):
  89. """Make sure all fluentd pods are running. Returns: error string"""
  90. not_running = super(Fluentd, self).not_running_pods(pods)
  91. if not_running:
  92. return (
  93. 'The following Fluentd pods are supposed to be running but are not:\n'
  94. '{pods}'
  95. 'These pods will not aggregate logs from their nodes.'
  96. ).format(pods=''.join(
  97. " {} ({})\n".format(pod['metadata']['name'], pod['spec'].get('host', 'None'))
  98. for pod in not_running
  99. ))
  100. return None
  101. def check_fluentd(self, pods, task_vars):
  102. """Verify fluentd is running everywhere. Returns: error string"""
  103. node_selector = get_var(task_vars, 'openshift_logging_fluentd_nodeselector',
  104. default='logging-infra-fluentd=true')
  105. nodes_by_name, error = self.get_nodes_by_name(task_vars)
  106. if error:
  107. return error
  108. fluentd_nodes, error = self._filter_fluentd_labeled_nodes(nodes_by_name, node_selector)
  109. if error:
  110. return error
  111. error_msgs = []
  112. error = self._check_node_labeling(nodes_by_name, fluentd_nodes, node_selector, task_vars)
  113. if error:
  114. error_msgs.append(error)
  115. error = self._check_nodes_have_fluentd(pods, fluentd_nodes)
  116. if error:
  117. error_msgs.append(error)
  118. error = self._check_fluentd_pods_running(pods)
  119. if error:
  120. error_msgs.append(error)
  121. # Make sure there are no extra fluentd pods
  122. if len(pods) > len(fluentd_nodes):
  123. error_msgs.append(
  124. 'There are more Fluentd pods running than nodes labeled.\n'
  125. 'This may not cause problems with logging but it likely indicates something wrong.'
  126. )
  127. return '\n'.join(error_msgs)
  128. def get_nodes_by_name(self, task_vars):
  129. """Retrieve all the node definitions. Returns: dict(name: node), error string"""
  130. nodes_json = self._exec_oc("get nodes -o json", [], task_vars)
  131. try:
  132. nodes = json.loads(nodes_json)
  133. except ValueError: # no valid json - should not happen
  134. return None, "Could not obtain a list of nodes to validate fluentd. Output from oc get:\n" + nodes_json
  135. if not nodes or not nodes.get('items'): # also should not happen
  136. return None, "No nodes appear to be defined according to the API."
  137. return {
  138. node['metadata']['name']: node
  139. for node in nodes['items']
  140. }, None
  141. def _exec_oc(self, cmd_str, extra_args, task_vars):
  142. return super(Fluentd, self).exec_oc(self.execute_module,
  143. self.logging_namespace,
  144. cmd_str,
  145. extra_args,
  146. task_vars)