fluentd.py 6.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167
"""Check for an aggregated logging Fluentd deployment"""
import json

from openshift_checks.logging.logging import LoggingCheck
  4. class Fluentd(LoggingCheck):
  5. """Check for an aggregated logging Fluentd deployment"""
  6. name = "fluentd"
  7. tags = ["health", "logging"]
  8. logging_namespace = None
  9. def run(self):
  10. """Check various things and gather errors. Returns: result as hash"""
  11. self.logging_namespace = self.get_var("openshift_logging_namespace", default="logging")
  12. fluentd_pods, error = super(Fluentd, self).get_pods_for_component(
  13. self.logging_namespace,
  14. "fluentd",
  15. )
  16. if error:
  17. return {"failed": True, "changed": False, "msg": error}
  18. check_error = self.check_fluentd(fluentd_pods)
  19. if check_error:
  20. msg = ("The following Fluentd deployment issue was found:"
  21. "\n-------\n"
  22. "{}".format(check_error))
  23. return {"failed": True, "changed": False, "msg": msg}
  24. # TODO(lmeyer): run it all again for the ops cluster
  25. return {"failed": False, "changed": False, "msg": 'No problems found with Fluentd deployment.'}
  26. @staticmethod
  27. def _filter_fluentd_labeled_nodes(nodes_by_name, node_selector):
  28. """Filter to all nodes with fluentd label. Returns dict(name: node), error string"""
  29. label, value = node_selector.split('=', 1)
  30. fluentd_nodes = {
  31. name: node for name, node in nodes_by_name.items()
  32. if node['metadata']['labels'].get(label) == value
  33. }
  34. if not fluentd_nodes:
  35. return None, (
  36. 'There are no nodes with the fluentd label {label}.\n'
  37. 'This means no logs will be aggregated from the nodes.'
  38. ).format(label=node_selector)
  39. return fluentd_nodes, None
  40. def _check_node_labeling(self, nodes_by_name, fluentd_nodes, node_selector):
  41. """Note if nodes are not labeled as expected. Returns: error string"""
  42. intended_nodes = self.get_var('openshift_logging_fluentd_hosts', default=['--all'])
  43. if not intended_nodes or '--all' in intended_nodes:
  44. intended_nodes = nodes_by_name.keys()
  45. nodes_missing_labels = set(intended_nodes) - set(fluentd_nodes.keys())
  46. if nodes_missing_labels:
  47. return (
  48. 'The following nodes are supposed to be labeled with {label} but are not:\n'
  49. ' {nodes}\n'
  50. 'Fluentd will not aggregate logs from these nodes.'
  51. ).format(label=node_selector, nodes=', '.join(nodes_missing_labels))
  52. return None
  53. @staticmethod
  54. def _check_nodes_have_fluentd(pods, fluentd_nodes):
  55. """Make sure fluentd is on all the labeled nodes. Returns: error string"""
  56. unmatched_nodes = fluentd_nodes.copy()
  57. node_names_by_label = {
  58. node['metadata']['labels']['kubernetes.io/hostname']: name
  59. for name, node in fluentd_nodes.items()
  60. }
  61. node_names_by_internal_ip = {
  62. address['address']: name
  63. for name, node in fluentd_nodes.items()
  64. for address in node['status']['addresses']
  65. if address['type'] == "InternalIP"
  66. }
  67. for pod in pods:
  68. for name in [
  69. pod['spec']['nodeName'],
  70. node_names_by_internal_ip.get(pod['spec']['nodeName']),
  71. node_names_by_label.get(pod.get('spec', {}).get('host')),
  72. ]:
  73. unmatched_nodes.pop(name, None)
  74. if unmatched_nodes:
  75. return (
  76. 'The following nodes are supposed to have a Fluentd pod but do not:\n'
  77. '{nodes}'
  78. 'These nodes will not have their logs aggregated.'
  79. ).format(nodes=''.join(
  80. " {}\n".format(name)
  81. for name in unmatched_nodes.keys()
  82. ))
  83. return None
  84. def _check_fluentd_pods_running(self, pods):
  85. """Make sure all fluentd pods are running. Returns: error string"""
  86. not_running = super(Fluentd, self).not_running_pods(pods)
  87. if not_running:
  88. return (
  89. 'The following Fluentd pods are supposed to be running but are not:\n'
  90. '{pods}'
  91. 'These pods will not aggregate logs from their nodes.'
  92. ).format(pods=''.join(
  93. " {} ({})\n".format(pod['metadata']['name'], pod['spec'].get('host', 'None'))
  94. for pod in not_running
  95. ))
  96. return None
  97. def check_fluentd(self, pods):
  98. """Verify fluentd is running everywhere. Returns: error string"""
  99. node_selector = self.get_var(
  100. 'openshift_logging_fluentd_nodeselector',
  101. default='logging-infra-fluentd=true'
  102. )
  103. nodes_by_name, error = self.get_nodes_by_name()
  104. if error:
  105. return error
  106. fluentd_nodes, error = self._filter_fluentd_labeled_nodes(nodes_by_name, node_selector)
  107. if error:
  108. return error
  109. error_msgs = []
  110. error = self._check_node_labeling(nodes_by_name, fluentd_nodes, node_selector)
  111. if error:
  112. error_msgs.append(error)
  113. error = self._check_nodes_have_fluentd(pods, fluentd_nodes)
  114. if error:
  115. error_msgs.append(error)
  116. error = self._check_fluentd_pods_running(pods)
  117. if error:
  118. error_msgs.append(error)
  119. # Make sure there are no extra fluentd pods
  120. if len(pods) > len(fluentd_nodes):
  121. error_msgs.append(
  122. 'There are more Fluentd pods running than nodes labeled.\n'
  123. 'This may not cause problems with logging but it likely indicates something wrong.'
  124. )
  125. return '\n'.join(error_msgs)
  126. def get_nodes_by_name(self):
  127. """Retrieve all the node definitions. Returns: dict(name: node), error string"""
  128. nodes_json = self._exec_oc("get nodes -o json", [])
  129. try:
  130. nodes = json.loads(nodes_json)
  131. except ValueError: # no valid json - should not happen
  132. return None, "Could not obtain a list of nodes to validate fluentd. Output from oc get:\n" + nodes_json
  133. if not nodes or not nodes.get('items'): # also should not happen
  134. return None, "No nodes appear to be defined according to the API."
  135. return {
  136. node['metadata']['name']: node
  137. for node in nodes['items']
  138. }, None
  139. def _exec_oc(self, cmd_str, extra_args):
  140. return super(Fluentd, self).exec_oc(
  141. self.logging_namespace,
  142. cmd_str,
  143. extra_args,
  144. )