# fluentd_test.py — unit tests for openshift_checks.logging.fluentd.Fluentd
  1. import pytest
  2. import json
  3. from openshift_checks.logging.fluentd import Fluentd
  4. def assert_error(error, expect_error):
  5. if expect_error:
  6. assert error
  7. assert expect_error in error
  8. else:
  9. assert not error
  10. fluentd_pod_node1 = {
  11. "metadata": {
  12. "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
  13. "name": "logging-fluentd-1",
  14. },
  15. "spec": {"host": "node1", "nodeName": "node1"},
  16. "status": {
  17. "containerStatuses": [{"ready": True}],
  18. "conditions": [{"status": "True", "type": "Ready"}],
  19. }
  20. }
  21. fluentd_pod_node2_down = {
  22. "metadata": {
  23. "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
  24. "name": "logging-fluentd-2",
  25. },
  26. "spec": {"host": "node2", "nodeName": "node2"},
  27. "status": {
  28. "containerStatuses": [{"ready": False}],
  29. "conditions": [{"status": "False", "type": "Ready"}],
  30. }
  31. }
  32. fluentd_node1 = {
  33. "metadata": {
  34. "labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "node1"},
  35. "name": "node1",
  36. },
  37. "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.1"}]},
  38. }
  39. fluentd_node2 = {
  40. "metadata": {
  41. "labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "hostname"},
  42. "name": "node2",
  43. },
  44. "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.2"}]},
  45. }
  46. fluentd_node3_unlabeled = {
  47. "metadata": {
  48. "labels": {"kubernetes.io/hostname": "hostname"},
  49. "name": "node3",
  50. },
  51. "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.3"}]},
  52. }
  53. @pytest.mark.parametrize('pods, nodes, expect_error', [
  54. (
  55. [],
  56. [],
  57. 'No nodes appear to be defined',
  58. ),
  59. (
  60. [],
  61. [fluentd_node3_unlabeled],
  62. 'There are no nodes with the fluentd label',
  63. ),
  64. (
  65. [],
  66. [fluentd_node1, fluentd_node3_unlabeled],
  67. 'Fluentd will not aggregate logs from these nodes.',
  68. ),
  69. (
  70. [],
  71. [fluentd_node2],
  72. "nodes are supposed to have a Fluentd pod but do not",
  73. ),
  74. (
  75. [fluentd_pod_node1, fluentd_pod_node1],
  76. [fluentd_node1],
  77. 'more Fluentd pods running than nodes labeled',
  78. ),
  79. (
  80. [fluentd_pod_node2_down],
  81. [fluentd_node2],
  82. "Fluentd pods are supposed to be running",
  83. ),
  84. (
  85. [fluentd_pod_node1],
  86. [fluentd_node1],
  87. None,
  88. ),
  89. ])
  90. def test_get_fluentd_pods(pods, nodes, expect_error):
  91. check = Fluentd()
  92. check.exec_oc = lambda ns, cmd, args: json.dumps(dict(items=nodes))
  93. error = check.check_fluentd(pods)
  94. assert_error(error, expect_error)