logging_index_time_test.py
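"""Unit tests for the LoggingIndexTime check in openshift_checks.logging.logging_index_time."""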

import json

import pytest

from openshift_checks.logging.logging_index_time import LoggingIndexTime, OpenShiftCheckException

SAMPLE_UUID = "unique-test-uuid"


def canned_loggingindextime(exec_oc=None):
    """Create a check object with a canned exec_oc method"""
    check = LoggingIndexTime()  # fails if a module is actually invoked
    if exec_oc:
        check.exec_oc = exec_oc
    return check
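

# Canned pod definitions used as fixtures by the tests below.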
plain_running_elasticsearch_pod = {
    "metadata": {
        "labels": {"component": "es", "deploymentconfig": "logging-es-data-master"},
        "name": "logging-es-data-master-1",
    },
    "status": {
        "containerStatuses": [{"ready": True}, {"ready": True}],
        "phase": "Running",
    }
}

plain_running_kibana_pod = {
    "metadata": {
        "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
        "name": "logging-kibana-1",
    },
    "status": {
        "containerStatuses": [{"ready": True}, {"ready": True}],
        "phase": "Running",
    }
}

not_running_kibana_pod = {
    "metadata": {
        "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
        "name": "logging-kibana-2",
    },
    "status": {
        "containerStatuses": [{"ready": True}, {"ready": False}],
        "conditions": [{"status": "True", "type": "Ready"}],
        "phase": "pending",
    }
}
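

# running_pods should pass through only the fully ready, Running pods.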
@pytest.mark.parametrize('pods, expect_pods', [
    (
        [not_running_kibana_pod],
        [],
    ),
    (
        [plain_running_kibana_pod],
        [plain_running_kibana_pod],
    ),
    (
        [],
        [],
    )
])
def test_check_running_pods(pods, expect_pods):
    check = canned_loggingindextime()
    pods = check.running_pods(pods)
    assert pods == expect_pods
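

# A non-numeric openshift_check_logging_index_timeout_seconds value should
# abort the run with an InvalidTimeout error.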
def test_bad_config_param():
    with pytest.raises(OpenShiftCheckException) as error:
        LoggingIndexTime(task_vars=dict(openshift_check_logging_index_timeout_seconds="foo")).run()
    assert 'InvalidTimeout' == error.value.name
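

# Without any running kibana pods, the check fails with kibanaNoRunningPods.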
def test_no_running_pods():
    check = LoggingIndexTime()
    check.get_pods_for_component = lambda *_: [not_running_kibana_pod]
    with pytest.raises(OpenShiftCheckException) as error:
        check.run()
    assert 'kibanaNoRunningPods' == error.value.name
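

# Happy path: running kibana and elasticsearch pods plus stubbed helpers
# produce a non-failed run.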
def test_with_running_pods():
    check = LoggingIndexTime()
    check.get_pods_for_component = lambda *_: [plain_running_kibana_pod, plain_running_elasticsearch_pod]
    check.curl_kibana_with_uuid = lambda *_: SAMPLE_UUID
    check.wait_until_cmd_or_err = lambda *_: None
    assert not check.run().get("failed")
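

# wait_until_cmd_or_err returns quietly once Elasticsearch reports a match
# count of 1 for the UUID.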
@pytest.mark.parametrize('name, json_response, uuid, timeout', [
    (
        'valid count in response',
        {
            "count": 1,
        },
        SAMPLE_UUID,
        0.001,
    ),
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err_succeeds(name, json_response, uuid, timeout):
    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
    check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, uuid, timeout)
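

# Malformed Elasticsearch responses, or a zero match count within the timeout,
# each raise a named OpenShiftCheckException.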
@pytest.mark.parametrize('name, json_response, timeout, expect_error', [
    (
        'invalid json response',
        {
            "invalid_field": 1,
        },
        0.001,
        'esInvalidResponse',
    ),
    (
        'empty response',
        {},
        0.001,
        'esInvalidResponse',
    ),
    (
        'valid response but invalid match count',
        {
            "count": 0,
        },
        0.005,
        'NoMatchFound',
    )
], ids=lambda argval: argval[0])
def test_wait_until_cmd_or_err(name, json_response, timeout, expect_error):
    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
    with pytest.raises(OpenShiftCheckException) as error:
        check.wait_until_cmd_or_err(plain_running_elasticsearch_pod, SAMPLE_UUID, timeout)
    assert expect_error == error.value.name
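

# A Kibana "statusCode": 404 response for the freshly generated UUID is the
# expected result, so curl_kibana_with_uuid returns the UUID.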
def test_curl_kibana_with_uuid():
    check = canned_loggingindextime(lambda *_: json.dumps({"statusCode": 404}))
    check.generate_uuid = lambda: SAMPLE_UUID
    assert SAMPLE_UUID == check.curl_kibana_with_uuid(plain_running_kibana_pod)
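

# A Kibana response without a statusCode, or with a code other than 404,
# raises the corresponding named error.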
@pytest.mark.parametrize('name, json_response, expect_error', [
    (
        'invalid json response',
        {
            "invalid_field": "invalid",
        },
        'kibanaInvalidResponse',
    ),
    (
        'wrong error code in response',
        {
            "statusCode": 500,
        },
        'kibanaInvalidReturnCode',
    ),
], ids=lambda argval: argval[0])
def test_failed_curl_kibana_with_uuid(name, json_response, expect_error):
    check = canned_loggingindextime(lambda *_: json.dumps(json_response))
    check.generate_uuid = lambda: SAMPLE_UUID
    with pytest.raises(OpenShiftCheckException) as error:
        check.curl_kibana_with_uuid(plain_running_kibana_pod)
    assert expect_error == error.value.name