# elasticsearch_test.py
  1. import pytest
  2. import json
  3. from openshift_checks.logging.elasticsearch import Elasticsearch
  4. task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
  5. def canned_elasticsearch(exec_oc=None):
  6. """Create an Elasticsearch check object with canned exec_oc method"""
  7. check = Elasticsearch("dummy") # fails if a module is actually invoked
  8. if exec_oc:
  9. check._exec_oc = exec_oc
  10. return check
  11. def assert_error(error, expect_error):
  12. if expect_error:
  13. assert error
  14. assert expect_error in error
  15. else:
  16. assert not error
  17. plain_es_pod = {
  18. "metadata": {
  19. "labels": {"component": "es", "deploymentconfig": "logging-es"},
  20. "name": "logging-es",
  21. },
  22. "status": {
  23. "conditions": [{"status": "True", "type": "Ready"}],
  24. "containerStatuses": [{"ready": True}],
  25. "podIP": "10.10.10.10",
  26. },
  27. "_test_master_name_str": "name logging-es",
  28. }
  29. split_es_pod = {
  30. "metadata": {
  31. "labels": {"component": "es", "deploymentconfig": "logging-es-2"},
  32. "name": "logging-es-2",
  33. },
  34. "status": {
  35. "conditions": [{"status": "True", "type": "Ready"}],
  36. "containerStatuses": [{"ready": True}],
  37. "podIP": "10.10.10.10",
  38. },
  39. "_test_master_name_str": "name logging-es-2",
  40. }
  41. def test_check_elasticsearch():
  42. assert 'No logging Elasticsearch pods' in canned_elasticsearch().check_elasticsearch([], {})
  43. # canned oc responses to match so all the checks pass
  44. def _exec_oc(cmd, args, task_vars):
  45. if '_cat/master' in cmd:
  46. return 'name logging-es'
  47. elif '/_nodes' in cmd:
  48. return json.dumps(es_node_list)
  49. elif '_cluster/health' in cmd:
  50. return '{"status": "green"}'
  51. elif ' df ' in cmd:
  52. return 'IUse% Use%\n 3% 4%\n'
  53. else:
  54. raise Exception(cmd)
  55. assert not canned_elasticsearch(_exec_oc).check_elasticsearch([plain_es_pod], {})
  56. def pods_by_name(pods):
  57. return {pod['metadata']['name']: pod for pod in pods}
  58. @pytest.mark.parametrize('pods, expect_error', [
  59. (
  60. [],
  61. 'No logging Elasticsearch masters',
  62. ),
  63. (
  64. [plain_es_pod],
  65. None,
  66. ),
  67. (
  68. [plain_es_pod, split_es_pod],
  69. 'Found multiple Elasticsearch masters',
  70. ),
  71. ])
  72. def test_check_elasticsearch_masters(pods, expect_error):
  73. test_pods = list(pods)
  74. check = canned_elasticsearch(lambda cmd, args, task_vars: test_pods.pop(0)['_test_master_name_str'])
  75. errors = check._check_elasticsearch_masters(pods_by_name(pods), task_vars_config_base)
  76. assert_error(''.join(errors), expect_error)
  77. es_node_list = {
  78. 'nodes': {
  79. 'random-es-name': {
  80. 'host': 'logging-es',
  81. }}}
  82. @pytest.mark.parametrize('pods, node_list, expect_error', [
  83. (
  84. [],
  85. {},
  86. 'No logging Elasticsearch masters',
  87. ),
  88. (
  89. [plain_es_pod],
  90. es_node_list,
  91. None,
  92. ),
  93. (
  94. [plain_es_pod],
  95. {}, # empty list of nodes triggers KeyError
  96. "Failed to query",
  97. ),
  98. (
  99. [split_es_pod],
  100. es_node_list,
  101. 'does not correspond to any known ES pod',
  102. ),
  103. ])
  104. def test_check_elasticsearch_node_list(pods, node_list, expect_error):
  105. check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(node_list))
  106. errors = check._check_elasticsearch_node_list(pods_by_name(pods), task_vars_config_base)
  107. assert_error(''.join(errors), expect_error)
  108. @pytest.mark.parametrize('pods, health_data, expect_error', [
  109. (
  110. [plain_es_pod],
  111. [{"status": "green"}],
  112. None,
  113. ),
  114. (
  115. [plain_es_pod],
  116. [{"no-status": "should bomb"}],
  117. 'Could not retrieve cluster health status',
  118. ),
  119. (
  120. [plain_es_pod, split_es_pod],
  121. [{"status": "green"}, {"status": "red"}],
  122. 'Elasticsearch cluster health status is RED',
  123. ),
  124. ])
  125. def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
  126. test_health_data = list(health_data)
  127. check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(test_health_data.pop(0)))
  128. errors = check._check_es_cluster_health(pods_by_name(pods), task_vars_config_base)
  129. assert_error(''.join(errors), expect_error)
  130. @pytest.mark.parametrize('disk_data, expect_error', [
  131. (
  132. 'df: /elasticsearch/persistent: No such file or directory\n',
  133. 'Could not retrieve storage usage',
  134. ),
  135. (
  136. 'IUse% Use%\n 3% 4%\n',
  137. None,
  138. ),
  139. (
  140. 'IUse% Use%\n 95% 40%\n',
  141. 'Inode percent usage on the storage volume',
  142. ),
  143. (
  144. 'IUse% Use%\n 3% 94%\n',
  145. 'Disk percent usage on the storage volume',
  146. ),
  147. ])
  148. def test_check_elasticsearch_diskspace(disk_data, expect_error):
  149. check = canned_elasticsearch(lambda cmd, args, task_vars: disk_data)
  150. errors = check._check_elasticsearch_diskspace(pods_by_name([plain_es_pod]), task_vars_config_base)
  151. assert_error(''.join(errors), expect_error)