# disk_availability_test.py -- tests for openshift_checks.disk_availability
  1. import pytest
  2. from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckException
  3. @pytest.mark.parametrize('group_names,is_active', [
  4. (['masters'], True),
  5. (['nodes'], True),
  6. (['etcd'], True),
  7. (['masters', 'nodes'], True),
  8. (['masters', 'etcd'], True),
  9. ([], False),
  10. (['lb'], False),
  11. (['nfs'], False),
  12. ])
  13. def test_is_active(group_names, is_active):
  14. task_vars = dict(
  15. group_names=group_names,
  16. )
  17. assert DiskAvailability(None, task_vars).is_active() == is_active
  18. @pytest.mark.parametrize('desc, ansible_mounts, expect_chunks', [
  19. (
  20. 'empty ansible_mounts',
  21. [],
  22. ['determine mount point', 'none'],
  23. ),
  24. (
  25. 'missing relevant mount paths',
  26. [{'mount': '/mnt'}],
  27. ['determine mount point', '/mnt'],
  28. ),
  29. (
  30. 'missing size_available',
  31. [{'mount': '/var'}, {'mount': '/usr'}, {'mount': '/tmp'}],
  32. ['missing', 'size_available'],
  33. ),
  34. ])
  35. def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
  36. task_vars = dict(
  37. group_names=['masters'],
  38. ansible_mounts=ansible_mounts,
  39. )
  40. with pytest.raises(OpenShiftCheckException) as excinfo:
  41. DiskAvailability(fake_execute_module, task_vars).run()
  42. for chunk in expect_chunks:
  43. assert chunk in str(excinfo.value)
  44. @pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [
  45. (
  46. ['masters'],
  47. 0,
  48. [{
  49. 'mount': '/',
  50. 'size_available': 40 * 10**9 + 1,
  51. }],
  52. ),
  53. (
  54. ['nodes'],
  55. 0,
  56. [{
  57. 'mount': '/',
  58. 'size_available': 15 * 10**9 + 1,
  59. }],
  60. ),
  61. (
  62. ['etcd'],
  63. 0,
  64. [{
  65. 'mount': '/',
  66. 'size_available': 20 * 10**9 + 1,
  67. }],
  68. ),
  69. (
  70. ['etcd'],
  71. 1, # configure lower threshold
  72. [{
  73. 'mount': '/',
  74. 'size_available': 1 * 10**9 + 1, # way smaller than recommended
  75. }],
  76. ),
  77. (
  78. ['etcd'],
  79. 0,
  80. [{
  81. # not enough space on / ...
  82. 'mount': '/',
  83. 'size_available': 2 * 10**9,
  84. }, {
  85. # ... but enough on /var
  86. 'mount': '/var',
  87. 'size_available': 20 * 10**9 + 1,
  88. }],
  89. ),
  90. ])
  91. def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansible_mounts):
  92. task_vars = dict(
  93. group_names=group_names,
  94. openshift_check_min_host_disk_gb=configured_min,
  95. ansible_mounts=ansible_mounts,
  96. )
  97. result = DiskAvailability(fake_execute_module, task_vars).run()
  98. assert not result.get('failed', False)
  99. @pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [
  100. (
  101. 'test with no space available',
  102. ['masters'],
  103. 0,
  104. [{
  105. 'mount': '/',
  106. 'size_available': 1,
  107. }],
  108. ['0.0 GB'],
  109. ),
  110. (
  111. 'test with a higher configured required value',
  112. ['masters'],
  113. 100, # set a higher threshold
  114. [{
  115. 'mount': '/',
  116. 'size_available': 50 * 10**9, # would normally be enough...
  117. }],
  118. ['100.0 GB'],
  119. ),
  120. (
  121. 'test with 1GB available, but "0" GB space requirement',
  122. ['nodes'],
  123. 0,
  124. [{
  125. 'mount': '/',
  126. 'size_available': 1 * 10**9,
  127. }],
  128. ['1.0 GB'],
  129. ),
  130. (
  131. 'test with no space available, but "0" GB space requirement',
  132. ['etcd'],
  133. 0,
  134. [{
  135. 'mount': '/',
  136. 'size_available': 1,
  137. }],
  138. ['0.0 GB'],
  139. ),
  140. (
  141. 'test with enough space for a node, but not for a master',
  142. ['nodes', 'masters'],
  143. 0,
  144. [{
  145. 'mount': '/',
  146. 'size_available': 15 * 10**9 + 1,
  147. }],
  148. ['15.0 GB'],
  149. ),
  150. (
  151. 'test failure with enough space on "/", but not enough on "/var"',
  152. ['etcd'],
  153. 0,
  154. [{
  155. # enough space on / ...
  156. 'mount': '/',
  157. 'size_available': 20 * 10**9 + 1,
  158. }, {
  159. # .. but not enough on /var
  160. 'mount': '/var',
  161. 'size_available': 0,
  162. }],
  163. ['0.0 GB'],
  164. ),
  165. ], ids=lambda argval: argval[0])
  166. def test_fails_with_insufficient_disk_space(name, group_names, configured_min, ansible_mounts, expect_chunks):
  167. task_vars = dict(
  168. group_names=group_names,
  169. openshift_check_min_host_disk_gb=configured_min,
  170. ansible_mounts=ansible_mounts,
  171. )
  172. result = DiskAvailability(fake_execute_module, task_vars).run()
  173. assert result['failed']
  174. for chunk in 'below recommended'.split() + expect_chunks:
  175. assert chunk in result.get('msg', '')
  176. @pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [
  177. (
  178. 'test without enough space for master under "upgrade" context',
  179. ['nodes', 'masters'],
  180. "upgrade",
  181. [{
  182. 'mount': '/',
  183. 'size_available': 1 * 10**9 + 1,
  184. 'size_total': 21 * 10**9 + 1,
  185. }],
  186. True,
  187. ["1.0 GB"],
  188. ),
  189. (
  190. 'test with enough space for master under "upgrade" context',
  191. ['nodes', 'masters'],
  192. "upgrade",
  193. [{
  194. 'mount': '/',
  195. 'size_available': 10 * 10**9 + 1,
  196. 'size_total': 21 * 10**9 + 1,
  197. }],
  198. False,
  199. [],
  200. ),
  201. (
  202. 'test with not enough space for master, and non-upgrade context',
  203. ['nodes', 'masters'],
  204. "health",
  205. [{
  206. 'mount': '/',
  207. # not enough space for a master,
  208. # "health" context should not lower requirement
  209. 'size_available': 20 * 10**9 + 1,
  210. }],
  211. True,
  212. ["20.0 GB", "below minimum"],
  213. ),
  214. ], ids=lambda argval: argval[0])
  215. def test_min_required_space_changes_with_upgrade_context(name, group_names, context, ansible_mounts, failed, extra_words):
  216. task_vars = dict(
  217. r_openshift_health_checker_playbook_context=context,
  218. group_names=group_names,
  219. ansible_mounts=ansible_mounts,
  220. )
  221. check = DiskAvailability(fake_execute_module, task_vars)
  222. result = check.run()
  223. assert result.get("failed", False) == failed
  224. for word in extra_words:
  225. assert word in result.get('msg', '')
  226. def fake_execute_module(*args):
  227. raise AssertionError('this function should not be called')