docker_storage_test.py

import pytest

from openshift_checks import OpenShiftCheckException
from openshift_checks.docker_storage import DockerStorage


@pytest.mark.parametrize('openshift_is_atomic, group_names, is_active', [
    (False, ["oo_masters_to_config", "oo_etcd_to_config"], False),
    (False, ["oo_masters_to_config", "oo_nodes_to_config"], True),
    (True, ["oo_etcd_to_config"], False),
])
def test_is_active(openshift_is_atomic, group_names, is_active):
    task_vars = dict(
        openshift_is_atomic=openshift_is_atomic,
        group_names=group_names,
    )
    assert DockerStorage(None, task_vars).is_active() == is_active
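

# Most of the remaining tests run the check against a non-atomic host;
# this helper provides the shared task_vars.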
def non_atomic_task_vars():
    return {"openshift_is_atomic": False}
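

# test_check_storage_driver drives DockerStorage.run() with a canned
# docker_info response; the per-driver usage checks are stubbed out so only
# storage-driver validation is exercised.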
@pytest.mark.parametrize('docker_info, failed, expect_msg', [
    (
        dict(failed=True, msg="Error connecting: Error while fetching server API version"),
        True,
        ["Is docker running on this host?"],
    ),
    (
        dict(msg="I have no info"),
        True,
        ["missing info"],
    ),
    (
        dict(info={
            "Driver": "devicemapper",
            "DriverStatus": [("Pool Name", "docker-docker--pool")],
        }),
        False,
        [],
    ),
    (
        dict(info={
            "Driver": "devicemapper",
            "DriverStatus": [("Data loop file", "true")],
        }),
        True,
        ["loopback devices with the Docker devicemapper storage driver"],
    ),
    (
        dict(info={
            "Driver": "overlay2",
            "DriverStatus": [("Backing Filesystem", "xfs")],
        }),
        False,
        [],
    ),
    (
        dict(info={
            "Driver": "overlay",
            "DriverStatus": [("Backing Filesystem", "btrfs")],
        }),
        True,
        ["storage is type 'btrfs'", "only supported with\n'xfs'"],
    ),
    (
        dict(info={
            "Driver": "overlay2",
            "DriverStatus": [("Backing Filesystem", "xfs")],
            "OperatingSystem": "Red Hat Enterprise Linux Server release 7.2 (Maipo)",
            "KernelVersion": "3.10.0-327.22.2.el7.x86_64",
        }),
        True,
        ["Docker reports kernel version 3.10.0-327"],
    ),
    (
        dict(info={
            "Driver": "overlay",
            "DriverStatus": [("Backing Filesystem", "xfs")],
            "OperatingSystem": "CentOS",
            "KernelVersion": "3.10.0-514",
        }),
        False,
        [],
    ),
    (
        dict(info={
            "Driver": "unsupported",
        }),
        True,
        ["unsupported Docker storage driver"],
    ),
])
def test_check_storage_driver(docker_info, failed, expect_msg):
    def execute_module(module_name, *_):
        if module_name == "yum":
            return {}
        if module_name != "docker_info":
            raise ValueError("not expecting module " + module_name)
        return docker_info

    check = DockerStorage(execute_module, non_atomic_task_vars())
    check.check_dm_usage = lambda status: dict()  # stub out for this test
    check.check_overlay_usage = lambda info: dict()  # stub out for this test
    result = check.run()

    if failed:
        assert result["failed"]
    else:
        assert not result.get("failed", False)

    for word in expect_msg:
        assert word in result["msg"]
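

# DriverStatus fixtures for the devicemapper thin-pool usage check:
# a pool with plenty of room and one that is effectively full.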
enough_space = {
    "Pool Name": "docker--vg-docker--pool",
    "Data Space Used": "19.92 MB",
    "Data Space Total": "8.535 GB",
    "Metadata Space Used": "40.96 kB",
    "Metadata Space Total": "25.17 MB",
}

not_enough_space = {
    "Pool Name": "docker--vg-docker--pool",
    "Data Space Used": "10 GB",
    "Data Space Total": "10 GB",
    "Metadata Space Used": "42 kB",
    "Metadata Space Total": "43 kB",
}
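

# check_dm_usage is tested with get_vg_free stubbed out, so the parametrized
# vg_free string is used as-is; failure cases cover a bad threshold value,
# sizes that do not parse as bytes, and a pool over its usage threshold.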
@pytest.mark.parametrize('task_vars, driver_status, vg_free, success, expect_msg', [
    (
        {"max_thinpool_data_usage_percent": "not a float"},
        enough_space,
        "12g",
        False,
        ["is not a percentage"],
    ),
    (
        {},
        {},  # empty values from driver status
        "bogus",  # also does not parse as bytes
        False,
        ["Could not interpret", "as bytes"],
    ),
    (
        {},
        enough_space,
        "12.00g",
        True,
        [],
    ),
    (
        {},
        not_enough_space,
        "0.00",
        False,
        ["data usage", "metadata usage", "higher than threshold"],
    ),
])
def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
    check = DockerStorage(None, task_vars)
    check.get_vg_free = lambda pool: vg_free
    result = check.check_dm_usage(driver_status)
    result_success = not result.get("failed")

    assert result_success is success
    for msg in expect_msg:
        assert msg in result["msg"]
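

# get_vg_free runs /sbin/vgs via the Ansible "command" module (faked here by
# execute_module); failures surface as OpenShiftCheckException.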
@pytest.mark.parametrize('pool, command_returns, raises, returns', [
    (
        "foo-bar",
        {  # vgs missing
            "msg": "[Errno 2] No such file or directory",
            "failed": True,
            "cmd": "/sbin/vgs",
            "rc": 2,
        },
        "Failed to run /sbin/vgs",
        None,
    ),
    (
        "foo",  # no hyphen in name - should not happen
        {},
        "name does not have the expected format",
        None,
    ),
    (
        "foo-bar",
        dict(stdout=" 4.00g\n"),
        None,
        "4.00g",
    ),
    (
        "foo-bar",
        dict(stdout="\n"),  # no matching VG
        "vgs did not find this VG",
        None,
    )
])
def test_vg_free(pool, command_returns, raises, returns):
    def execute_module(module_name, *_):
        if module_name != "command":
            raise ValueError("not expecting module " + module_name)
        return command_returns

    check = DockerStorage(execute_module)

    if raises:
        with pytest.raises(OpenShiftCheckException) as err:
            check.get_vg_free(pool)
        assert raises in str(err.value)
    else:
        ret = check.get_vg_free(pool)
        assert ret == returns
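

# convert_to_bytes treats bare numbers as bytes and k/MB/g suffixes as
# 1024-based multiples; unrecognized input raises ValueError.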
@pytest.mark.parametrize('string, expect_bytes', [
    ("12", 12.0),
    ("12 k", 12.0 * 1024),
    ("42.42 MB", 42.42 * 1024**2),
    ("12g", 12.0 * 1024**3),
])
def test_convert_to_bytes(string, expect_bytes):
    got = DockerStorage.convert_to_bytes(string)
    assert got == expect_bytes


@pytest.mark.parametrize('string', [
    "bork",
    "42 Qs",
])
def test_convert_to_bytes_error(string):
    with pytest.raises(ValueError) as err:
        DockerStorage.convert_to_bytes(string)
    assert "Cannot convert" in str(err.value)
    assert string in str(err.value)
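

# ansible_mounts fixtures for the overlay usage check: a /var/lib/docker mount
# with free space, one that is full, and two malformed entries.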
ansible_mounts_enough = [{
    'mount': '/var/lib/docker',
    'size_available': 50 * 10**9,
    'size_total': 50 * 10**9,
}]
ansible_mounts_not_enough = [{
    'mount': '/var/lib/docker',
    'size_available': 0,
    'size_total': 50 * 10**9,
}]
ansible_mounts_missing_fields = [dict(mount='/var/lib/docker')]
ansible_mounts_zero_size = [{
    'mount': '/var/lib/docker',
    'size_available': 0,
    'size_total': 0,
}]
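

# check_overlay_usage failure cases: a full filesystem, a threshold that is
# not a percentage, and mount facts missing fields or reporting zero size
# (both flagged as an Ansible bug).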
@pytest.mark.parametrize('ansible_mounts, threshold, expect_fail, expect_msg', [
    (
        ansible_mounts_enough,
        None,
        False,
        [],
    ),
    (
        ansible_mounts_not_enough,
        None,
        True,
        ["usage percentage", "higher than threshold"],
    ),
    (
        ansible_mounts_not_enough,
        "bogus percent",
        True,
        ["is not a percentage"],
    ),
    (
        ansible_mounts_missing_fields,
        None,
        True,
        ["Ansible bug"],
    ),
    (
        ansible_mounts_zero_size,
        None,
        True,
        ["Ansible bug"],
    ),
])
def test_overlay_usage(ansible_mounts, threshold, expect_fail, expect_msg):
    task_vars = non_atomic_task_vars()
    task_vars["ansible_mounts"] = ansible_mounts
    if threshold is not None:
        task_vars["max_overlay_usage_percent"] = threshold
    check = DockerStorage(None, task_vars)
    docker_info = dict(DockerRootDir="/var/lib/docker", Driver="overlay")
    result = check.check_overlay_usage(docker_info)

    assert expect_fail == bool(result.get("failed"))
    for msg in expect_msg:
        assert msg in result["msg"]
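

# Illustrative sketch, not part of the original suite: how a DockerStorage
# check could be driven directly, mirroring the pattern used in the tests
# above. The docker_info payload and the function name are made up for the
# example, and the per-driver usage checks are stubbed just as the tests do.
def _example_run_storage_check():
    def fake_execute_module(module_name, *_):
        if module_name == "docker_info":
            return dict(info={
                "Driver": "overlay2",
                "DriverStatus": [("Backing Filesystem", "xfs")],
            })
        return {}  # e.g. the yum query made during the check

    check = DockerStorage(fake_execute_module, non_atomic_task_vars())
    check.check_dm_usage = lambda status: dict()  # stub, as in the tests
    check.check_overlay_usage = lambda info: dict()  # stub, as in the tests
    return check.run()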