"""Check that there is enough disk space in predefined paths."""

import tempfile

from openshift_checks import OpenShiftCheck, OpenShiftCheckException

  4. class DiskAvailability(OpenShiftCheck):
  5. """Check that recommended disk space is available before a first-time install."""
  6. name = "disk_availability"
  7. tags = ["preflight"]
  8. # Values taken from the official installation documentation:
  9. # https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
  10. recommended_disk_space_bytes = {
  11. '/var': {
  12. 'masters': 40 * 10**9,
  13. 'nodes': 15 * 10**9,
  14. 'etcd': 20 * 10**9,
  15. },
  16. # Used to copy client binaries into,
  17. # see roles/openshift_cli/library/openshift_container_binary_sync.py.
  18. '/usr/local/bin': {
  19. 'masters': 1 * 10**9,
  20. 'nodes': 1 * 10**9,
  21. 'etcd': 1 * 10**9,
  22. },
  23. # Used as temporary storage in several cases.
  24. tempfile.gettempdir(): {
  25. 'masters': 1 * 10**9,
  26. 'nodes': 1 * 10**9,
  27. 'etcd': 1 * 10**9,
  28. },
  29. }
  30. # recommended disk space for each location under an upgrade context
  31. recommended_disk_upgrade_bytes = {
  32. '/var': {
  33. 'masters': 10 * 10**9,
  34. 'nodes': 5 * 10 ** 9,
  35. 'etcd': 5 * 10 ** 9,
  36. },
  37. }
  38. def is_active(self):
  39. """Skip hosts that do not have recommended disk space requirements."""
  40. group_names = self.get_var("group_names", default=[])
  41. active_groups = set()
  42. for recommendation in self.recommended_disk_space_bytes.values():
  43. active_groups.update(recommendation.keys())
  44. has_disk_space_recommendation = bool(active_groups.intersection(group_names))
  45. return super(DiskAvailability, self).is_active() and has_disk_space_recommendation
  46. def run(self):
  47. group_names = self.get_var("group_names")
  48. user_config = self.get_var("openshift_check_min_host_disk_gb", default={})
  49. try:
  50. # For backwards-compatibility, if openshift_check_min_host_disk_gb
  51. # is a number, then it overrides the required config for '/var'.
  52. number = float(user_config)
  53. user_config = {
  54. '/var': {
  55. 'masters': number,
  56. 'nodes': number,
  57. 'etcd': number,
  58. },
  59. }
  60. except TypeError:
  61. # If it is not a number, then it should be a nested dict.
  62. pass
  63. self.register_log("recommended thresholds", self.recommended_disk_space_bytes)
  64. if user_config:
  65. self.register_log("user-configured thresholds", user_config)
  66. # TODO: as suggested in
  67. # https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
  68. # maybe we could support checking disk availability in paths that are
  69. # not part of the official recommendation but present in the user
  70. # configuration.
  71. for path, recommendation in self.recommended_disk_space_bytes.items():
  72. free_bytes = self.free_bytes(path)
  73. recommended_bytes = max(recommendation.get(name, 0) for name in group_names)
  74. config = user_config.get(path, {})
  75. # NOTE: the user config is in GB, but we compare bytes, thus the
  76. # conversion.
  77. config_bytes = max(config.get(name, 0) for name in group_names) * 10**9
  78. recommended_bytes = config_bytes or recommended_bytes
  79. # if an "upgrade" context is set, update the minimum disk requirement
  80. # as this signifies an in-place upgrade - the node might have the
  81. # required total disk space, but some of that space may already be
  82. # in use by the existing OpenShift deployment.
  83. context = self.get_var("r_openshift_health_checker_playbook_context", default="")
  84. if context == "upgrade":
  85. recommended_upgrade_paths = self.recommended_disk_upgrade_bytes.get(path, {})
  86. if recommended_upgrade_paths:
  87. recommended_bytes = config_bytes or max(recommended_upgrade_paths.get(name, 0)
  88. for name in group_names)
  89. if free_bytes < recommended_bytes:
  90. free_gb = float(free_bytes) / 10**9
  91. recommended_gb = float(recommended_bytes) / 10**9
  92. msg = (
  93. 'Available disk space in "{}" ({:.1f} GB) '
  94. 'is below minimum recommended ({:.1f} GB)'
  95. ).format(path, free_gb, recommended_gb)
  96. # warn if check failed under an "upgrade" context
  97. # due to limits imposed by the user config
  98. if config_bytes and context == "upgrade":
  99. msg += ('\n\nMake sure to account for decreased disk space during an upgrade\n'
  100. 'due to an existing OpenShift deployment. Please check the value of\n'
  101. ' openshift_check_min_host_disk_gb={}\n'
  102. 'in your Ansible inventory, and lower the recommended disk space availability\n'
  103. 'if necessary for this upgrade.').format(config_bytes)
  104. self.register_failure(msg)
  105. return {}
  106. def free_bytes(self, path):
  107. """Return the size available in path based on ansible_mounts."""
  108. mount = self.find_ansible_mount(path)
  109. try:
  110. return mount['size_available']
  111. except KeyError:
  112. raise OpenShiftCheckException(
  113. 'Unable to retrieve disk availability for "{path}".\n'
  114. 'Ansible facts included a matching mount point for this path:\n'
  115. ' {mount}\n'
  116. 'however it is missing the size_available field.\n'
  117. 'To investigate, you can inspect the output of `ansible -m setup <host>`'
  118. ''.format(path=path, mount=mount)
  119. )