openshift_logging_facts.py

'''
---
module: openshift_logging_facts
version_added: ""
short_description: Gather facts about the OpenShift logging stack
description:
- Determine the current facts about the OpenShift logging stack (e.g. cluster size)
options:
author: Red Hat, Inc
'''
import copy
import json

# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
from subprocess import *  # noqa: F402,F403

# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
from ansible.module_utils.basic import *  # noqa: F402,F403

import yaml

EXAMPLES = """
- action: openshift_logging_facts
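# An invocation with explicit parameters might look like the task below; the
# parameter names come from this module's argument_spec, while the values are
# placeholders to adapt to your environment.
- openshift_logging_facts:
    admin_kubeconfig: /path/to/admin.kubeconfig
    oc_bin: oc
    openshift_logging_namespace: logging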
"""

RETURN = """
"""

DEFAULT_OC_OPTIONS = ["-o", "json"]

# constants used for various labels and selectors
COMPONENT_KEY = "component"
LOGGING_INFRA_KEY = "logging-infra"

# selectors for filtering resources
DS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + "=" + "fluentd"
LOGGING_SELECTOR = LOGGING_INFRA_KEY + "=" + "support"
ROUTE_SELECTOR = "component=support, logging-infra=support, provider=openshift"
COMPONENTS = ["kibana", "curator", "elasticsearch", "fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"]


class OCBaseCommand(object):
    ''' The base class used to query openshift '''

    def __init__(self, binary, kubeconfig, namespace):
        ''' the init method of OCBaseCommand class '''
        self.binary = binary
        self.kubeconfig = kubeconfig
        self.user = self.get_system_admin(self.kubeconfig)
        self.namespace = namespace

    # pylint: disable=no-self-use
    def get_system_admin(self, kubeconfig):
        ''' Retrieves the system admin user from the kubeconfig '''
        with open(kubeconfig, 'r') as kubeconfig_file:
            config = yaml.safe_load(kubeconfig_file)
            for user in config["users"]:
                if user["name"].startswith("system:admin"):
                    return user["name"]
        raise Exception("Unable to find system:admin in: " + kubeconfig)
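
    # Note: the lookup above assumes the standard kubeconfig layout, where
    # "users" is a list of entries such as (illustrative values only):
    #   users:
    #   - name: system:admin/master-hostname:8443
    #     user: {client-certificate-data: ..., client-key-data: ...}
    # Only the "system:admin" name prefix is matched.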

    # pylint: disable=too-many-arguments, dangerous-default-value
    def oc_command(self, sub, kind, namespace=None, name=None, add_options=None):
        ''' Wrapper method for the "oc" command '''
        cmd = [self.binary, sub, kind]
        if name is not None:
            cmd = cmd + [name]
        if namespace is not None:
            cmd = cmd + ["-n", namespace]
        if add_options is None:
            add_options = []
        cmd = cmd + ["--user=" + self.user, "--config=" + self.kubeconfig] + DEFAULT_OC_OPTIONS + add_options
        try:
            # universal_newlines=True keeps stdout/stderr as text on both Python 2 and 3
            process = Popen(cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)  # noqa: F405
            out, err = process.communicate()
            if len(err) > 0:
                if 'not found' in err:
                    return {'items': []}
                if 'No resources found' in err:
                    return {'items': []}
                raise Exception(err)
        except Exception as excp:
            err = "There was an exception trying to run the command '" + " ".join(cmd) + "' " + str(excp)
            raise Exception(err)
        return json.loads(out)
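
# For reference, a call such as oc_command("get", "routes", namespace="logging")
# assembles an argument list of roughly this shape before running it via Popen:
#   [<oc binary>, "get", "routes", "-n", "logging", "--user=<system:admin user>",
#    "--config=<kubeconfig path>", "-o", "json", <any add_options>]
# so every query returns parsed JSON, and "not found" / "No resources found"
# errors are treated as an empty item list.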


class OpenshiftLoggingFacts(OCBaseCommand):
    ''' The class structure for holding the OpenshiftLogging Facts'''

    name = "facts"

    def __init__(self, logger, binary, kubeconfig, namespace):
        ''' The init method for OpenshiftLoggingFacts '''
        super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace)
        self.logger = logger
        self.facts = dict()

    def default_keys_for(self, kind):
        ''' Sets the default key values for kind '''
        for comp in COMPONENTS:
            self.add_facts_for(comp, kind)

    def add_facts_for(self, comp, kind, name=None, facts=None):
        ''' Add facts for the provided kind '''
        if comp not in self.facts:
            self.facts[comp] = dict()
        if kind not in self.facts[comp]:
            self.facts[comp][kind] = dict()
        if name:
            self.facts[comp][kind][name] = facts

    def facts_for_routes(self, namespace):
        ''' Gathers facts for Routes in logging namespace '''
        self.default_keys_for("routes")
        route_list = self.oc_command("get", "routes", namespace=namespace, add_options=["-l", ROUTE_SELECTOR])
        if len(route_list["items"]) == 0:
            return None
        for route in route_list["items"]:
            name = route["metadata"]["name"]
            comp = self.comp(name)
            if comp is not None:
                self.add_facts_for(comp, "routes", name, dict(host=route["spec"]["host"]))
        self.facts["agl_namespace"] = namespace

    def facts_for_daemonsets(self, namespace):
        ''' Gathers facts for Daemonsets in logging namespace '''
        self.default_keys_for("daemonsets")
        ds_list = self.oc_command("get", "daemonsets", namespace=namespace,
                                  add_options=["-l", LOGGING_INFRA_KEY + "=fluentd"])
        if len(ds_list["items"]) == 0:
            return
        for ds_item in ds_list["items"]:
            name = ds_item["metadata"]["name"]
            comp = self.comp(name)
            spec = ds_item["spec"]["template"]["spec"]
            container = spec["containers"][0]
            result = dict(
                selector=ds_item["spec"]["selector"],
                image=container["image"],
                resources=container["resources"],
                nodeSelector=spec["nodeSelector"],
                serviceAccount=spec["serviceAccount"],
                terminationGracePeriodSeconds=spec["terminationGracePeriodSeconds"]
            )
            self.add_facts_for(comp, "daemonsets", name, result)

    def facts_for_pvcs(self, namespace):
        ''' Gathers facts for PVCs in logging namespace '''
        self.default_keys_for("pvcs")
        pvclist = self.oc_command("get", "pvc", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
        if len(pvclist["items"]) == 0:
            return
        for pvc in pvclist["items"]:
            name = pvc["metadata"]["name"]
            comp = self.comp(name)
            self.add_facts_for(comp, "pvcs", name, dict())

    def facts_for_deploymentconfigs(self, namespace):
        ''' Gathers facts for DeploymentConfigs in logging namespace '''
        self.default_keys_for("deploymentconfigs")
        dclist = self.oc_command("get", "deploymentconfigs", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
        if len(dclist["items"]) == 0:
            return
        dcs = dclist["items"]
        for dc_item in dcs:
            name = dc_item["metadata"]["name"]
            comp = self.comp(name)
            if comp is not None:
                spec = dc_item["spec"]["template"]["spec"]
                facts = dict(
                    selector=dc_item["spec"]["selector"],
                    replicas=dc_item["spec"]["replicas"],
                    serviceAccount=spec["serviceAccount"],
                    containers=dict(),
                    volumes=dict()
                )
                if "volumes" in spec:
                    for vol in spec["volumes"]:
                        clone = copy.deepcopy(vol)
                        clone.pop("name", None)
                        facts["volumes"][vol["name"]] = clone
                for container in spec["containers"]:
                    facts["containers"][container["name"]] = dict(
                        image=container["image"],
                        resources=container["resources"],
                    )
                self.add_facts_for(comp, "deploymentconfigs", name, facts)

    def facts_for_services(self, namespace):
        ''' Gathers facts for services in logging namespace '''
        self.default_keys_for("services")
        servicelist = self.oc_command("get", "services", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
        if len(servicelist["items"]) == 0:
            return
        for service in servicelist["items"]:
            name = service["metadata"]["name"]
            comp = self.comp(name)
            if comp is not None:
                self.add_facts_for(comp, "services", name, dict())

    def facts_for_configmaps(self, namespace):
        ''' Gathers facts for configmaps in logging namespace '''
        self.default_keys_for("configmaps")
        a_list = self.oc_command("get", "configmaps", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
        if len(a_list["items"]) == 0:
            return
        for item in a_list["items"]:
            name = item["metadata"]["name"]
            comp = self.comp(name)
            if comp is not None:
                self.add_facts_for(comp, "configmaps", name, item["data"])

    def facts_for_oauthclients(self, namespace):
        ''' Gathers facts for oauthclients used with logging '''
        self.default_keys_for("oauthclients")
        a_list = self.oc_command("get", "oauthclients", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
        if len(a_list["items"]) == 0:
            return
        for item in a_list["items"]:
            name = item["metadata"]["name"]
            comp = self.comp(name)
            if comp is not None:
                result = dict(
                    redirectURIs=item["redirectURIs"]
                )
                self.add_facts_for(comp, "oauthclients", name, result)

    def facts_for_secrets(self, namespace):
        ''' Gathers facts for secrets in the logging namespace '''
        self.default_keys_for("secrets")
        a_list = self.oc_command("get", "secrets", namespace=namespace)
        if len(a_list["items"]) == 0:
            return
        for item in a_list["items"]:
            name = item["metadata"]["name"]
            comp = self.comp(name)
            if comp is not None and item["type"] == "Opaque":
                result = dict(
                    keys=list(item["data"].keys())
                )
                self.add_facts_for(comp, "secrets", name, result)

    def facts_for_sccs(self):
        ''' Gathers facts for SCCs used with logging '''
        self.default_keys_for("sccs")
        scc = self.oc_command("get", "scc", name="privileged")
        if len(scc["users"]) == 0:
            return
        for item in scc["users"]:
            comp = self.comp(item)
            if comp is not None:
                self.add_facts_for(comp, "sccs", "privileged", dict())

    def facts_for_clusterrolebindings(self, namespace):
        ''' Gathers ClusterRoleBindings used with logging '''
        self.default_keys_for("clusterrolebindings")
        role = self.oc_command("get", "clusterrolebindings", name="cluster-readers")
        if "subjects" not in role or len(role["subjects"]) == 0:
            return
        for item in role["subjects"]:
            comp = self.comp(item["name"])
            if comp is not None and namespace == item["namespace"]:
                self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict())

    # this needs to end up nested under the service account...
    def facts_for_rolebindings(self, namespace):
        ''' Gathers facts for RoleBindings used with logging '''
        self.default_keys_for("rolebindings")
        role = self.oc_command("get", "rolebindings", namespace=namespace, name="logging-elasticsearch-view-role")
        if "subjects" not in role or len(role["subjects"]) == 0:
            return
        for item in role["subjects"]:
            comp = self.comp(item["name"])
            if comp is not None and namespace == item["namespace"]:
                self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict())

    # pylint: disable=no-self-use, too-many-return-statements
    def comp(self, name):
        ''' Maps a resource name to the logging component it belongs to '''
        if name.startswith("logging-curator-ops"):
            return "curator_ops"
        elif name.startswith("logging-kibana-ops") or name.startswith("kibana-ops"):
            return "kibana_ops"
        elif name.startswith("logging-es-ops") or name.startswith("logging-elasticsearch-ops"):
            return "elasticsearch_ops"
        elif name.startswith("logging-curator"):
            return "curator"
        elif name.startswith("logging-kibana") or name.startswith("kibana"):
            return "kibana"
        elif name.startswith("logging-es") or name.startswith("logging-elasticsearch"):
            return "elasticsearch"
        elif name.startswith("logging-fluentd") or name.endswith("aggregated-logging-fluentd"):
            return "fluentd"
        else:
            return None
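
    # For example, under the mapping above a resource named "logging-kibana-proxy"
    # is attributed to "kibana", one named "logging-es-ops-abc123" to
    # "elasticsearch_ops", and an SCC user ending in "aggregated-logging-fluentd"
    # to "fluentd" (names are illustrative). Names matching no prefix are ignored.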

    def build_facts(self):
        ''' Builds the logging facts and returns them '''
        self.facts_for_routes(self.namespace)
        self.facts_for_daemonsets(self.namespace)
        self.facts_for_deploymentconfigs(self.namespace)
        self.facts_for_services(self.namespace)
        self.facts_for_configmaps(self.namespace)
        self.facts_for_sccs()
        self.facts_for_oauthclients(self.namespace)
        self.facts_for_clusterrolebindings(self.namespace)
        self.facts_for_rolebindings(self.namespace)
        self.facts_for_secrets(self.namespace)
        self.facts_for_pvcs(self.namespace)
        return self.facts
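
# The dictionary returned by build_facts() has roughly the shape
#   {
#       "<component>": {"routes": {...}, "daemonsets": {...}, "pvcs": {...},
#                       "deploymentconfigs": {...}, "services": {...},
#                       "configmaps": {...}, "oauthclients": {...},
#                       "secrets": {...}, "sccs": {...},
#                       "clusterrolebindings": {...}, "rolebindings": {...}},
#       ...,
#       "agl_namespace": "<logging namespace>"
#   }
# with one entry per name in COMPONENTS; main() below exposes it to playbooks
# as the "openshift_logging_facts" fact.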


def main():
    ''' The main method '''
    module = AnsibleModule(  # noqa: F405
        argument_spec=dict(
            admin_kubeconfig={"required": True, "type": "str"},
            oc_bin={"required": True, "type": "str"},
            openshift_logging_namespace={"required": True, "type": "str"}
        ),
        supports_check_mode=False
    )
    try:
        cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],
                                    module.params['openshift_logging_namespace'])
        module.exit_json(
            ansible_facts={"openshift_logging_facts": cmd.build_facts()}
        )
    # ignore broad-except error to avoid stack trace to ansible user
    # pylint: disable=broad-except
    except Exception as error:
        module.fail_json(msg=str(error))


if __name__ == '__main__':
    main()