# openshift_facts.py (line-number gutter from the original rendering removed)
  1. #!/usr/bin/python
  2. # pylint: disable=too-many-lines
  3. # -*- coding: utf-8 -*-
  4. # Reason: Disable pylint too-many-lines because we don't want to split up this file.
  5. # Status: Permanently disabled to keep this module as self-contained as possible.
  6. """Ansible module for retrieving and setting openshift related facts"""
  7. # pylint: disable=no-name-in-module, import-error, wrong-import-order
  8. import copy
  9. import errno
  10. import json
  11. import re
  12. import os
  13. import yaml
  14. import struct
  15. import socket
  16. import ipaddress
  17. from distutils.util import strtobool
  18. from distutils.version import LooseVersion
  19. from ansible.module_utils.six import text_type
  20. from ansible.module_utils.six import string_types
  21. from ansible.module_utils.six.moves import configparser
  22. # ignore pylint errors related to the module_utils import
  23. # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
  24. # import module snippets
  25. from ansible.module_utils.basic import * # noqa: F403
  26. from ansible.module_utils.facts import * # noqa: F403
  27. from ansible.module_utils.urls import * # noqa: F403
  28. from ansible.module_utils.six import iteritems, itervalues
  29. from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
  30. from ansible.module_utils._text import to_native
# dbus bindings are optional: they only exist on hosts with the system
# python-dbus package installed.  HAVE_DBUS lets the rest of the module
# degrade gracefully when the import fails.
HAVE_DBUS = False
try:
    from dbus import SystemBus, Interface
    from dbus.exceptions import DBusException
    HAVE_DBUS = True
except ImportError:
    pass
# Standard Ansible module documentation blocks, rendered by `ansible-doc`.
DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
  47. # TODO: We should add a generic migration function that takes source and destination
  48. # paths and does the right thing rather than one function for common, one for node, etc.
  49. def migrate_common_facts(facts):
  50. """ Migrate facts from various roles into common """
  51. params = {
  52. 'node': ('portal_net'),
  53. 'master': ('portal_net')
  54. }
  55. if 'common' not in facts:
  56. facts['common'] = {}
  57. # pylint: disable=consider-iterating-dictionary
  58. for role in params.keys():
  59. if role in facts:
  60. for param in params[role]:
  61. if param in facts[role]:
  62. facts['common'][param] = facts[role].pop(param)
  63. return facts
  64. def migrate_admission_plugin_facts(facts):
  65. """ Apply migrations for admission plugin facts """
  66. if 'master' in facts:
  67. if 'kube_admission_plugin_config' in facts['master']:
  68. if 'admission_plugin_config' not in facts['master']:
  69. facts['master']['admission_plugin_config'] = dict()
  70. # Merge existing kube_admission_plugin_config with admission_plugin_config.
  71. facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
  72. facts['master']['kube_admission_plugin_config'],
  73. additive_facts_to_overwrite=[])
  74. # Remove kube_admission_plugin_config fact
  75. facts['master'].pop('kube_admission_plugin_config', None)
  76. return facts
  77. def migrate_local_facts(facts):
  78. """ Apply migrations of local facts """
  79. migrated_facts = copy.deepcopy(facts)
  80. migrated_facts = migrate_common_facts(migrated_facts)
  81. migrated_facts = migrate_admission_plugin_facts(migrated_facts)
  82. return migrated_facts
  83. def first_ip(network):
  84. """ Return the first IPv4 address in network
  85. Args:
  86. network (str): network in CIDR format
  87. Returns:
  88. str: first IPv4 address
  89. """
  90. atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0] # noqa: E731
  91. itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr)) # noqa: E731
  92. (address, netmask) = network.split('/')
  93. netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
  94. return itoa((atoi(address) & netmask_i) + 1)
  95. def hostname_valid(hostname):
  96. """ Test if specified hostname should be considered valid
  97. Args:
  98. hostname (str): hostname to test
  99. Returns:
  100. bool: True if valid, otherwise False
  101. """
  102. if (not hostname or
  103. hostname.startswith('localhost') or
  104. hostname.endswith('localdomain') or
  105. # OpenShift will not allow a node with more than 63 chars in name.
  106. len(hostname) > 63):
  107. return False
  108. return True
  109. def choose_hostname(hostnames=None, fallback=''):
  110. """ Choose a hostname from the provided hostnames
  111. Given a list of hostnames and a fallback value, choose a hostname to
  112. use. This function will prefer fqdns if they exist (excluding any that
  113. begin with localhost or end with localdomain) over ip addresses.
  114. Args:
  115. hostnames (list): list of hostnames
  116. fallback (str): default value to set if hostnames does not contain
  117. a valid hostname
  118. Returns:
  119. str: chosen hostname
  120. """
  121. hostname = fallback
  122. if hostnames is None:
  123. return hostname
  124. ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
  125. ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
  126. hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]
  127. for host_list in (hosts, ips):
  128. for host in host_list:
  129. if hostname_valid(host):
  130. return host
  131. return hostname
  132. def query_metadata(metadata_url, headers=None, expect_json=False):
  133. """ Return metadata from the provided metadata_url
  134. Args:
  135. metadata_url (str): metadata url
  136. headers (dict): headers to set for metadata request
  137. expect_json (bool): does the metadata_url return json
  138. Returns:
  139. dict or list: metadata request result
  140. """
  141. result, info = fetch_url(module, metadata_url, headers=headers) # noqa: F405
  142. if info['status'] != 200:
  143. raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
  144. if expect_json:
  145. return module.from_json(to_native(result.read())) # noqa: F405
  146. else:
  147. return [to_native(line.strip()) for line in result.readlines()]
  148. def walk_metadata(metadata_url, headers=None, expect_json=False):
  149. """ Walk the metadata tree and return a dictionary of the entire tree
  150. Args:
  151. metadata_url (str): metadata url
  152. headers (dict): headers to set for metadata request
  153. expect_json (bool): does the metadata_url return json
  154. Returns:
  155. dict: the result of walking the metadata tree
  156. """
  157. metadata = dict()
  158. for line in query_metadata(metadata_url, headers, expect_json):
  159. if line.endswith('/') and not line == 'public-keys/':
  160. key = line[:-1]
  161. metadata[key] = walk_metadata(metadata_url + line,
  162. headers, expect_json)
  163. else:
  164. results = query_metadata(metadata_url + line, headers,
  165. expect_json)
  166. if len(results) == 1:
  167. # disable pylint maybe-no-member because overloaded use of
  168. # the module name causes pylint to not detect that results
  169. # is an array or hash
  170. # pylint: disable=maybe-no-member
  171. metadata[line] = results.pop()
  172. else:
  173. metadata[line] = results
  174. return metadata
  175. def get_provider_metadata(metadata_url, supports_recursive=False,
  176. headers=None, expect_json=False):
  177. """ Retrieve the provider metadata
  178. Args:
  179. metadata_url (str): metadata url
  180. supports_recursive (bool): does the provider metadata api support
  181. recursion
  182. headers (dict): headers to set for metadata request
  183. expect_json (bool): does the metadata_url return json
  184. Returns:
  185. dict: the provider metadata
  186. """
  187. try:
  188. if supports_recursive:
  189. metadata = query_metadata(metadata_url, headers,
  190. expect_json)
  191. else:
  192. metadata = walk_metadata(metadata_url, headers,
  193. expect_json)
  194. except OpenShiftFactsMetadataUnavailableError:
  195. metadata = None
  196. return metadata
  197. def normalize_gce_facts(metadata, facts):
  198. """ Normalize gce facts
  199. Args:
  200. metadata (dict): provider metadata
  201. facts (dict): facts to update
  202. Returns:
  203. dict: the result of adding the normalized metadata to the provided
  204. facts dict
  205. """
  206. for interface in metadata['instance']['networkInterfaces']:
  207. int_info = dict(ips=[interface['ip']], network_type='gce')
  208. int_info['public_ips'] = [ac['externalIp'] for ac
  209. in interface['accessConfigs']]
  210. int_info['public_ips'].extend(interface['forwardedIps'])
  211. _, _, network_id = interface['network'].rpartition('/')
  212. int_info['network_id'] = network_id
  213. facts['network']['interfaces'].append(int_info)
  214. _, _, zone = metadata['instance']['zone'].rpartition('/')
  215. facts['zone'] = zone
  216. # GCE currently only supports a single interface
  217. facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
  218. pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
  219. facts['network']['public_ip'] = pub_ip
  220. # Split instance hostname from GCE metadata to use the short instance name
  221. facts['network']['hostname'] = metadata['instance']['hostname'].split('.')[0]
  222. # TODO: attempt to resolve public_hostname
  223. facts['network']['public_hostname'] = facts['network']['public_ip']
  224. return facts
  225. def normalize_aws_facts(metadata, facts):
  226. """ Normalize aws facts
  227. Args:
  228. metadata (dict): provider metadata
  229. facts (dict): facts to update
  230. Returns:
  231. dict: the result of adding the normalized metadata to the provided
  232. facts dict
  233. """
  234. for interface in sorted(
  235. metadata['network']['interfaces']['macs'].values(),
  236. key=lambda x: x['device-number']
  237. ):
  238. int_info = dict()
  239. var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
  240. for ips_var, int_var in iteritems(var_map):
  241. ips = interface.get(int_var)
  242. if isinstance(ips, string_types):
  243. int_info[ips_var] = [ips]
  244. else:
  245. int_info[ips_var] = ips
  246. if 'vpc-id' in interface:
  247. int_info['network_type'] = 'vpc'
  248. else:
  249. int_info['network_type'] = 'classic'
  250. if int_info['network_type'] == 'vpc':
  251. int_info['network_id'] = interface['subnet-id']
  252. else:
  253. int_info['network_id'] = None
  254. facts['network']['interfaces'].append(int_info)
  255. facts['zone'] = metadata['placement']['availability-zone']
  256. # TODO: actually attempt to determine default local and public ips
  257. # by using the ansible default ip fact and the ipv4-associations
  258. # from the ec2 metadata
  259. facts['network']['ip'] = metadata.get('local-ipv4')
  260. facts['network']['public_ip'] = metadata.get('public-ipv4')
  261. # TODO: verify that local hostname makes sense and is resolvable
  262. facts['network']['hostname'] = metadata.get('local-hostname')
  263. # TODO: verify that public hostname makes sense and is resolvable
  264. facts['network']['public_hostname'] = metadata.get('public-hostname')
  265. return facts
  266. def normalize_openstack_facts(metadata, facts):
  267. """ Normalize openstack facts
  268. Args:
  269. metadata (dict): provider metadata
  270. facts (dict): facts to update
  271. Returns:
  272. dict: the result of adding the normalized metadata to the provided
  273. facts dict
  274. """
  275. # openstack ec2 compat api does not support network interfaces and
  276. # the version tested on did not include the info in the openstack
  277. # metadata api, should be updated if neutron exposes this.
  278. facts['zone'] = metadata['availability_zone']
  279. local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
  280. facts['network']['ip'] = local_ipv4
  281. facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
  282. for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
  283. ('public_hostname', 'public-hostname', 'public-ipv4')]:
  284. try:
  285. if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
  286. facts['network'][f_var] = metadata['ec2_compat'][h_var]
  287. else:
  288. facts['network'][f_var] = metadata['ec2_compat'][ip_var]
  289. except socket.gaierror:
  290. facts['network'][f_var] = metadata['ec2_compat'][ip_var]
  291. return facts
  292. def normalize_provider_facts(provider, metadata):
  293. """ Normalize provider facts
  294. Args:
  295. provider (str): host provider
  296. metadata (dict): provider metadata
  297. Returns:
  298. dict: the normalized provider facts
  299. """
  300. if provider is None or metadata is None:
  301. return {}
  302. # TODO: test for ipv6_enabled where possible (gce, aws do not support)
  303. # and configure ipv6 facts if available
  304. # TODO: add support for setting user_data if available
  305. facts = dict(name=provider, metadata=metadata,
  306. network=dict(interfaces=[], ipv6_enabled=False))
  307. if provider == 'gce':
  308. facts = normalize_gce_facts(metadata, facts)
  309. elif provider == 'aws':
  310. facts = normalize_aws_facts(metadata, facts)
  311. elif provider == 'openstack':
  312. facts = normalize_openstack_facts(metadata, facts)
  313. return facts
  314. def set_identity_providers_if_unset(facts):
  315. """ Set identity_providers fact if not already present in facts dict
  316. Args:
  317. facts (dict): existing facts
  318. Returns:
  319. dict: the facts dict updated with the generated identity providers
  320. facts if they were not already present
  321. """
  322. if 'master' in facts:
  323. deployment_type = facts['common']['deployment_type']
  324. if 'identity_providers' not in facts['master']:
  325. identity_provider = dict(
  326. name='allow_all', challenge=True, login=True,
  327. kind='AllowAllPasswordIdentityProvider'
  328. )
  329. if deployment_type == 'openshift-enterprise':
  330. identity_provider = dict(
  331. name='deny_all', challenge=True, login=True,
  332. kind='DenyAllPasswordIdentityProvider'
  333. )
  334. facts['master']['identity_providers'] = [identity_provider]
  335. return facts
def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict

        Derives api/console/etcd/loopback URL facts from the hostname, port
        and SSL facts already present.  Existing values are preserved: every
        URL fact is written with setdefault().

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
                  were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        # Prefer the configured cluster (public) hostname over this host's own.
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']
        # Per-endpoint SSL flags and ports, keyed by URL-fact prefix.
        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )
        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )
        etcd_urls = []
        # With an explicit (non-empty) etcd host list build one URL per host;
        # otherwise fall back to a single URL pointing at this master.
        if etcd_hosts != '':
            facts['master']['etcd_port'] = ports['etcd']
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]
        facts['master'].setdefault('etcd_urls', etcd_urls)
        # api/public_api use the (public) cluster hostname; loopback_api
        # always targets this host directly.
        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))
        # NOTE(review): dots are replaced with dashes here — presumably to
        # produce kubeconfig-safe cluster/context identifiers; confirm.
        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)
        # Console URLs additionally carry the console path component.
        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))
    return facts
  398. def set_aggregate_facts(facts):
  399. """ Set aggregate facts
  400. Args:
  401. facts (dict): existing facts
  402. Returns:
  403. dict: the facts dict updated with aggregated facts
  404. """
  405. all_hostnames = set()
  406. internal_hostnames = set()
  407. kube_svc_ip = first_ip(facts['common']['portal_net'])
  408. if 'common' in facts:
  409. all_hostnames.add(facts['common']['hostname'])
  410. all_hostnames.add(facts['common']['public_hostname'])
  411. all_hostnames.add(facts['common']['ip'])
  412. all_hostnames.add(facts['common']['public_ip'])
  413. facts['common']['kube_svc_ip'] = kube_svc_ip
  414. internal_hostnames.add(facts['common']['hostname'])
  415. internal_hostnames.add(facts['common']['ip'])
  416. cluster_domain = facts['common']['dns_domain']
  417. if 'master' in facts:
  418. if 'cluster_hostname' in facts['master']:
  419. all_hostnames.add(facts['master']['cluster_hostname'])
  420. if 'cluster_public_hostname' in facts['master']:
  421. all_hostnames.add(facts['master']['cluster_public_hostname'])
  422. svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
  423. 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
  424. 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
  425. all_hostnames.update(svc_names)
  426. internal_hostnames.update(svc_names)
  427. all_hostnames.add(kube_svc_ip)
  428. internal_hostnames.add(kube_svc_ip)
  429. facts['common']['all_hostnames'] = list(all_hostnames)
  430. facts['common']['internal_hostnames'] = list(internal_hostnames)
  431. return facts
  432. def set_deployment_facts_if_unset(facts):
  433. """ Set Facts that vary based on deployment_type. This currently
  434. includes master.registry_url
  435. Args:
  436. facts (dict): existing facts
  437. Returns:
  438. dict: the facts dict updated with the generated deployment_type
  439. facts
  440. """
  441. if 'master' in facts:
  442. deployment_type = facts['common']['deployment_type']
  443. openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
  444. if 'disabled_features' not in facts['master']:
  445. if facts['common']['deployment_subtype'] == 'registry':
  446. facts['master']['disabled_features'] = openshift_features
  447. if 'registry_url' not in facts['master']:
  448. registry_url = 'openshift/origin-${component}:${version}'
  449. if deployment_type == 'openshift-enterprise':
  450. registry_url = 'openshift3/ose-${component}:${version}'
  451. facts['master']['registry_url'] = registry_url
  452. return facts
  453. # pylint: disable=too-many-statements
  454. def set_version_facts_if_unset(facts):
  455. """ Set version facts. This currently includes common.version and
  456. common.version_gte_3_x
  457. Args:
  458. facts (dict): existing facts
  459. Returns:
  460. dict: the facts dict updated with version facts.
  461. """
  462. if 'common' in facts:
  463. openshift_version = get_openshift_version(facts)
  464. if openshift_version and openshift_version != "latest":
  465. version = LooseVersion(openshift_version)
  466. facts['common']['version'] = openshift_version
  467. facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
  468. version_gte_3_6 = version >= LooseVersion('3.6')
  469. version_gte_3_7 = version >= LooseVersion('3.7')
  470. version_gte_3_8 = version >= LooseVersion('3.8')
  471. version_gte_3_9 = version >= LooseVersion('3.9')
  472. version_gte_3_10 = version >= LooseVersion('3.10')
  473. else:
  474. # 'Latest' version is set to True, 'Next' versions set to False
  475. version_gte_3_6 = True
  476. version_gte_3_7 = True
  477. version_gte_3_8 = False
  478. version_gte_3_9 = False
  479. version_gte_3_10 = False
  480. facts['common']['version_gte_3_6'] = version_gte_3_6
  481. facts['common']['version_gte_3_7'] = version_gte_3_7
  482. facts['common']['version_gte_3_8'] = version_gte_3_8
  483. facts['common']['version_gte_3_9'] = version_gte_3_9
  484. facts['common']['version_gte_3_10'] = version_gte_3_10
  485. if version_gte_3_10:
  486. examples_content_version = 'v3.10'
  487. elif version_gte_3_9:
  488. examples_content_version = 'v3.9'
  489. elif version_gte_3_8:
  490. examples_content_version = 'v3.8'
  491. elif version_gte_3_7:
  492. examples_content_version = 'v3.7'
  493. elif version_gte_3_6:
  494. examples_content_version = 'v3.6'
  495. else:
  496. examples_content_version = 'v1.5'
  497. facts['common']['examples_content_version'] = examples_content_version
  498. return facts
  499. def set_sdn_facts_if_unset(facts, system_facts):
  500. """ Set sdn facts if not already present in facts dict
  501. Args:
  502. facts (dict): existing facts
  503. system_facts (dict): ansible_facts
  504. Returns:
  505. dict: the facts dict updated with the generated sdn facts if they
  506. were not already present
  507. """
  508. if 'master' in facts:
  509. # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
  510. # these might be overridden if they exist in the master config file
  511. sdn_cluster_network_cidr = '10.128.0.0/14'
  512. sdn_host_subnet_length = '9'
  513. master_cfg_path = os.path.join(facts['common']['config_base'],
  514. 'master/master-config.yaml')
  515. if os.path.isfile(master_cfg_path):
  516. with open(master_cfg_path, 'r') as master_cfg_f:
  517. config = yaml.safe_load(master_cfg_f.read())
  518. if 'networkConfig' in config:
  519. if 'clusterNetworkCIDR' in config['networkConfig']:
  520. sdn_cluster_network_cidr = \
  521. config['networkConfig']['clusterNetworkCIDR']
  522. if 'hostSubnetLength' in config['networkConfig']:
  523. sdn_host_subnet_length = \
  524. config['networkConfig']['hostSubnetLength']
  525. if 'sdn_cluster_network_cidr' not in facts['master']:
  526. facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
  527. if 'sdn_host_subnet_length' not in facts['master']:
  528. facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length
  529. if 'node' in facts and 'sdn_mtu' not in facts['node']:
  530. node_ip = facts['common']['ip']
  531. # default MTU if interface MTU cannot be detected
  532. facts['node']['sdn_mtu'] = '1450'
  533. for val in itervalues(system_facts):
  534. if isinstance(val, dict) and 'mtu' in val:
  535. mtu = val['mtu']
  536. if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
  537. facts['node']['sdn_mtu'] = str(mtu - 50)
  538. return facts
  539. def set_nodename(facts):
  540. """ set nodename """
  541. if 'node' in facts and 'common' in facts:
  542. if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
  543. facts['node']['nodename'] = facts['provider']['metadata']['instance']['hostname'].split('.')[0]
  544. # TODO: The openstack cloudprovider nodename setting was too opinionaed.
  545. # It needs to be generalized before it can be enabled again.
  546. # elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
  547. # facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
  548. else:
  549. facts['node']['nodename'] = facts['common']['hostname'].lower()
  550. return facts
  551. def format_url(use_ssl, hostname, port, path=''):
  552. """ Format url based on ssl flag, hostname, port and path
  553. Args:
  554. use_ssl (bool): is ssl enabled
  555. hostname (str): hostname
  556. port (str): port
  557. path (str): url path
  558. Returns:
  559. str: The generated url string
  560. """
  561. scheme = 'https' if use_ssl else 'http'
  562. netloc = hostname
  563. if (use_ssl and port != '443') or (not use_ssl and port != '80'):
  564. netloc += ":%s" % port
  565. try:
  566. url = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
  567. except AttributeError:
  568. # pylint: disable=undefined-variable
  569. url = urlunparse((scheme, netloc, path, '', '', ''))
  570. return url
def get_current_config(facts):
    """ Get current openshift config

        Collects the role names present in facts and, when the openshift
        binary and a legacy kubeconfig exist on disk, a masked copy of that
        kubeconfig (certificate and key material replaced with 'masked').

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]
        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )
        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
            try:
                _, output, _ = module.run_command(  # noqa: F405
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)
                # Mask certificate/key material so it never lands in facts.
                cad = 'certificate-authority-data'
                # NOTE(review): the indexing below assumes config['clusters']
                # / config['users'] map names to entries; a list-shaped
                # kubeconfig would raise TypeError here, which is swallowed
                # by the broad except below — confirm the expected shape.
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass
                current_config['kubeconfig'] = config
            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass
    return current_config
  625. def build_controller_args(facts):
  626. """ Build master controller_args """
  627. cloud_cfg_path = os.path.join(facts['common']['config_base'],
  628. 'cloudprovider')
  629. if 'master' in facts:
  630. controller_args = {}
  631. if 'cloudprovider' in facts:
  632. if 'kind' in facts['cloudprovider']:
  633. if facts['cloudprovider']['kind'] == 'aws':
  634. controller_args['cloud-provider'] = ['aws']
  635. controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
  636. if facts['cloudprovider']['kind'] == 'openstack':
  637. controller_args['cloud-provider'] = ['openstack']
  638. controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
  639. if facts['cloudprovider']['kind'] == 'gce':
  640. controller_args['cloud-provider'] = ['gce']
  641. controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
  642. if controller_args != {}:
  643. facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [])
  644. return facts
  645. def build_api_server_args(facts):
  646. """ Build master api_server_args """
  647. cloud_cfg_path = os.path.join(facts['common']['config_base'],
  648. 'cloudprovider')
  649. if 'master' in facts:
  650. api_server_args = {}
  651. if 'cloudprovider' in facts:
  652. if 'kind' in facts['cloudprovider']:
  653. if facts['cloudprovider']['kind'] == 'aws':
  654. api_server_args['cloud-provider'] = ['aws']
  655. api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
  656. if facts['cloudprovider']['kind'] == 'openstack':
  657. api_server_args['cloud-provider'] = ['openstack']
  658. api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
  659. if facts['cloudprovider']['kind'] == 'gce':
  660. api_server_args['cloud-provider'] = ['gce']
  661. api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
  662. if api_server_args != {}:
  663. facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [])
  664. return facts
  665. def is_service_running(service):
  666. """ Queries systemd through dbus to see if the service is running """
  667. service_running = False
  668. try:
  669. bus = SystemBus()
  670. systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
  671. manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
  672. service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
  673. service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
  674. service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
  675. service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
  676. service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
  677. if service_load_state == 'loaded' and service_active_state == 'active':
  678. service_running = True
  679. except DBusException:
  680. # TODO: do not swallow exception, as it may be hiding useful debugging
  681. # information.
  682. pass
  683. return service_running
def rpm_rebuilddb():
    """
    Runs rpm --rebuilddb to ensure the db is in good shape.
    """
    # Best effort; the return code is intentionally ignored.
    module.run_command(['/usr/bin/rpm', '--rebuilddb'])  # noqa: F405
  689. def get_version_output(binary, version_cmd):
  690. """ runs and returns the version output for a command """
  691. cmd = []
  692. for item in (binary, version_cmd):
  693. if isinstance(item, list):
  694. cmd.extend(item)
  695. else:
  696. cmd.append(item)
  697. if os.path.isfile(cmd[0]):
  698. _, output, _ = module.run_command(cmd) # noqa: F405
  699. return output
  700. # We may need this in the future.
  701. def get_docker_version_info():
  702. """ Parses and returns the docker version info """
  703. result = None
  704. if is_service_running('docker') or is_service_running('container-engine'):
  705. version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
  706. if 'Server' in version_info:
  707. result = {
  708. 'api_version': version_info['Server']['API version'],
  709. 'version': version_info['Server']['Version']
  710. }
  711. return result
  712. def get_openshift_version(facts):
  713. """ Get current version of openshift on the host.
  714. Checks a variety of ways ranging from fastest to slowest.
  715. Args:
  716. facts (dict): existing facts
  717. optional cli_image for pulling the version number
  718. Returns:
  719. version: the current openshift version
  720. """
  721. version = None
  722. # No need to run this method repeatedly on a system if we already know the
  723. # version
  724. # TODO: We need a way to force reload this after upgrading bits.
  725. if 'common' in facts:
  726. if 'version' in facts['common'] and facts['common']['version'] is not None:
  727. return chomp_commit_offset(facts['common']['version'])
  728. if os.path.isfile('/usr/bin/openshift'):
  729. _, output, _ = module.run_command(['/usr/bin/openshift', 'version']) # noqa: F405
  730. version = parse_openshift_version(output)
  731. else:
  732. version = get_container_openshift_version(facts)
  733. # Handle containerized masters that have not yet been configured as a node.
  734. # This can be very slow and may get re-run multiple times, so we only use this
  735. # if other methods failed to find a version.
  736. if not version and os.path.isfile('/usr/local/bin/openshift'):
  737. _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version']) # noqa: F405
  738. version = parse_openshift_version(output)
  739. return chomp_commit_offset(version)
  740. def chomp_commit_offset(version):
  741. """Chomp any "+git.foo" commit offset string from the given `version`
  742. and return the modified version string.
  743. Ex:
  744. - chomp_commit_offset(None) => None
  745. - chomp_commit_offset(1337) => "1337"
  746. - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
  747. - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
  748. - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
  749. """
  750. if version is None:
  751. return version
  752. else:
  753. # Stringify, just in case it's a Number type. Split by '+' and
  754. # return the first split. No concerns about strings without a
  755. # '+', .split() returns an array of the original string.
  756. return str(version).split('+')[0]
  757. def get_container_openshift_version(facts):
  758. """
  759. If containerized, see if we can determine the installed version via the
  760. systemd environment files.
  761. """
  762. deployment_type = facts['common']['deployment_type']
  763. service_type_dict = {'origin': 'origin',
  764. 'openshift-enterprise': 'atomic-openshift'}
  765. service_type = service_type_dict[deployment_type]
  766. for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']:
  767. env_path = filename % service_type
  768. if not os.path.exists(env_path):
  769. continue
  770. with open(env_path) as env_file:
  771. for line in env_file:
  772. if line.startswith("IMAGE_VERSION="):
  773. tag = line[len("IMAGE_VERSION="):].strip()
  774. # Remove leading "v" and any trailing release info, we just want
  775. # a version number here:
  776. no_v_version = tag[1:] if tag[0] == 'v' else tag
  777. version = no_v_version.split("-")[0]
  778. return version
  779. return None
  780. def parse_openshift_version(output):
  781. """ Apply provider facts to supplied facts dict
  782. Args:
  783. string: output of 'openshift version'
  784. Returns:
  785. string: the version number
  786. """
  787. versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
  788. ver = versions.get('openshift', '')
  789. # Remove trailing build number and commit hash from older versions, we need to return a straight
  790. # w.x.y.z version here for use as openshift_version throughout the playbooks/roles. (i.e. 3.1.1.6-64-g80b61da)
  791. ver = ver.split('-')[0]
  792. return ver
  793. def apply_provider_facts(facts, provider_facts):
  794. """ Apply provider facts to supplied facts dict
  795. Args:
  796. facts (dict): facts dict to update
  797. provider_facts (dict): provider facts to apply
  798. roles: host roles
  799. Returns:
  800. dict: the merged facts
  801. """
  802. if not provider_facts:
  803. return facts
  804. common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
  805. for h_var, ip_var in common_vars:
  806. ip_value = provider_facts['network'].get(ip_var)
  807. if ip_value:
  808. facts['common'][ip_var] = ip_value
  809. facts['common'][h_var] = choose_hostname(
  810. [provider_facts['network'].get(h_var)],
  811. facts['common'][h_var]
  812. )
  813. facts['provider'] = provider_facts
  814. return facts
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite):
    """ Recursively merge facts dicts

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config',
                            "builddefaults",
                            "buildoverrides"]

    facts = dict()
    for key, value in iteritems(orig):
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watchout for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], string_types):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    # Concatenate both lists, preserving order, dropping dupes.
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
                # NOTE(review): if either side is not a list, the key is
                # silently dropped from the result — confirm this is intended.
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)

    # Keys present only in `new` are copied over verbatim.
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watchout for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], string_types):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])

    return facts
def save_local_facts(filename, facts):
    """ Save local facts

        Args:
            filename (str): local facts file
            facts (dict): facts to set
        Raises:
            OpenShiftFactsFileWriteError: if the directory or file cannot
                be created or written
    """
    try:
        fact_dir = os.path.dirname(filename)
        try:
            os.makedirs(fact_dir)  # try to make the directory
        except OSError as exception:
            if exception.errno != errno.EEXIST:  # but it is okay if it is already there
                raise  # pass any other exceptions up the chain
        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))  # noqa: F405
        # Facts may contain sensitive data; restrict to owner read/write.
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )
  903. def get_local_facts_from_file(filename):
  904. """ Retrieve local facts from fact file
  905. Args:
  906. filename (str): local facts file
  907. Returns:
  908. dict: the retrieved facts
  909. """
  910. local_facts = dict()
  911. try:
  912. # Handle conversion of INI style facts file to json style
  913. ini_facts = configparser.SafeConfigParser()
  914. ini_facts.read(filename)
  915. for section in ini_facts.sections():
  916. local_facts[section] = dict()
  917. for key, value in ini_facts.items(section):
  918. local_facts[section][key] = value
  919. except (configparser.MissingSectionHeaderError,
  920. configparser.ParsingError):
  921. try:
  922. with open(filename, 'r') as facts_file:
  923. local_facts = json.load(facts_file)
  924. except (ValueError, IOError):
  925. pass
  926. return local_facts
  927. def sort_unique(alist):
  928. """ Sorts and de-dupes a list
  929. Args:
  930. list: a list
  931. Returns:
  932. list: a sorted de-duped list
  933. """
  934. return sorted(list(set(alist)))
  935. def safe_get_bool(fact):
  936. """ Get a boolean fact safely.
  937. Args:
  938. facts: fact to convert
  939. Returns:
  940. bool: given fact as a bool
  941. """
  942. return bool(strtobool(str(fact)))
def set_proxy_facts(facts):
    """ Set global proxy facts

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'common' in facts:
        common = facts['common']
        if 'http_proxy' in common or 'https_proxy' in common or 'no_proxy' in common:
            # no_proxy is handled as a list internally and flattened back to
            # a comma-separated string at the end of this block.
            if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
                common['no_proxy'] = common['no_proxy'].split(",")
            elif 'no_proxy' not in common:
                common['no_proxy'] = []

            # See https://bugzilla.redhat.com/show_bug.cgi?id=1466783
            # masters behind a proxy need to connect to etcd via IP
            if 'no_proxy_etcd_host_ips' in common:
                if isinstance(common['no_proxy_etcd_host_ips'], string_types):
                    common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(','))

            if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
                if 'no_proxy_internal_hostnames' in common:
                    common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
            # We always add local dns domain and ourselves no matter what
            # NOTE(review): assumes portal_net holds a valid CIDR string and
            # that its second address is the kube service IP — confirm.
            kube_svc_ip = str(ipaddress.ip_network(text_type(common['portal_net']))[1])
            common['no_proxy'].append(kube_svc_ip)
            common['no_proxy'].append('.' + common['dns_domain'])
            common['no_proxy'].append('.svc')
            common['no_proxy'].append(common['hostname'])
            common['no_proxy'] = ','.join(sort_unique(common['no_proxy']))
        facts['common'] = common
    return facts
def set_builddefaults_facts(facts):
    """ Set build defaults including setting proxy values from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Copy values from common to builddefaults
        if 'http_proxy' not in builddefaults and 'http_proxy' in common:
            builddefaults['http_proxy'] = common['http_proxy']
        if 'https_proxy' not in builddefaults and 'https_proxy' in common:
            builddefaults['https_proxy'] = common['https_proxy']
        if 'no_proxy' not in builddefaults and 'no_proxy' in common:
            builddefaults['no_proxy'] = common['no_proxy']

        # Create git specific facts from generic values, if git specific values are
        # not defined.
        if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
            builddefaults['git_http_proxy'] = builddefaults['http_proxy']
        if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
            builddefaults['git_https_proxy'] = builddefaults['https_proxy']
        if 'git_no_proxy' not in builddefaults and 'no_proxy' in builddefaults:
            builddefaults['git_no_proxy'] = builddefaults['no_proxy']

        # If we're actually defining a builddefaults config then create admission_plugin_config
        # then merge builddefaults[config] structure into admission_plugin_config
        # 'config' is the 'openshift_builddefaults_json' inventory variable
        # NOTE(review): assumes facts['master'] exists whenever a
        # builddefaults config is provided — confirm with callers.
        if 'config' in builddefaults:
            if 'admission_plugin_config' not in facts['master']:
                # Scaffold out the full expected datastructure
                facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
            if 'env' in facts['master']['admission_plugin_config']['BuildDefaults']['configuration']:
                # Drop env entries whose value is empty (e.g. unset proxies).
                delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])

    return facts
  1014. def delete_empty_keys(keylist):
  1015. """ Delete dictionary elements from keylist where "value" is empty.
  1016. Args:
  1017. keylist(list): A list of builddefault configuration envs.
  1018. Returns:
  1019. none
  1020. Example:
  1021. keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
  1022. {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
  1023. {'name': 'NO_PROXY', 'value': ''}]
  1024. After calling delete_empty_keys the provided list is modified to become:
  1025. [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
  1026. {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}]
  1027. """
  1028. count = 0
  1029. for i in range(0, len(keylist)):
  1030. if len(keylist[i - count]['value']) == 0:
  1031. del keylist[i - count]
  1032. count += 1
  1033. def set_buildoverrides_facts(facts):
  1034. """ Set build overrides
  1035. Args:
  1036. facts(dict): existing facts
  1037. Returns:
  1038. facts(dict): Updated facts with missing values
  1039. """
  1040. if 'buildoverrides' in facts:
  1041. buildoverrides = facts['buildoverrides']
  1042. # If we're actually defining a buildoverrides config then create admission_plugin_config
  1043. # then merge buildoverrides[config] structure into admission_plugin_config
  1044. if 'config' in buildoverrides:
  1045. if 'admission_plugin_config' not in facts['master']:
  1046. facts['master']['admission_plugin_config'] = dict()
  1047. facts['master']['admission_plugin_config'].update(buildoverrides['config'])
  1048. return facts
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated containerization
            facts
    """
    # NOTE(review): currently a no-op — returns facts unchanged. Presumably
    # retained so the generate_facts pipeline stays stable; confirm before
    # removing.
    return facts
  1059. class OpenShiftFactsInternalError(Exception):
  1060. """Origin Facts Error"""
  1061. pass
  1062. class OpenShiftFactsUnsupportedRoleError(Exception):
  1063. """Origin Facts Unsupported Role Error"""
  1064. pass
  1065. class OpenShiftFactsFileWriteError(Exception):
  1066. """Origin Facts File Write Error"""
  1067. pass
  1068. class OpenShiftFactsMetadataUnavailableError(Exception):
  1069. """Origin Facts Metadata Unavailable Error"""
  1070. pass
  1071. class OpenShiftFacts(object):
  1072. """ Origin Facts
  1073. Attributes:
  1074. facts (dict): facts for the host
  1075. Args:
  1076. module (AnsibleModule): an AnsibleModule object
  1077. role (str): role for setting local facts
  1078. filename (str): local facts file to use
  1079. local_facts (dict): local facts to set
  1080. additive_facts_to_overwrite (list): additive facts to overwrite in jinja
  1081. '.' notation ex: ['master.named_certificates']
  1082. Raises:
  1083. OpenShiftFactsUnsupportedRoleError:
  1084. """
  1085. known_roles = ['builddefaults',
  1086. 'buildoverrides',
  1087. 'cloudprovider',
  1088. 'common',
  1089. 'etcd',
  1090. 'master',
  1091. 'node']
    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None):
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        # Collect system facts and preface each fact with 'ansible_'.
        try:
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])  # noqa: F405
            additional_facts = {}
            for (k, v) in self.system_facts.items():
                # Duplicate each fact under an 'ansible_'-prefixed key with
                # dashes normalized to underscores.
                additional_facts["ansible_%s" % k.replace('-', '_')] = v
            self.system_facts.update(additional_facts)
        except UnboundLocalError:
            # ansible-2.2,2.3 — older facts API takes a different call shape.
            self.system_facts = get_all_facts(module)['ansible_facts']  # noqa: F405

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite)
    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
            Returns:
                dict: The generated facts
        """
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        # Build up facts in layers: defaults, then provider detection, then
        # local facts override both; finally run the derivation pipeline.
        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite)
        facts['current_config'] = get_current_config(facts)
        facts = set_url_facts_if_unset(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_proxy_facts(facts)
        facts = set_builddefaults_facts(facts)
        facts = set_buildoverrides_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)
    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host
                deployment_type (str): openshift deployment type
                deployment_subtype (str): openshift deployment subtype
            Returns:
                dict: The generated default facts
        """
        defaults = {}
        ip_addr = self.system_facts['ansible_default_ipv4']['address']
        # Prefer the fully-qualified name when `hostname -f` succeeds.
        exit_code, output, _ = module.run_command(['hostname', '-f'])  # noqa: F405
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr).lower()

        defaults['common'] = dict(ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  dns_domain='cluster.local',
                                  config_base='/etc/origin')

        if 'master' in roles:
            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='2379',
                                      portal_net='172.30.0.0/16',
                                      embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        return defaults
    def guess_host_provider(self):
        """ Guess the host provider

            Returns:
                dict: The generated default facts for the detected provider
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['ansible_product_name']
        product_version = self.system_facts['ansible_product_version']
        virt_type = self.system_facts['ansible_virtualization_type']
        virt_role = self.system_facts['ansible_virtualization_role']
        bios_vendor = self.system_facts['ansible_system_vendor']
        provider = None
        metadata = None

        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif bios_vendor == 'Amazon EC2':
            # Adds support for Amazon EC2 C5 instance types
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
            # Older Xen-based EC2 instance types identify via product_version.
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                # Without the EC2-compatible endpoint, treat metadata as
                # unavailable.
                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)
    def init_provider_facts(self):
        """ Initialize the provider facts

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        # Normalize provider-specific metadata into the common facts schema.
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts
    # Disabling too-many-branches and too-many-locals.
    # This should be cleaned up as a TODO item.
    # pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()
        if facts is not None:
            # New facts are namespaced under this instance's role.
            facts_to_set[self.role] = facts

        local_facts = get_local_facts_from_file(self.filename)
        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite)
        new_local_facts = self.remove_empty_facts(new_local_facts)

        # Only validate and persist when something actually changed, and
        # never write to disk in check mode.
        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:  # noqa: F405
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts
  1300. def remove_empty_facts(self, facts=None):
  1301. """ Remove empty facts
  1302. Args:
  1303. facts (dict): facts to clean
  1304. """
  1305. facts_to_remove = []
  1306. for fact, value in iteritems(facts):
  1307. if isinstance(facts[fact], dict):
  1308. facts[fact] = self.remove_empty_facts(facts[fact])
  1309. else:
  1310. if value == "" or value == [""] or value is None:
  1311. facts_to_remove.append(fact)
  1312. for fact in facts_to_remove:
  1313. del facts[fact]
  1314. return facts
    def validate_local_facts(self, facts=None):
        """ Validate local facts

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            msg = 'Invalid facts detected:\n'
            # pylint: disable=consider-iterating-dictionary
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            # fail_json aborts module execution with the collected messages.
            module.fail_json(msg=msg, changed=self.changed)  # noqa: F405
  1328. # disabling pylint errors for line-too-long since we're dealing
  1329. # with best effort reduction of error messages here.
  1330. # disabling errors for too-many-branches since we require checking
  1331. # many conditions.
  1332. # pylint: disable=line-too-long, too-many-branches
  1333. @staticmethod
  1334. def validate_master_facts(facts, invalid_facts):
  1335. """ Validate master facts
  1336. Args:
  1337. facts (dict): local facts to validate
  1338. invalid_facts (dict): collected invalid_facts
  1339. Returns:
  1340. dict: Invalid facts
  1341. """
  1342. if 'master' in facts:
  1343. # openshift.master.session_auth_secrets
  1344. if 'session_auth_secrets' in facts['master']:
  1345. session_auth_secrets = facts['master']['session_auth_secrets']
  1346. if not issubclass(type(session_auth_secrets), list):
  1347. invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
  1348. elif 'session_encryption_secrets' not in facts['master']:
  1349. invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption secrets must be set '
  1350. 'if openshift_master_session_auth_secrets is provided.')
  1351. elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
  1352. invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
  1353. 'openshift_master_session_encryption_secrets must be '
  1354. 'equal length.')
  1355. else:
  1356. for secret in session_auth_secrets:
  1357. if len(secret) < 32:
  1358. invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
  1359. 'Secrets must be at least 32 characters in length.')
  1360. # openshift.master.session_encryption_secrets
  1361. if 'session_encryption_secrets' in facts['master']:
  1362. session_encryption_secrets = facts['master']['session_encryption_secrets']
  1363. if not issubclass(type(session_encryption_secrets), list):
  1364. invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
  1365. elif 'session_auth_secrets' not in facts['master']:
  1366. invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
  1367. 'set if openshift_master_session_encryption_secrets '
  1368. 'is provided.')
  1369. else:
  1370. for secret in session_encryption_secrets:
  1371. if len(secret) not in [16, 24, 32]:
  1372. invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
  1373. 'Secrets must be 16, 24, or 32 characters in length.')
  1374. return invalid_facts
  1375. def main():
  1376. """ main """
  1377. # disabling pylint errors for global-variable-undefined and invalid-name
  1378. # for 'global module' usage, since it is required to use ansible_facts
  1379. # pylint: disable=global-variable-undefined, invalid-name
  1380. global module
  1381. module = AnsibleModule( # noqa: F405
  1382. argument_spec=dict(
  1383. role=dict(default='common', required=False,
  1384. choices=OpenShiftFacts.known_roles),
  1385. local_facts=dict(default=None, type='dict', required=False),
  1386. additive_facts_to_overwrite=dict(default=[], type='list', required=False),
  1387. ),
  1388. supports_check_mode=True,
  1389. add_file_common_args=True,
  1390. )
  1391. if not HAVE_DBUS:
  1392. module.fail_json(msg="This module requires dbus python bindings") # noqa: F405
  1393. module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter'] # noqa: F405
  1394. module.params['gather_timeout'] = 10 # noqa: F405
  1395. module.params['filter'] = '*' # noqa: F405
  1396. role = module.params['role'] # noqa: F405
  1397. local_facts = module.params['local_facts'] # noqa: F405
  1398. additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405
  1399. fact_file = '/etc/ansible/facts.d/openshift.fact'
  1400. openshift_facts = OpenShiftFacts(role,
  1401. fact_file,
  1402. local_facts,
  1403. additive_facts_to_overwrite)
  1404. file_params = module.params.copy() # noqa: F405
  1405. file_params['path'] = fact_file
  1406. file_args = module.load_file_common_arguments(file_params) # noqa: F405
  1407. changed = module.set_fs_attributes_if_different(file_args, # noqa: F405
  1408. openshift_facts.changed)
  1409. return module.exit_json(changed=changed, # noqa: F405
  1410. ansible_facts=openshift_facts.facts)
# Standard Ansible module entry point when executed directly.
if __name__ == '__main__':
    main()