openshift_facts.py

#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.

"""Ansible module for retrieving and setting openshift related facts"""

# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
import errno
import json
import re
import os
import yaml
import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import configparser

# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *  # noqa: F403
from ansible.module_utils.facts import *  # noqa: F403
from ansible.module_utils.urls import *  # noqa: F403
from ansible.module_utils.six import iteritems, itervalues
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
from ansible.module_utils._text import to_native

HAVE_DBUS = False
try:
    from dbus import SystemBus, Interface
    from dbus.exceptions import DBusException
    HAVE_DBUS = True
except ImportError:
    pass

DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''


# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
    """ Migrate facts from various roles into common """
    params = {
        'node': ('portal_net',),
        'master': ('portal_net',)
    }
    if 'common' not in facts:
        facts['common'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['common'][param] = facts[role].pop(param)
    return facts
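# Example for migrate_common_facts(): given facts such as
# {'node': {'portal_net': '172.30.0.0/16'}}, the portal_net value is moved so that
# facts['common']['portal_net'] == '172.30.0.0/16' and the key is popped from the
# node role (values shown are illustrative).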


def migrate_node_facts(facts):
    """ Migrate facts from various roles into node """
    params = {
        'common': ('dns_ip',),
    }
    if 'node' not in facts:
        facts['node'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['node'][param] = facts[role].pop(param)
    return facts


def migrate_admission_plugin_facts(facts):
    """ Apply migrations for admission plugin facts """
    if 'master' in facts:
        if 'kube_admission_plugin_config' in facts['master']:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            # Merge existing kube_admission_plugin_config with admission_plugin_config.
            facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
                                                                     facts['master']['kube_admission_plugin_config'],
                                                                     additive_facts_to_overwrite=[])
            # Remove kube_admission_plugin_config fact
            facts['master'].pop('kube_admission_plugin_config', None)
    return facts


def migrate_local_facts(facts):
    """ Apply migrations of local facts """
    migrated_facts = copy.deepcopy(facts)
    migrated_facts = migrate_common_facts(migrated_facts)
    migrated_facts = migrate_node_facts(migrated_facts)
    migrated_facts = migrate_admission_plugin_facts(migrated_facts)
    return migrated_facts


def first_ip(network):
    """ Return the first IPv4 address in network

        Args:
            network (str): network in CIDR format
        Returns:
            str: first IPv4 address
    """
    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]  # noqa: E731
    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))  # noqa: E731

    (address, netmask) = network.split('/')
    netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
    return itoa((atoi(address) & netmask_i) + 1)
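# Example for first_ip(): first_ip('172.30.0.0/16') masks the address to the network
# base (172.30.0.0) and adds one, returning '172.30.0.1'. The bare prefix length works
# because inet_aton('16') parses the string as the integer 16.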


def hostname_valid(hostname):
    """ Test if specified hostname should be considered valid

        Args:
            hostname (str): hostname to test
        Returns:
            bool: True if valid, otherwise False
    """
    if (not hostname or
            hostname.startswith('localhost') or
            hostname.endswith('localdomain') or
            # OpenShift will not allow a node with more than 63 chars in name.
            len(hostname) > 63):
        return False

    return True
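# Example for hostname_valid(): 'localhost.example.com' and 'node1.localdomain' are
# rejected (localhost prefix / localdomain suffix), while 'node1.example.com' is
# accepted (illustrative hostnames).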


def choose_hostname(hostnames=None, fallback=''):
    """ Choose a hostname from the provided hostnames

        Given a list of hostnames and a fallback value, choose a hostname to
        use. This function will prefer fqdns if they exist (excluding any that
        begin with localhost or end with localdomain) over ip addresses.

        Args:
            hostnames (list): list of hostnames
            fallback (str): default value to set if hostnames does not contain
                a valid hostname
        Returns:
            str: chosen hostname
    """
    hostname = fallback
    if hostnames is None:
        return hostname

    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
    ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
    hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]

    for host_list in (hosts, ips):
        for host in host_list:
            if hostname_valid(host):
                return host

    return hostname
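# Example for choose_hostname(): choose_hostname(['10.3.9.20', 'node1.example.com'])
# returns 'node1.example.com', since candidates are split into IPs and non-IPs and
# the non-IP list is searched first (illustrative values).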


def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Return metadata from the provided metadata_url

        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict or list: metadata request result
    """
    result, info = fetch_url(module, metadata_url, headers=headers)  # noqa: F405
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        return module.from_json(to_native(result.read()))  # noqa: F405
    else:
        return [to_native(line.strip()) for line in result.readlines()]


def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Walk the metadata tree and return a dictionary of the entire tree

        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the result of walking the metadata tree
    """
    metadata = dict()

    for line in query_metadata(metadata_url, headers, expect_json):
        if line.endswith('/') and not line == 'public-keys/':
            key = line[:-1]
            metadata[key] = walk_metadata(metadata_url + line,
                                          headers, expect_json)
        else:
            results = query_metadata(metadata_url + line, headers,
                                     expect_json)
            if len(results) == 1:
                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                metadata[line] = results.pop()
            else:
                metadata[line] = results
    return metadata


def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Retrieve the provider metadata

        Args:
            metadata_url (str): metadata url
            supports_recursive (bool): does the provider metadata api support
                recursion
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the provider metadata
    """
    try:
        if supports_recursive:
            metadata = query_metadata(metadata_url, headers,
                                      expect_json)
        else:
            metadata = walk_metadata(metadata_url, headers,
                                     expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        metadata = None
    return metadata


def normalize_gce_facts(metadata, facts):
    """ Normalize gce facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                  facts dict
    """
    for interface in metadata['instance']['networkInterfaces']:
        int_info = dict(ips=[interface['ip']], network_type='gce')
        int_info['public_ips'] = [ac['externalIp'] for ac
                                  in interface['accessConfigs']]
        int_info['public_ips'].extend(interface['forwardedIps'])
        _, _, network_id = interface['network'].rpartition('/')
        int_info['network_id'] = network_id
        facts['network']['interfaces'].append(int_info)

    _, _, zone = metadata['instance']['zone'].rpartition('/')
    facts['zone'] = zone

    # GCE currently only supports a single interface
    facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
    pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
    facts['network']['public_ip'] = pub_ip
    # Split instance hostname from GCE metadata to use the short instance name
    facts['network']['hostname'] = metadata['instance']['hostname'].split('.')[0]

    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']

    return facts


def normalize_aws_facts(metadata, facts):
    """ Normalize aws facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                  facts dict
    """
    for interface in sorted(
            metadata['network']['interfaces']['macs'].values(),
            key=lambda x: x['device-number']
    ):
        int_info = dict()
        var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
        for ips_var, int_var in iteritems(var_map):
            ips = interface.get(int_var)
            if isinstance(ips, string_types):
                int_info[ips_var] = [ips]
            else:
                int_info[ips_var] = ips
        if 'vpc-id' in interface:
            int_info['network_type'] = 'vpc'
        else:
            int_info['network_type'] = 'classic'
        if int_info['network_type'] == 'vpc':
            int_info['network_id'] = interface['subnet-id']
        else:
            int_info['network_id'] = None
        facts['network']['interfaces'].append(int_info)

    facts['zone'] = metadata['placement']['availability-zone']

    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')

    # TODO: verify that local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')

    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = metadata.get('public-hostname')

    return facts


def normalize_openstack_facts(metadata, facts):
    """ Normalize openstack facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                  facts dict
    """
    # openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api, should be updated if neutron exposes this.

    facts['zone'] = metadata['availability_zone']
    local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
    facts['network']['ip'] = local_ipv4
    facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']

    for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
                                 ('public_hostname', 'public-hostname', 'public-ipv4')]:
        try:
            if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
                facts['network'][f_var] = metadata['ec2_compat'][h_var]
            else:
                facts['network'][f_var] = metadata['ec2_compat'][ip_var]
        except socket.gaierror:
            facts['network'][f_var] = metadata['ec2_compat'][ip_var]

    return facts


def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts

        Args:
            provider (str): host provider
            metadata (dict): provider metadata
        Returns:
            dict: the normalized provider facts
    """
    if provider is None or metadata is None:
        return {}

    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available

    # TODO: add support for setting user_data if available

    facts = dict(name=provider, metadata=metadata,
                 network=dict(interfaces=[], ipv6_enabled=False))

    if provider == 'gce':
        facts = normalize_gce_facts(metadata, facts)
    elif provider == 'aws':
        facts = normalize_aws_facts(metadata, facts)
    elif provider == 'openstack':
        facts = normalize_openstack_facts(metadata, facts)
    return facts


def set_identity_providers_if_unset(facts):
    """ Set identity_providers fact if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated identity providers
                  facts if they were not already present
    """
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'identity_providers' not in facts['master']:
            identity_provider = dict(
                name='allow_all', challenge=True, login=True,
                kind='AllowAllPasswordIdentityProvider'
            )
            if deployment_type == 'openshift-enterprise':
                identity_provider = dict(
                    name='deny_all', challenge=True, login=True,
                    kind='DenyAllPasswordIdentityProvider'
                )

            facts['master']['identity_providers'] = [identity_provider]

    return facts


def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
                  were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']

        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )

        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )

        etcd_urls = []
        if etcd_hosts != '':
            facts['master']['etcd_port'] = ports['etcd']
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]

        facts['master'].setdefault('etcd_urls', etcd_urls)

        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]

        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))

        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)

        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))

    return facts


def set_aggregate_facts(facts):
    """ Set aggregate facts

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with aggregated facts
    """
    all_hostnames = set()
    internal_hostnames = set()
    kube_svc_ip = first_ip(facts['common']['portal_net'])
    if 'common' in facts:
        all_hostnames.add(facts['common']['hostname'])
        all_hostnames.add(facts['common']['public_hostname'])
        all_hostnames.add(facts['common']['ip'])
        all_hostnames.add(facts['common']['public_ip'])
        facts['common']['kube_svc_ip'] = kube_svc_ip

        internal_hostnames.add(facts['common']['hostname'])
        internal_hostnames.add(facts['common']['ip'])

        cluster_domain = facts['common']['dns_domain']

        if 'master' in facts:
            if 'cluster_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_hostname'])
            if 'cluster_public_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_public_hostname'])
            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
            all_hostnames.update(svc_names)
            internal_hostnames.update(svc_names)
            all_hostnames.add(kube_svc_ip)
            internal_hostnames.add(kube_svc_ip)

        facts['common']['all_hostnames'] = list(all_hostnames)
        facts['common']['internal_hostnames'] = list(internal_hostnames)

    return facts
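# Example for set_aggregate_facts(): with portal_net '172.30.0.0/16' the derived
# kube_svc_ip is '172.30.0.1', and on masters the hostname lists also gain service
# names such as 'kubernetes.default.svc.' + dns_domain (illustrative values).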


def set_deployment_facts_if_unset(facts):
    """ Set Facts that vary based on deployment_type. This currently
        includes master.registry_url, node.registry_url,
        node.storage_plugin_deps

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated deployment_type
                  facts
    """
    # disabled to avoid breaking up facts related to deployment type into
    # multiple methods for now.
    # pylint: disable=too-many-statements, too-many-branches
    for role in ('master', 'node'):
        if role in facts:
            deployment_type = facts['common']['deployment_type']
            if 'registry_url' not in facts[role]:
                registry_url = 'openshift/origin-${component}:${version}'
                if deployment_type == 'openshift-enterprise':
                    registry_url = 'openshift3/ose-${component}:${version}'
                facts[role]['registry_url'] = registry_url

    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
        if 'disabled_features' not in facts['master']:
            if facts['common']['deployment_subtype'] == 'registry':
                facts['master']['disabled_features'] = openshift_features

    if 'node' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'storage_plugin_deps' not in facts['node']:
            facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']

    return facts


# pylint: disable=too-many-statements
def set_version_facts_if_unset(facts):
    """ Set version facts. This currently includes common.version and
        common.version_gte_3_x

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
    """
    if 'common' in facts:
        openshift_version = get_openshift_version(facts)
        if openshift_version and openshift_version != "latest":
            version = LooseVersion(openshift_version)
            facts['common']['version'] = openshift_version
            facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
            version_gte_3_6 = version >= LooseVersion('3.6')
            version_gte_3_7 = version >= LooseVersion('3.7')
            version_gte_3_8 = version >= LooseVersion('3.8')
            version_gte_3_9 = version >= LooseVersion('3.9')
        else:
            # 'Latest' version is set to True, 'Next' versions set to False
            version_gte_3_6 = True
            version_gte_3_7 = True
            version_gte_3_8 = False
            version_gte_3_9 = False
        facts['common']['version_gte_3_6'] = version_gte_3_6
        facts['common']['version_gte_3_7'] = version_gte_3_7
        facts['common']['version_gte_3_8'] = version_gte_3_8
        facts['common']['version_gte_3_9'] = version_gte_3_9

        if version_gte_3_9:
            examples_content_version = 'v3.9'
        elif version_gte_3_8:
            examples_content_version = 'v3.8'
        elif version_gte_3_7:
            examples_content_version = 'v3.7'
        elif version_gte_3_6:
            examples_content_version = 'v3.6'
        else:
            examples_content_version = 'v1.5'

        facts['common']['examples_content_version'] = examples_content_version

    return facts


def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict

        Args:
            facts (dict): existing facts
            system_facts (dict): ansible_facts
        Returns:
            dict: the facts dict updated with the generated sdn facts if they
                  were not already present
    """
    if 'master' in facts:
        # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
        # these might be overridden if they exist in the master config file
        sdn_cluster_network_cidr = '10.128.0.0/14'
        sdn_host_subnet_length = '9'

        master_cfg_path = os.path.join(facts['common']['config_base'],
                                       'master/master-config.yaml')
        if os.path.isfile(master_cfg_path):
            with open(master_cfg_path, 'r') as master_cfg_f:
                config = yaml.safe_load(master_cfg_f.read())

            if 'networkConfig' in config:
                if 'clusterNetworkCIDR' in config['networkConfig']:
                    sdn_cluster_network_cidr = \
                        config['networkConfig']['clusterNetworkCIDR']
                if 'hostSubnetLength' in config['networkConfig']:
                    sdn_host_subnet_length = \
                        config['networkConfig']['hostSubnetLength']

        if 'sdn_cluster_network_cidr' not in facts['master']:
            facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
        if 'sdn_host_subnet_length' not in facts['master']:
            facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length

    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']

        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'

        for val in itervalues(system_facts):
            if isinstance(val, dict) and 'mtu' in val:
                mtu = val['mtu']

                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    facts['node']['sdn_mtu'] = str(mtu - 50)

    return facts
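# Example for set_sdn_facts_if_unset(): if the interface carrying the node IP reports
# an MTU of 1500, sdn_mtu becomes '1450' (the 50-byte reduction leaves headroom for
# SDN encapsulation); otherwise the '1450' default stands.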


def set_nodename(facts):
    """ set nodename """
    if 'node' in facts and 'common' in facts:
        if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
            facts['node']['nodename'] = facts['provider']['metadata']['instance']['hostname'].split('.')[0]
        # TODO: The openstack cloudprovider nodename setting was too opinionated.
        # It needs to be generalized before it can be enabled again.
        # elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
        #     facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
        else:
            facts['node']['nodename'] = facts['common']['hostname'].lower()
    return facts


def migrate_oauth_template_facts(facts):
    """
    Migrate an old oauth template fact to a newer format if it's present.

    The legacy 'oauth_template' fact was just a filename, and assumed you were
    setting the 'login' template.

    The new pluralized 'oauth_templates' fact is a dict mapping the template
    name to a filename.

    Simplify the code after this by merging the old fact into the new.
    """
    if 'master' in facts and 'oauth_template' in facts['master']:
        if 'oauth_templates' not in facts['master']:
            facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']}
        elif 'login' not in facts['master']['oauth_templates']:
            facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
    return facts


def format_url(use_ssl, hostname, port, path=''):
    """ Format url based on ssl flag, hostname, port and path

        Args:
            use_ssl (bool): is ssl enabled
            hostname (str): hostname
            port (str): port
            path (str): url path
        Returns:
            str: The generated url string
    """
    scheme = 'https' if use_ssl else 'http'
    netloc = hostname
    if (use_ssl and port != '443') or (not use_ssl and port != '80'):
        netloc += ":%s" % port
    try:
        url = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
    except AttributeError:
        # pylint: disable=undefined-variable
        url = urlunparse((scheme, netloc, path, '', '', ''))
    return url
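# Example for format_url(): format_url(True, 'master.example.com', '8443', 'console')
# returns 'https://master.example.com:8443/console'; the default ports 443 (https)
# and 80 (http) are omitted from the netloc (illustrative hostname).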


def get_current_config(facts):
    """ Get current openshift config

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]

        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )

        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
            try:
                _, output, _ = module.run_command(  # noqa: F405
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)

                cad = 'certificate-authority-data'
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass

                current_config['kubeconfig'] = config

            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass

    return current_config


def build_kubelet_args(facts):
    """Build node kubelet_args

    In the node-config.yaml file, kubeletArgument sub-keys have their
    values provided as a list. Hence the gratuitous use of ['foo'] below.
    """
    cloud_cfg_path = os.path.join(
        facts['common']['config_base'],
        'cloudprovider')

    # We only have to do this stuff on hosts that are nodes
    if 'node' in facts:
        # Any changes to the kubeletArguments parameter are stored
        # here first.
        kubelet_args = {}

        if 'cloudprovider' in facts:
            # EVERY cloud is special <3
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    kubelet_args['cloud-provider'] = ['aws']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    kubelet_args['cloud-provider'] = ['openstack']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    kubelet_args['cloud-provider'] = ['gce']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']

        # Automatically add node-labels to the kubeletArguments
        # parameter. See BZ1359848 for additional details.
        #
        # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
        if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict):
            # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns
            # into ['foo=bar', 'a=b']
            #
            # On the openshift_node_labels inventory variable we loop
            # over each key-value tuple (from .items()) and join the
            # key to the value with an '=' character, this produces a
            # list.
            #
            # map() seems to be returning an itertools.imap object
            # instead of a list. We cast it to a list ourselves.
            # pylint: disable=unnecessary-lambda
            labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
            if labels_str != '':
                kubelet_args['node-labels'] = labels_str

        # If we've added items to the kubelet_args dict then we need
        # to merge the new items back into the main facts object.
        if kubelet_args != {}:
            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [])
    return facts
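# Example for build_kubelet_args(): node labels of {'region': 'infra'} are flattened
# to kubelet_args['node-labels'] == ['region=infra'] before being merged back into
# facts['node']['kubelet_args'] (illustrative label).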


def build_controller_args(facts):
    """ Build master controller_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        controller_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    controller_args['cloud-provider'] = ['aws']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    controller_args['cloud-provider'] = ['openstack']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    controller_args['cloud-provider'] = ['gce']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
        if controller_args != {}:
            facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [])
    return facts


def build_api_server_args(facts):
    """ Build master api_server_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        api_server_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    api_server_args['cloud-provider'] = ['aws']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    api_server_args['cloud-provider'] = ['openstack']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    api_server_args['cloud-provider'] = ['gce']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
        if api_server_args != {}:
            facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [])
    return facts


def is_service_running(service):
    """ Queries systemd through dbus to see if the service is running """
    service_running = False
    try:
        bus = SystemBus()
        systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
        manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
        service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
        service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
        service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
        service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
        service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        if service_load_state == 'loaded' and service_active_state == 'active':
            service_running = True
    except DBusException:
        # TODO: do not swallow exception, as it may be hiding useful debugging
        # information.
        pass

    return service_running


def rpm_rebuilddb():
    """
    Runs rpm --rebuilddb to ensure the db is in good shape.
    """
    module.run_command(['/usr/bin/rpm', '--rebuilddb'])  # noqa: F405


def get_version_output(binary, version_cmd):
    """ runs and returns the version output for a command """
    cmd = []
    for item in (binary, version_cmd):
        if isinstance(item, list):
            cmd.extend(item)
        else:
            cmd.append(item)

    output = ''
    if os.path.isfile(cmd[0]):
        _, output, _ = module.run_command(cmd)  # noqa: F405
    return output


# We may need this in the future.
def get_docker_version_info():
    """ Parses and returns the docker version info """
    result = None
    if is_service_running('docker') or is_service_running('container-engine'):
        version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
        if 'Server' in version_info:
            result = {
                'api_version': version_info['Server']['API version'],
                'version': version_info['Server']['Version']
            }
    return result


def get_openshift_version(facts):
    """ Get current version of openshift on the host.

        Checks a variety of ways ranging from fastest to slowest.

        Args:
            facts (dict): existing facts
        Returns:
            version: the current openshift version
    """
    version = None

    # No need to run this method repeatedly on a system if we already know the
    # version
    # TODO: We need a way to force reload this after upgrading bits.
    if 'common' in facts:
        if 'version' in facts['common'] and facts['common']['version'] is not None:
            return chomp_commit_offset(facts['common']['version'])

    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)
    elif 'common' in facts and 'is_containerized' in facts['common']:
        version = get_container_openshift_version(facts)

    # Handle containerized masters that have not yet been configured as a node.
    # This can be very slow and may get re-run multiple times, so we only use this
    # if other methods failed to find a version.
    if not version and os.path.isfile('/usr/local/bin/openshift'):
        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)

    return chomp_commit_offset(version)


def chomp_commit_offset(version):
    """Chomp any "+git.foo" commit offset string from the given `version`
    and return the modified version string.

    Ex:
    - chomp_commit_offset(None) => None
    - chomp_commit_offset(1337) => "1337"
    - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
    - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
    - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
    """
    if version is None:
        return version
    else:
        # Stringify, just in case it's a Number type. Split by '+' and
        # return the first split. No concerns about strings without a
        # '+', .split() returns an array of the original string.
        return str(version).split('+')[0]


def get_container_openshift_version(facts):
    """
    If containerized, see if we can determine the installed version via the
    systemd environment files.
    """
    deployment_type = facts['common']['deployment_type']
    service_type_dict = {'origin': 'origin',
                         'openshift-enterprise': 'atomic-openshift'}
    service_type = service_type_dict[deployment_type]

    for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']:
        env_path = filename % service_type
        if not os.path.exists(env_path):
            continue

        with open(env_path) as env_file:
            for line in env_file:
                if line.startswith("IMAGE_VERSION="):
                    tag = line[len("IMAGE_VERSION="):].strip()
                    # Remove leading "v" and any trailing release info, we just want
                    # a version number here:
                    no_v_version = tag[1:] if tag[0] == 'v' else tag
                    version = no_v_version.split("-")[0]
                    return version
    return None


def parse_openshift_version(output):
    """ Parse the version number from 'openshift version' output

        Args:
            output (string): output of 'openshift version'
        Returns:
            string: the version number
    """
    versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
    ver = versions.get('openshift', '')
    # Remove trailing build number and commit hash from older versions, we need to return a straight
    # w.x.y.z version here for use as openshift_version throughout the playbooks/roles. (i.e. 3.1.1.6-64-g80b61da)
    ver = ver.split('-')[0]
    return ver
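# Example for parse_openshift_version(): for output such as
# "openshift v3.6.173.0.5\nkubernetes v1.6.1+5115d708d7", splitting each line on ' v'
# yields {'openshift': '3.6.173.0.5', ...} and '3.6.173.0.5' is returned; a suffix
# like '3.1.1.6-64-g80b61da' is trimmed to '3.1.1.6' (illustrative output).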


def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to supplied facts dict

        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts
    """
    if not provider_facts:
        return facts

    common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
    for h_var, ip_var in common_vars:
        ip_value = provider_facts['network'].get(ip_var)
        if ip_value:
            facts['common'][ip_var] = ip_value

        facts['common'][h_var] = choose_hostname(
            [provider_facts['network'].get(h_var)],
            facts['common'][h_var]
        )

    facts['provider'] = provider_facts
    return facts


# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite):
    """ Recursively merge facts dicts

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config',
                            "builddefaults",
                            "buildoverrides"]

    facts = dict()
    for key, value in iteritems(orig):
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watch out for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], string_types):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)

    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watch out for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], string_types):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts
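# Example for merge_facts(): merging two 'master' dicts whose 'named_certificates'
# lists differ appends the unique new entries (an additive fact), while passing
# additive_facts_to_overwrite=['master.named_certificates'] replaces the old list
# with the new one outright.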


def save_local_facts(filename, facts):
    """ Save local facts

        Args:
            filename (str): local facts file
            facts (dict): facts to set
    """
    try:
        fact_dir = os.path.dirname(filename)
        try:
            os.makedirs(fact_dir)  # try to make the directory
        except OSError as exception:
            if exception.errno != errno.EEXIST:  # but it is okay if it is already there
                raise  # pass any other exceptions up the chain
        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))  # noqa: F405
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )


def get_local_facts_from_file(filename):
    """ Retrieve local facts from fact file

        Args:
            filename (str): local facts file
        Returns:
            dict: the retrieved facts
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style
        ini_facts = configparser.SafeConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict()
            for key, value in ini_facts.items(section):
                local_facts[section][key] = value
    except (configparser.MissingSectionHeaderError,
            configparser.ParsingError):
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass

    return local_facts


def sort_unique(alist):
    """ Sorts and de-dupes a list

        Args:
            alist (list): a list
        Returns:
            list: a sorted de-duped list
    """
    return sorted(list(set(alist)))


def safe_get_bool(fact):
    """ Get a boolean fact safely.

        Args:
            fact: fact to convert
        Returns:
            bool: given fact as a bool
    """
    return bool(strtobool(str(fact)))
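# Example for safe_get_bool(): safe_get_bool('yes'), safe_get_bool('True') and
# safe_get_bool(1) all return True, safe_get_bool('no') returns False, and strtobool
# raises ValueError for values it does not recognize.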


def set_proxy_facts(facts):
    """ Set global proxy facts

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'common' in facts:
        common = facts['common']
        if 'http_proxy' in common or 'https_proxy' in common or 'no_proxy' in common:
            if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
                common['no_proxy'] = common['no_proxy'].split(",")
            elif 'no_proxy' not in common:
                common['no_proxy'] = []

            # See https://bugzilla.redhat.com/show_bug.cgi?id=1466783
            # masters behind a proxy need to connect to etcd via IP
            if 'no_proxy_etcd_host_ips' in common:
                if isinstance(common['no_proxy_etcd_host_ips'], string_types):
                    common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(','))

            if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
                if 'no_proxy_internal_hostnames' in common:
                    common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
            # We always add local dns domain and ourselves no matter what
            common['no_proxy'].append('.' + common['dns_domain'])
            common['no_proxy'].append('.svc')
            common['no_proxy'].append(common['hostname'])
            common['no_proxy'] = ','.join(sort_unique(common['no_proxy']))
        facts['common'] = common
    return facts
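# Example for set_proxy_facts(): with a proxy configured, dns_domain 'cluster.local'
# and hostname 'node1.example.com', no_proxy always gains '.cluster.local', '.svc'
# and 'node1.example.com', then is de-duplicated, sorted and re-joined into a
# comma-separated string (illustrative values).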


def set_builddefaults_facts(facts):
    """ Set build defaults including setting proxy values from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Copy values from common to builddefaults
        if 'http_proxy' not in builddefaults and 'http_proxy' in common:
            builddefaults['http_proxy'] = common['http_proxy']
        if 'https_proxy' not in builddefaults and 'https_proxy' in common:
            builddefaults['https_proxy'] = common['https_proxy']
        if 'no_proxy' not in builddefaults and 'no_proxy' in common:
            builddefaults['no_proxy'] = common['no_proxy']

        # Create git specific facts from generic values, if git specific values are
        # not defined.
        if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
            builddefaults['git_http_proxy'] = builddefaults['http_proxy']
        if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
            builddefaults['git_https_proxy'] = builddefaults['https_proxy']
        if 'git_no_proxy' not in builddefaults and 'no_proxy' in builddefaults:
            builddefaults['git_no_proxy'] = builddefaults['no_proxy']

        # If we're actually defining a builddefaults config then create admission_plugin_config
        # then merge builddefaults[config] structure into admission_plugin_config
        # 'config' is the 'openshift_builddefaults_json' inventory variable
        if 'config' in builddefaults:
            if 'admission_plugin_config' not in facts['master']:
                # Scaffold out the full expected datastructure
                facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
            if 'env' in facts['master']['admission_plugin_config']['BuildDefaults']['configuration']:
                delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])

    return facts


def delete_empty_keys(keylist):
    """ Delete dictionary elements from keylist where "value" is empty.

        Args:
            keylist(list): A list of builddefault configuration envs.
        Returns:
            none

        Example:
            keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                       {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                       {'name': 'NO_PROXY', 'value': ''}]

            After calling delete_empty_keys the provided list is modified to become:

            [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
             {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}]
    """
    count = 0
    for i in range(0, len(keylist)):
        if len(keylist[i - count]['value']) == 0:
            del keylist[i - count]
            count += 1


def set_buildoverrides_facts(facts):
    """ Set build overrides

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'buildoverrides' in facts:
        buildoverrides = facts['buildoverrides']
        # If a buildoverrides config is actually being defined, create
        # admission_plugin_config and merge the buildoverrides['config']
        # structure into it.
        if 'config' in buildoverrides:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            facts['master']['admission_plugin_config'].update(buildoverrides['config'])

    return facts
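
# Illustrative sketch (hypothetical config values): a
#   facts['buildoverrides'] = {'config': {'BuildOverrides': {'configuration': {
#       'forcePull': True}}}}
# entry would be merged into facts['master']['admission_plugin_config']
# alongside any existing BuildDefaults entry, leaving other admission plugin
# settings untouched.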


# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated containerization
            facts
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type == 'openshift-enterprise':
        master_image = 'openshift3/ose'
        node_image = 'openshift3/node'
        ovs_image = 'openshift3/openvswitch'
        pod_image = 'openshift3/ose-pod'
        router_image = 'openshift3/ose-haproxy-router'
        registry_image = 'openshift3/ose-docker-registry'
        deployer_image = 'openshift3/ose-deployer'
    else:
        master_image = 'openshift/origin'
        node_image = 'openshift/node'
        ovs_image = 'openshift/openvswitch'
        pod_image = 'openshift/origin-pod'
        router_image = 'openshift/origin-haproxy-router'
        registry_image = 'openshift/origin-docker-registry'
        deployer_image = 'openshift/origin-deployer'

    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')

    if 'is_containerized' not in facts['common']:
        facts['common']['is_containerized'] = facts['common']['is_atomic']
    if 'pod_image' not in facts['common']:
        facts['common']['pod_image'] = pod_image
    if 'router_image' not in facts['common']:
        facts['common']['router_image'] = router_image
    if 'registry_image' not in facts['common']:
        facts['common']['registry_image'] = registry_image
    if 'deployer_image' not in facts['common']:
        facts['common']['deployer_image'] = deployer_image
    if 'master' in facts and 'master_image' not in facts['master']:
        facts['master']['master_image'] = master_image
        facts['master']['master_system_image'] = master_image
    if 'node' in facts:
        if 'node_image' not in facts['node']:
            facts['node']['node_image'] = node_image
            facts['node']['node_system_image'] = node_image
        if 'ovs_image' not in facts['node']:
            facts['node']['ovs_image'] = ovs_image
            facts['node']['ovs_system_image'] = ovs_image

    if safe_get_bool(facts['common']['is_containerized']):
        facts['common']['client_binary'] = '/usr/local/bin/oc'

    return facts
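
# Illustrative sketch: for deployment_type 'openshift-enterprise' the unset
# image facts default to the openshift3/* repositories (for example
# facts['common']['pod_image'] = 'openshift3/ose-pod'), while any other
# deployment type falls back to the openshift/origin-* images. On an Atomic
# (ostree-booted) host, is_containerized defaults to True and client_binary
# becomes '/usr/local/bin/oc'.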


def set_installed_variant_rpm_facts(facts):
    """ Set RPM facts of installed variant

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with installed_variant_rpms
    """
    installed_rpms = []
    for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
        optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
        variant_rpms = [base_rpm] + \
                       ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
                       ['tuned-profiles-%s-node' % base_rpm]
        for rpm in variant_rpms:
            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])  # noqa: F405
            if exit_code == 0:
                installed_rpms.append(rpm)

    facts['common']['installed_variant_rpms'] = installed_rpms
    return facts
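
# Illustrative sketch: for base_rpm 'origin' the probed package names are
# origin, origin-master, origin-node, origin-clients, origin-sdn-ovs and
# tuned-profiles-origin-node; whichever of these 'rpm -q' reports as installed
# end up in facts['common']['installed_variant_rpms'].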


class OpenShiftFactsInternalError(Exception):
    """Origin Facts Error"""
    pass


class OpenShiftFactsUnsupportedRoleError(Exception):
    """Origin Facts Unsupported Role Error"""
    pass


class OpenShiftFactsFileWriteError(Exception):
    """Origin Facts File Write Error"""
    pass


class OpenShiftFactsMetadataUnavailableError(Exception):
    """Origin Facts Metadata Unavailable Error"""
    pass


class OpenShiftFacts(object):
    """ Origin Facts

        Attributes:
            facts (dict): facts for the host

        Args:
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']

        Raises:
            OpenShiftFactsUnsupportedRoleError:
    """
    known_roles = ['builddefaults',
                   'buildoverrides',
                   'cloudprovider',
                   'common',
                   'etcd',
                   'master',
                   'node']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None):
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        # Collect system facts and preface each fact with 'ansible_'.
        try:
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])  # noqa: F405
            additional_facts = {}
            for (k, v) in self.system_facts.items():
                additional_facts["ansible_%s" % k.replace('-', '_')] = v
            self.system_facts.update(additional_facts)
        except UnboundLocalError:
            # ansible-2.2,2.3
            self.system_facts = get_all_facts(module)['ansible_facts']  # noqa: F405

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite)

    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
            Returns:
                dict: The generated facts
        """
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        facts = set_url_facts_if_unset(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_proxy_facts(facts)
        facts = set_builddefaults_facts(facts)
        facts = set_buildoverrides_facts(facts)
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)

    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host
                deployment_type (str): deployment type to use as a default
                deployment_subtype (str): deployment subtype to use as a default

            Returns:
                dict: The generated default facts
        """
        defaults = {}
        ip_addr = self.system_facts['ansible_default_ipv4']['address']
        exit_code, output, _ = module.run_command(['hostname', '-f'])  # noqa: F405
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr).lower()

        defaults['common'] = dict(ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc',
                                  dns_domain='cluster.local',
                                  config_base='/etc/origin')

        if 'master' in roles:
            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='30s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        return defaults

    def guess_host_provider(self):
        """ Guess the host provider

            Returns:
                dict: The generated default facts for the detected provider
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['ansible_product_name']
        product_version = self.system_facts['ansible_product_version']
        virt_type = self.system_facts['ansible_virtualization_type']
        virt_role = self.system_facts['ansible_virtualization_role']
        bios_vendor = self.system_facts['ansible_system_vendor']
        provider = None
        metadata = None

        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)
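
    # Illustrative sketch: on a GCE instance (BIOS vendor 'Google') this
    # returns something like
    #   {'name': 'gce', 'metadata': {...instance/project metadata...}}
    # with sshKeys and serviceAccounts filtered out, while on unrecognized
    # hardware it returns {'name': None, 'metadata': None}.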

    def init_provider_facts(self):
        """ Initialize the provider facts

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts

    # Disabling too-many-branches and too-many-locals.
    # This should be cleaned up as a TODO item.
    # pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']

            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        local_facts = get_local_facts_from_file(self.filename)
        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite)

        new_local_facts = self.remove_empty_facts(new_local_facts)

        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:  # noqa: F405
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts

    def remove_empty_facts(self, facts=None):
        """ Remove empty facts

            Args:
                facts (dict): facts to clean
        """
        facts_to_remove = []
        for fact, value in iteritems(facts):
            if isinstance(facts[fact], dict):
                facts[fact] = self.remove_empty_facts(facts[fact])
            else:
                if value == "" or value == [""] or value is None:
                    facts_to_remove.append(fact)
        for fact in facts_to_remove:
            del facts[fact]
        return facts
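
    # Illustrative sketch (assumed input values): remove_empty_facts(
    #   {'common': {'hostname': 'node1.example.com', 'public_hostname': '',
    #               'no_proxy': None}})
    # would return {'common': {'hostname': 'node1.example.com'}}, since values
    # of '', [''] or None are pruned recursively.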

    def validate_local_facts(self, facts=None):
        """ Validate local facts

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            msg = 'Invalid facts detected:\n'
            # pylint: disable=consider-iterating-dictionary
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            module.fail_json(msg=msg, changed=self.changed)  # noqa: F405

    # disabling pylint errors for line-too-long since we're dealing
    # with best effort reduction of error messages here.
    # disabling errors for too-many-branches since we require checking
    # many conditions.
    # pylint: disable=line-too-long, too-many-branches
    @staticmethod
    def validate_master_facts(facts, invalid_facts):
        """ Validate master facts

            Args:
                facts (dict): local facts to validate
                invalid_facts (dict): collected invalid_facts

            Returns:
                dict: Invalid facts
        """
        if 'master' in facts:
            # openshift.master.session_auth_secrets
            if 'session_auth_secrets' in facts['master']:
                session_auth_secrets = facts['master']['session_auth_secrets']
                if not issubclass(type(session_auth_secrets), list):
                    invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
                elif 'session_encryption_secrets' not in facts['master']:
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption_secrets must be set '
                                                             'if openshift_master_session_auth_secrets is provided.')
                elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
                                                             'openshift_master_session_encryption_secrets must be '
                                                             'equal length.')
                else:
                    for secret in session_auth_secrets:
                        if len(secret) < 32:
                            invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
                                                                     'Secrets must be at least 32 characters in length.')
            # openshift.master.session_encryption_secrets
            if 'session_encryption_secrets' in facts['master']:
                session_encryption_secrets = facts['master']['session_encryption_secrets']
                if not issubclass(type(session_encryption_secrets), list):
                    invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
                elif 'session_auth_secrets' not in facts['master']:
                    invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
                                                                   'set if openshift_master_session_encryption_secrets '
                                                                   'is provided.')
                else:
                    for secret in session_encryption_secrets:
                        if len(secret) not in [16, 24, 32]:
                            invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
                                                                           'Secrets must be 16, 24, or 32 characters in length.')
        return invalid_facts
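
    # Illustrative sketch (assumed values): passing
    #   facts = {'master': {'session_auth_secrets': ['tooshort'],
    #                       'session_encryption_secrets': ['0123456789abcdef']}}
    # would flag session_auth_secrets because the secret is shorter than the
    # required 32 characters, while the 16-character encryption secret passes.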


def main():
    """ main """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(  # noqa: F405
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    if not HAVE_DBUS:
        module.fail_json(msg="This module requires dbus python bindings")  # noqa: F405

    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']  # noqa: F405
    module.params['gather_timeout'] = 10  # noqa: F405
    module.params['filter'] = '*'  # noqa: F405

    role = module.params['role']  # noqa: F405
    local_facts = module.params['local_facts']  # noqa: F405
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']  # noqa: F405

    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite)

    file_params = module.params.copy()  # noqa: F405
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)  # noqa: F405
    changed = module.set_fs_attributes_if_different(file_args,  # noqa: F405
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,  # noqa: F405
                            ansible_facts=openshift_facts.facts)


if __name__ == '__main__':
    main()
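
# Illustrative usage sketch (not part of the module, hostname is a
# placeholder): a playbook task invoking this module might look like
#
#   - name: Gather OpenShift facts
#     openshift_facts:
#       role: common
#       local_facts:
#         public_hostname: "master.example.com"
#
# The module writes the merged local facts to
# /etc/ansible/facts.d/openshift.fact and returns the generated facts under
# the 'openshift' key in ansible_facts.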