#!/usr/bin/python
# pylint: disable=too-many-lines
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.

"""Ansible module for retrieving and setting openshift related facts"""

try:
    # python2
    import ConfigParser
except ImportError:
    # python3
    import configparser as ConfigParser

# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
import errno
import json
import re
import io
import os
import yaml
import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
from six import string_types
from six import text_type

# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *  # noqa: F403
from ansible.module_utils.facts import *  # noqa: F403
from ansible.module_utils.urls import *  # noqa: F403
from ansible.module_utils.six import iteritems, itervalues
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
from ansible.module_utils._text import to_native

HAVE_DBUS = False
try:
    from dbus import SystemBus, Interface
    from dbus.exceptions import DBusException
    HAVE_DBUS = True
except ImportError:
    pass

DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''

EXAMPLES = '''
'''


def migrate_docker_facts(facts):
    """ Apply migrations for docker facts """
    params = {
        'common': (
            'additional_registries',
            'insecure_registries',
            'blocked_registries',
            'options'
        ),
        'node': (
            'log_driver',
            'log_options'
        )
    }
    if 'docker' not in facts:
        facts['docker'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                old_param = 'docker_' + param
                if old_param in facts[role]:
                    facts['docker'][param] = facts[role].pop(old_param)

    if 'node' in facts and 'portal_net' in facts['node']:
        facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')

    # log_options was originally meant to be a comma separated string, but
    # we now prefer an actual list, with backward compatibility:
    if 'log_options' in facts['docker'] and \
            isinstance(facts['docker']['log_options'], string_types):
        facts['docker']['log_options'] = facts['docker']['log_options'].split(",")

    return facts
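
# Illustrative example (not part of the upstream module): given local facts
# such as {'node': {'docker_log_driver': 'json-file', 'docker_log_options': 'a=b,c=d'}},
# migrate_docker_facts() moves the values under the 'docker' role and splits the
# comma-separated log_options string into ['a=b', 'c=d'].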


# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
    """ Migrate facts from various roles into common """
    # Note: single-element tuples need the trailing comma; without it the value
    # is a plain string and the loop below would iterate over its characters.
    params = {
        'node': ('portal_net',),
        'master': ('portal_net',)
    }
    if 'common' not in facts:
        facts['common'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['common'][param] = facts[role].pop(param)
    return facts


def migrate_node_facts(facts):
    """ Migrate facts from various roles into node """
    params = {
        'common': ('dns_ip',),
    }
    if 'node' not in facts:
        facts['node'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['node'][param] = facts[role].pop(param)
    return facts


def migrate_hosted_facts(facts):
    """ Migrate router/registry selector facts from master into hosted """
    if 'master' in facts:
        if 'router_selector' in facts['master']:
            if 'hosted' not in facts:
                facts['hosted'] = {}
            if 'router' not in facts['hosted']:
                facts['hosted']['router'] = {}
            facts['hosted']['router']['selector'] = facts['master'].pop('router_selector')
        if 'registry_selector' in facts['master']:
            if 'hosted' not in facts:
                facts['hosted'] = {}
            if 'registry' not in facts['hosted']:
                facts['hosted']['registry'] = {}
            facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
    return facts


def migrate_admission_plugin_facts(facts):
    """ Apply migrations for admission plugin facts """
    if 'master' in facts:
        if 'kube_admission_plugin_config' in facts['master']:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            # Merge existing kube_admission_plugin_config with admission_plugin_config.
            facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
                                                                     facts['master']['kube_admission_plugin_config'],
                                                                     additive_facts_to_overwrite=[],
                                                                     protected_facts_to_overwrite=[])
            # Remove kube_admission_plugin_config fact
            facts['master'].pop('kube_admission_plugin_config', None)
    return facts


def migrate_local_facts(facts):
    """ Apply migrations of local facts """
    migrated_facts = copy.deepcopy(facts)
    migrated_facts = migrate_docker_facts(migrated_facts)
    migrated_facts = migrate_common_facts(migrated_facts)
    migrated_facts = migrate_node_facts(migrated_facts)
    migrated_facts = migrate_hosted_facts(migrated_facts)
    migrated_facts = migrate_admission_plugin_facts(migrated_facts)
    return migrated_facts


def first_ip(network):
    """ Return the first IPv4 address in network

        Args:
            network (str): network in CIDR format
        Returns:
            str: first IPv4 address
    """
    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]  # noqa: E731
    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))  # noqa: E731

    (address, netmask) = network.split('/')
    netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
    return itoa((atoi(address) & netmask_i) + 1)
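
# Illustrative example (not part of the upstream module): with the default
# portal_net of '172.30.0.0/16', first_ip() returns '172.30.0.1', which is the
# address later published as common.kube_svc_ip by set_aggregate_facts().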


def hostname_valid(hostname):
    """ Test if specified hostname should be considered valid

        Args:
            hostname (str): hostname to test
        Returns:
            bool: True if valid, otherwise False
    """
    if (not hostname or
            hostname.startswith('localhost') or
            hostname.endswith('localdomain') or
            hostname.endswith('novalocal') or
            len(hostname.split('.')) < 2):
        return False

    return True


def choose_hostname(hostnames=None, fallback=''):
    """ Choose a hostname from the provided hostnames

        Given a list of hostnames and a fallback value, choose a hostname to
        use. This function will prefer fqdns if they exist (excluding any that
        begin with localhost or end with localdomain) over ip addresses.

        Args:
            hostnames (list): list of hostnames
            fallback (str): default value to set if hostnames does not contain
                            a valid hostname
        Returns:
            str: chosen hostname
    """
    hostname = fallback
    if hostnames is None:
        return hostname

    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
    ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
    hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]

    for host_list in (hosts, ips):
        for host in host_list:
            if hostname_valid(host):
                return host

    return hostname
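
# Illustrative example (not part of the upstream module):
# choose_hostname(['10.0.0.5', 'localhost', 'node1.example.com'], '10.0.0.5')
# returns 'node1.example.com' because valid FQDNs are preferred over bare IPs,
# and localhost-style names are filtered out by hostname_valid().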


def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Return metadata from the provided metadata_url

        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict or list: metadata request result
    """
    result, info = fetch_url(module, metadata_url, headers=headers)  # noqa: F405
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        return module.from_json(to_native(result.read()))  # noqa: F405
    else:
        return [to_native(line.strip()) for line in result.readlines()]


def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Walk the metadata tree and return a dictionary of the entire tree

        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the result of walking the metadata tree
    """
    metadata = dict()

    for line in query_metadata(metadata_url, headers, expect_json):
        if line.endswith('/') and not line == 'public-keys/':
            key = line[:-1]
            metadata[key] = walk_metadata(metadata_url + line,
                                          headers, expect_json)
        else:
            results = query_metadata(metadata_url + line, headers,
                                     expect_json)
            if len(results) == 1:
                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                metadata[line] = results.pop()
            else:
                metadata[line] = results

    return metadata


def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Retrieve the provider metadata

        Args:
            metadata_url (str): metadata url
            supports_recursive (bool): does the provider metadata api support
                                       recursion
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the provider metadata
    """
    try:
        if supports_recursive:
            metadata = query_metadata(metadata_url, headers,
                                      expect_json)
        else:
            metadata = walk_metadata(metadata_url, headers,
                                     expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        metadata = None
    return metadata


def normalize_gce_facts(metadata, facts):
    """ Normalize gce facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                  facts dict
    """
    for interface in metadata['instance']['networkInterfaces']:
        int_info = dict(ips=[interface['ip']], network_type='gce')
        int_info['public_ips'] = [ac['externalIp'] for ac
                                  in interface['accessConfigs']]
        int_info['public_ips'].extend(interface['forwardedIps'])
        _, _, network_id = interface['network'].rpartition('/')
        int_info['network_id'] = network_id
        facts['network']['interfaces'].append(int_info)

    _, _, zone = metadata['instance']['zone'].rpartition('/')
    facts['zone'] = zone

    # GCE currently only supports a single interface
    facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
    pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
    facts['network']['public_ip'] = pub_ip
    facts['network']['hostname'] = metadata['instance']['hostname']

    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']

    return facts


def normalize_aws_facts(metadata, facts):
    """ Normalize aws facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                  facts dict
    """
    for interface in sorted(
            metadata['network']['interfaces']['macs'].values(),
            key=lambda x: x['device-number']
    ):
        int_info = dict()
        var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
        for ips_var, int_var in iteritems(var_map):
            ips = interface.get(int_var)
            if isinstance(ips, string_types):
                int_info[ips_var] = [ips]
            else:
                int_info[ips_var] = ips
        if 'vpc-id' in interface:
            int_info['network_type'] = 'vpc'
        else:
            int_info['network_type'] = 'classic'
        if int_info['network_type'] == 'vpc':
            int_info['network_id'] = interface['subnet-id']
        else:
            int_info['network_id'] = None
        facts['network']['interfaces'].append(int_info)

    facts['zone'] = metadata['placement']['availability-zone']

    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')

    # TODO: verify that local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')

    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = metadata.get('public-hostname')

    return facts


def normalize_openstack_facts(metadata, facts):
    """ Normalize openstack facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                  facts dict
    """
    # openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api, should be updated if neutron exposes this.

    facts['zone'] = metadata['availability_zone']
    local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
    facts['network']['ip'] = local_ipv4
    facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']

    for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
                                 ('public_hostname', 'public-hostname', 'public-ipv4')]:
        try:
            if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
                facts['network'][f_var] = metadata['ec2_compat'][h_var]
            else:
                facts['network'][f_var] = metadata['ec2_compat'][ip_var]
        except socket.gaierror:
            facts['network'][f_var] = metadata['ec2_compat'][ip_var]

    return facts


def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts

        Args:
            provider (str): host provider
            metadata (dict): provider metadata
        Returns:
            dict: the normalized provider facts
    """
    if provider is None or metadata is None:
        return {}

    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available

    # TODO: add support for setting user_data if available

    facts = dict(name=provider, metadata=metadata,
                 network=dict(interfaces=[], ipv6_enabled=False))

    if provider == 'gce':
        facts = normalize_gce_facts(metadata, facts)
    elif provider == 'aws':
        facts = normalize_aws_facts(metadata, facts)
    elif provider == 'openstack':
        facts = normalize_openstack_facts(metadata, facts)
    return facts


def set_flannel_facts_if_unset(facts):
    """ Set flannel facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the flannel
                  facts if they were not already present
    """
    if 'common' in facts:
        if 'use_flannel' not in facts['common']:
            use_flannel = False
            facts['common']['use_flannel'] = use_flannel
    return facts


def set_nuage_facts_if_unset(facts):
    """ Set nuage facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the nuage
                  facts if they were not already present
    """
    if 'common' in facts:
        if 'use_nuage' not in facts['common']:
            use_nuage = False
            facts['common']['use_nuage'] = use_nuage
    return facts


def set_node_schedulability(facts):
    """ Set schedulable facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated schedulable
                  facts if they were not already present
    """
    if 'node' in facts:
        if 'schedulable' not in facts['node']:
            if 'master' in facts:
                facts['node']['schedulable'] = False
            else:
                facts['node']['schedulable'] = True
    return facts
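
# Illustrative note (not part of the upstream module): a host that carries both
# the 'master' and 'node' roles defaults to schedulable=False, while a pure node
# defaults to schedulable=True; an explicit 'schedulable' fact is never overridden.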


def set_selectors(facts):
    """ Set selectors facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated selectors
                  facts if they were not already present
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type == 'online':
        selector = "type=infra"
    else:
        selector = "region=infra"

    if 'hosted' not in facts:
        facts['hosted'] = {}
    if 'router' not in facts['hosted']:
        facts['hosted']['router'] = {}
    if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']:
        facts['hosted']['router']['selector'] = selector
    if 'registry' not in facts['hosted']:
        facts['hosted']['registry'] = {}
    if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']:
        facts['hosted']['registry']['selector'] = selector
    if 'metrics' not in facts['hosted']:
        facts['hosted']['metrics'] = {}
    if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
        facts['hosted']['metrics']['selector'] = None
    if 'logging' not in facts['hosted']:
        facts['hosted']['logging'] = {}
    if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
        facts['hosted']['logging']['selector'] = None

    return facts
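
# Illustrative note (not part of the upstream module): for an 'online'
# deployment the router and registry default to the node selector 'type=infra';
# every other deployment_type defaults to 'region=infra'. The metrics and
# logging selectors intentionally default to None unless the inventory
# overrides them.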


def set_dnsmasq_facts_if_unset(facts):
    """ Set dnsmasq facts if not already present in facts

        Args:
            facts (dict): existing facts
        Returns:
            facts (dict): updated facts with values set if not previously set
    """
    if 'common' in facts:
        if 'use_dnsmasq' not in facts['common']:
            facts['common']['use_dnsmasq'] = bool(safe_get_bool(facts['common']['version_gte_3_2_or_1_2']))
        if 'master' in facts and 'dns_port' not in facts['master']:
            if safe_get_bool(facts['common']['use_dnsmasq']):
                facts['master']['dns_port'] = 8053
            else:
                facts['master']['dns_port'] = 53

    return facts


def set_project_cfg_facts_if_unset(facts):
    """ Set Project Configuration facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated Project Configuration
                  facts if they were not already present
    """
    config = {
        'default_node_selector': '',
        'project_request_message': '',
        'project_request_template': '',
        'mcs_allocator_range': 's0:/2',
        'mcs_labels_per_project': 5,
        'uid_allocator_range': '1000000000-1999999999/10000'
    }

    if 'master' in facts:
        for key, value in config.items():
            if key not in facts['master']:
                facts['master'][key] = value

    return facts


def set_identity_providers_if_unset(facts):
    """ Set identity_providers fact if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated identity providers
                  facts if they were not already present
    """
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'identity_providers' not in facts['master']:
            identity_provider = dict(
                name='allow_all', challenge=True, login=True,
                kind='AllowAllPasswordIdentityProvider'
            )

            if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
                identity_provider = dict(
                    name='deny_all', challenge=True, login=True,
                    kind='DenyAllPasswordIdentityProvider'
                )

            facts['master']['identity_providers'] = [identity_provider]

    return facts


def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
                  were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']

        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )

        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )

        etcd_urls = []
        if etcd_hosts != '':
            facts['master']['etcd_port'] = ports['etcd']
            facts['master']['embedded_etcd'] = False
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]

        facts['master'].setdefault('etcd_urls', etcd_urls)

        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]

        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))

        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)

        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]

        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))

    return facts
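
# Illustrative example (not part of the upstream module): with hostname
# 'master1.example.com', api_port '8443', api_use_ssl True and no
# cluster_hostname override, the generated api_url would be
# 'https://master1.example.com:8443' and the loopback_cluster_name
# 'master1-example-com:8443'.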


def set_aggregate_facts(facts):
    """ Set aggregate facts

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with aggregated facts
    """
    all_hostnames = set()
    internal_hostnames = set()
    kube_svc_ip = first_ip(facts['common']['portal_net'])
    if 'common' in facts:
        all_hostnames.add(facts['common']['hostname'])
        all_hostnames.add(facts['common']['public_hostname'])
        all_hostnames.add(facts['common']['ip'])
        all_hostnames.add(facts['common']['public_ip'])
        facts['common']['kube_svc_ip'] = kube_svc_ip

        internal_hostnames.add(facts['common']['hostname'])
        internal_hostnames.add(facts['common']['ip'])

        cluster_domain = facts['common']['dns_domain']

        if 'master' in facts:
            if 'cluster_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_hostname'])
            if 'cluster_public_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_public_hostname'])
            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
            all_hostnames.update(svc_names)
            internal_hostnames.update(svc_names)
            all_hostnames.add(kube_svc_ip)
            internal_hostnames.add(kube_svc_ip)

        facts['common']['all_hostnames'] = list(all_hostnames)
        facts['common']['internal_hostnames'] = list(internal_hostnames)

    return facts


def set_etcd_facts_if_unset(facts):
    """
    If using embedded etcd, loads the data directory from master-config.yaml.
    If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
    If anything goes wrong parsing these, the fact will not be set.
    """
    if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        if 'etcd_data_dir' not in etcd_facts:
            try:
                # Parse master config to find actual etcd data dir:
                master_cfg_path = os.path.join(facts['common']['config_base'],
                                               'master/master-config.yaml')
                master_cfg_f = open(master_cfg_path, 'r')
                config = yaml.safe_load(master_cfg_f.read())
                master_cfg_f.close()

                etcd_facts['etcd_data_dir'] = \
                    config['etcdConfig']['storageDirectory']

                facts['etcd'] = etcd_facts

            # We don't want exceptions bubbling up here:
            # pylint: disable=broad-except
            except Exception:
                pass
    else:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
        try:
            # Add a fake section for parsing:
            ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = ConfigParser.RawConfigParser()
            config.readfp(ini_fp)
            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
                etcd_data_dir = etcd_data_dir[1:-1]

            etcd_facts['etcd_data_dir'] = etcd_data_dir
            facts['etcd'] = etcd_facts

        # We don't want exceptions bubbling up here:
        # pylint: disable=broad-except
        except Exception:
            pass

    return facts


def set_deployment_facts_if_unset(facts):
    """ Set Facts that vary based on deployment_type. This currently
        includes common.service_type, common.config_base, master.registry_url,
        node.registry_url, node.storage_plugin_deps

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated deployment_type
                  facts
    """
    # disabled to avoid breaking up facts related to deployment type into
    # multiple methods for now.
    # pylint: disable=too-many-statements, too-many-branches
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'service_type' not in facts['common']:
            service_type = 'atomic-openshift'
            if deployment_type == 'origin':
                service_type = 'origin'
            elif deployment_type in ['enterprise']:
                service_type = 'openshift'
            facts['common']['service_type'] = service_type
        if 'config_base' not in facts['common']:
            config_base = '/etc/origin'
            if deployment_type in ['enterprise']:
                config_base = '/etc/openshift'
            # Handle upgrade scenarios when symlinks don't yet exist:
            if not os.path.exists(config_base) and os.path.exists('/etc/openshift'):
                config_base = '/etc/openshift'
            facts['common']['config_base'] = config_base
        if 'data_dir' not in facts['common']:
            data_dir = '/var/lib/origin'
            if deployment_type in ['enterprise']:
                data_dir = '/var/lib/openshift'
            # Handle upgrade scenarios when symlinks don't yet exist:
            if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'):
                data_dir = '/var/lib/openshift'
            facts['common']['data_dir'] = data_dir

    if 'docker' in facts:
        deployment_type = facts['common']['deployment_type']
        if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
            addtl_regs = facts['docker'].get('additional_registries', [])
            ent_reg = 'registry.access.redhat.com'
            if ent_reg not in addtl_regs:
                facts['docker']['additional_registries'] = addtl_regs + [ent_reg]

    for role in ('master', 'node'):
        if role in facts:
            deployment_type = facts['common']['deployment_type']
            if 'registry_url' not in facts[role]:
                registry_url = 'openshift/origin-${component}:${version}'
                if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
                    registry_url = 'openshift3/ose-${component}:${version}'
                elif deployment_type == 'atomic-enterprise':
                    registry_url = 'aep3_beta/aep-${component}:${version}'
                facts[role]['registry_url'] = registry_url

    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
        if 'disabled_features' in facts['master']:
            if deployment_type == 'atomic-enterprise':
                curr_disabled_features = set(facts['master']['disabled_features'])
                facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
        else:
            if facts['common']['deployment_subtype'] == 'registry':
                facts['master']['disabled_features'] = openshift_features

    if 'node' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'storage_plugin_deps' not in facts['node']:
            if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']:
                facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
            else:
                facts['node']['storage_plugin_deps'] = []

    return facts


def set_version_facts_if_unset(facts):
    """ Set version facts. This currently includes common.version and
        common.version_gte_3_1_or_1_1.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
    """
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_version = get_openshift_version(facts)
        if openshift_version:
            version = LooseVersion(openshift_version)
            facts['common']['version'] = openshift_version
            facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
            if deployment_type == 'origin':
                version_gte_3_1_or_1_1 = version >= LooseVersion('1.1.0')
                version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
                version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
                version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
                version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
                version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
                version_gte_3_6_or_1_6 = version >= LooseVersion('1.6.0')
            else:
                version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
                version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
                version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
                version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
                version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
                version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0')
                version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0')
        else:
            version_gte_3_1_or_1_1 = True
            version_gte_3_1_1_or_1_1_1 = True
            version_gte_3_2_or_1_2 = True
            version_gte_3_3_or_1_3 = True
            version_gte_3_4_or_1_4 = False
            version_gte_3_5_or_1_5 = False
            version_gte_3_6_or_1_6 = False
        facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
        facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
        facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
        facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
        facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
        facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
        facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6

        if version_gte_3_4_or_1_4:
            examples_content_version = 'v1.4'
        elif version_gte_3_3_or_1_3:
            examples_content_version = 'v1.3'
        elif version_gte_3_2_or_1_2:
            examples_content_version = 'v1.2'
        elif version_gte_3_1_or_1_1:
            examples_content_version = 'v1.1'
        else:
            examples_content_version = 'v1.0'

        facts['common']['examples_content_version'] = examples_content_version

    return facts


def set_manageiq_facts_if_unset(facts):
    """ Set manageiq facts. This currently includes common.use_manageiq.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
        Raises:
            OpenShiftFactsInternalError:
    """
    # Guard both conditions together; checking facts['common'] after finding
    # 'common' missing would raise a KeyError before the intended error.
    if 'common' not in facts or 'version_gte_3_1_or_1_1' not in facts['common']:
        raise OpenShiftFactsInternalError(
            "Invalid invocation: The required facts are not set"
        )
    if 'use_manageiq' not in facts['common']:
        facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1']

    return facts


def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict

        Args:
            facts (dict): existing facts
            system_facts (dict): ansible_facts
        Returns:
            dict: the facts dict updated with the generated sdn facts if they
                  were not already present
    """
    # pylint: disable=too-many-branches
    if 'common' in facts:
        use_sdn = facts['common']['use_openshift_sdn']
        if not (use_sdn == '' or isinstance(use_sdn, bool)):
            use_sdn = safe_get_bool(use_sdn)
            facts['common']['use_openshift_sdn'] = use_sdn
        if 'sdn_network_plugin_name' not in facts['common']:
            plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
            facts['common']['sdn_network_plugin_name'] = plugin

    if 'master' in facts:
        # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
        # these might be overridden if they exist in the master config file
        sdn_cluster_network_cidr = '10.128.0.0/14'
        sdn_host_subnet_length = '9'

        master_cfg_path = os.path.join(facts['common']['config_base'],
                                       'master/master-config.yaml')
        if os.path.isfile(master_cfg_path):
            with open(master_cfg_path, 'r') as master_cfg_f:
                config = yaml.safe_load(master_cfg_f.read())

            if 'networkConfig' in config:
                if 'clusterNetworkCIDR' in config['networkConfig']:
                    sdn_cluster_network_cidr = \
                        config['networkConfig']['clusterNetworkCIDR']
                if 'hostSubnetLength' in config['networkConfig']:
                    sdn_host_subnet_length = \
                        config['networkConfig']['hostSubnetLength']

        if 'sdn_cluster_network_cidr' not in facts['master']:
            facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
        if 'sdn_host_subnet_length' not in facts['master']:
            facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length

    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']

        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'

        for val in itervalues(system_facts):
            if isinstance(val, dict) and 'mtu' in val:
                mtu = val['mtu']

                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    facts['node']['sdn_mtu'] = str(mtu - 50)

    return facts


def set_nodename(facts):
    """ set nodename """
    if 'node' in facts and 'common' in facts:
        if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
            facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
        else:
            facts['node']['nodename'] = facts['common']['hostname'].lower()
    return facts


def migrate_oauth_template_facts(facts):
    """
    Migrate an old oauth template fact to a newer format if it's present.

    The legacy 'oauth_template' fact was just a filename, and assumed you were
    setting the 'login' template.

    The new pluralized 'oauth_templates' fact is a dict mapping the template
    name to a filename.

    Simplify the code after this by merging the old fact into the new.
    """
    if 'master' in facts and 'oauth_template' in facts['master']:
        if 'oauth_templates' not in facts['master']:
            facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']}
        elif 'login' not in facts['master']['oauth_templates']:
            facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
    return facts


def format_url(use_ssl, hostname, port, path=''):
    """ Format url based on ssl flag, hostname, port and path

        Args:
            use_ssl (bool): is ssl enabled
            hostname (str): hostname
            port (str): port
            path (str): url path
        Returns:
            str: The generated url string
    """
    scheme = 'https' if use_ssl else 'http'
    netloc = hostname
    if (use_ssl and port != '443') or (not use_ssl and port != '80'):
        netloc += ":%s" % port
    try:
        url = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
    except AttributeError:
        # pylint: disable=undefined-variable
        url = urlunparse((scheme, netloc, path, '', '', ''))
    return url
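
# Illustrative example (not part of the upstream module):
# format_url(True, 'master.example.com', '8443', 'console') yields
# 'https://master.example.com:8443/console', while the default port for the
# chosen scheme ('443' for https, '80' for http) is omitted from the netloc.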


def get_current_config(facts):
    """ Get current openshift config

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]

        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )

        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
            try:
                _, output, _ = module.run_command(  # noqa: F405
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)

                cad = 'certificate-authority-data'
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass

                current_config['kubeconfig'] = config

            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass

    return current_config


def build_kubelet_args(facts):
    """Build node kubelet_args

    In the node-config.yaml file, kubeletArgument sub-keys have their
    values provided as a list. Hence the gratuitous use of ['foo'] below.
    """
    cloud_cfg_path = os.path.join(
        facts['common']['config_base'],
        'cloudprovider')

    # We only have to do this stuff on hosts that are nodes
    if 'node' in facts:
        # Any changes to the kubeletArguments parameter are stored
        # here first.
        kubelet_args = {}

        if 'cloudprovider' in facts:
            # EVERY cloud is special <3
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    kubelet_args['cloud-provider'] = ['aws']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']

                if facts['cloudprovider']['kind'] == 'openstack':
                    kubelet_args['cloud-provider'] = ['openstack']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']

                if facts['cloudprovider']['kind'] == 'gce':
                    kubelet_args['cloud-provider'] = ['gce']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']

        # Automatically add node-labels to the kubeletArguments
        # parameter. See BZ1359848 for additional details.
        #
        # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
        if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict):
            # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns
            # into ['foo=bar', 'a=b']
            #
            # On the openshift_node_labels inventory variable we loop
            # over each key-value tuple (from .items()) and join the
            # key to the value with an '=' character, this produces a
            # list.
            #
            # map() seems to be returning an itertools.imap object
            # instead of a list. We cast it to a list ourselves.
            # pylint: disable=unnecessary-lambda
            labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
            if labels_str != '':
                kubelet_args['node-labels'] = labels_str

        # If we've added items to the kubelet_args dict then we need
        # to merge the new items back into the main facts object.
        if kubelet_args != {}:
            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])

    return facts
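
# Illustrative example (not part of the upstream module): on an AWS node with
# openshift_node_labels={'region': 'infra'}, the merged fact would resemble
# node.kubelet_args = {'cloud-provider': ['aws'],
#                      'cloud-config': ['/etc/origin/cloudprovider/aws.conf'],
#                      'node-labels': ['region=infra']}.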


def build_controller_args(facts):
    """ Build master controller_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        controller_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    controller_args['cloud-provider'] = ['aws']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    controller_args['cloud-provider'] = ['openstack']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    controller_args['cloud-provider'] = ['gce']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
        if controller_args != {}:
            facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
    return facts


def build_api_server_args(facts):
    """ Build master api_server_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        api_server_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    api_server_args['cloud-provider'] = ['aws']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    api_server_args['cloud-provider'] = ['openstack']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    api_server_args['cloud-provider'] = ['gce']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
        if api_server_args != {}:
            facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
    return facts


def is_service_running(service):
    """ Queries systemd through dbus to see if the service is running """
    service_running = False
    bus = SystemBus()
    systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
    manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
    try:
        service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
        service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
        service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
        service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
        service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        if service_load_state == 'loaded' and service_active_state == 'active':
            service_running = True
    except DBusException:
        pass

    return service_running
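
# Illustrative note (not part of the upstream module): a unit counts as running
# only when systemd reports LoadState == 'loaded' and ActiveState == 'active';
# any DBusException (for example, a unit that does not exist) is treated as
# "not running".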


def rpm_rebuilddb():
    """
    Runs rpm --rebuilddb to ensure the db is in good shape.
    """
    module.run_command(['/usr/bin/rpm', '--rebuilddb'])  # noqa: F405


def get_version_output(binary, version_cmd):
    """ runs and returns the version output for a command """
    cmd = []
    for item in (binary, version_cmd):
        if isinstance(item, list):
            cmd.extend(item)
        else:
            cmd.append(item)

    # Default so callers get a defined value even when the binary is missing.
    output = ''
    if os.path.isfile(cmd[0]):
        _, output, _ = module.run_command(cmd)  # noqa: F405
    return output


def get_docker_version_info():
    """ Parses and returns the docker version info """
    result = None
    if is_service_running('docker'):
        version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
        if 'Server' in version_info:
            result = {
                'api_version': version_info['Server']['API version'],
                'version': version_info['Server']['Version']
            }
    return result
def get_hosted_registry_insecure():
    """ Parses OPTIONS from /etc/sysconfig/docker to determine if the
        registry is currently insecure.
    """
    hosted_registry_insecure = None
    if os.path.exists('/etc/sysconfig/docker'):
        try:
            ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = ConfigParser.RawConfigParser()
            config.readfp(ini_fp)
            options = config.get('root', 'OPTIONS')
            if 'insecure-registry' in options:
                hosted_registry_insecure = True
        except Exception:  # pylint: disable=broad-except
            pass
    return hosted_registry_insecure
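# Note: the '[root]\n' header is prepended above only so that the env-style
# /etc/sysconfig/docker file can be parsed with ConfigParser; the function
# returns True when OPTIONS mentions insecure-registry and None otherwise
# (including when the file is absent or unreadable).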
def get_openshift_version(facts):
    """ Get current version of openshift on the host.

        Checks a variety of ways ranging from fastest to slowest.

        Args:
            facts (dict): existing facts

        Returns:
            version: the current openshift version
    """
    version = None

    # No need to run this method repeatedly on a system if we already know the
    # version
    # TODO: We need a way to force reload this after upgrading bits.
    if 'common' in facts:
        if 'version' in facts['common'] and facts['common']['version'] is not None:
            return chomp_commit_offset(facts['common']['version'])

    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)
    elif 'common' in facts and 'is_containerized' in facts['common']:
        version = get_container_openshift_version(facts)

    # Handle containerized masters that have not yet been configured as a node.
    # This can be very slow and may get re-run multiple times, so we only use this
    # if other methods failed to find a version.
    if not version and os.path.isfile('/usr/local/bin/openshift'):
        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)

    return chomp_commit_offset(version)
def chomp_commit_offset(version):
    """Chomp any "+git.foo" commit offset string from the given `version`
    and return the modified version string.

    Ex:
    - chomp_commit_offset(None) => None
    - chomp_commit_offset(1337) => "1337"
    - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
    - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
    - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
    """
    if version is None:
        return version
    else:
        # Stringify, just in case it's a Number type. Split by '+' and
        # return the first split. No concerns about strings without a
        # '+', .split() returns an array of the original string.
        return str(version).split('+')[0]
def get_container_openshift_version(facts):
    """
    If containerized, see if we can determine the installed version via the
    systemd environment files.
    """
    for filename in ['/etc/sysconfig/%s-master', '/etc/sysconfig/%s-node']:
        env_path = filename % facts['common']['service_type']
        if not os.path.exists(env_path):
            continue

        with open(env_path) as env_file:
            for line in env_file:
                if line.startswith("IMAGE_VERSION="):
                    tag = line[len("IMAGE_VERSION="):].strip()
                    # Remove leading "v" and any trailing release info, we just want
                    # a version number here:
                    version = tag[1:].split("-")[0]
                    return version
    return None
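# Illustrative parse (example line only): an env file containing
#   IMAGE_VERSION=v3.4.0.39-2
# yields '3.4.0.39' - the leading 'v' is stripped and the trailing '-2'
# release information is discarded.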
def parse_openshift_version(output):
    """ Parse the version number from 'openshift version' output

        Args:
            output (string): output of 'openshift version'
        Returns:
            string: the version number
    """
    versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
    ver = versions.get('openshift', '')
    # Remove trailing build number and commit hash from older versions, we need to return a straight
    # w.x.y.z version here for use as openshift_version throughout the playbooks/roles. (i.e. 3.1.1.6-64-g80b61da)
    ver = ver.split('-')[0]
    return ver
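# Illustrative parses (example output lines only):
#   'openshift v3.4.0.39'            -> '3.4.0.39'
#   'openshift v3.1.1.6-64-g80b61da' -> '3.1.1.6' (build/commit suffix dropped)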
def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to supplied facts dict

        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts
    """
    if not provider_facts:
        return facts

    common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
    for h_var, ip_var in common_vars:
        ip_value = provider_facts['network'].get(ip_var)
        if ip_value:
            facts['common'][ip_var] = ip_value

        facts['common'][h_var] = choose_hostname(
            [provider_facts['network'].get(h_var)],
            facts['common'][h_var]
        )

    facts['provider'] = provider_facts
    return facts
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
    """ Recursively merge facts dicts

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    protected_facts = ['ha', 'master_count']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config']

    facts = dict()
    for key, value in iteritems(orig):
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watch out for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], string_types):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)

                # Collect the subset of protected facts to overwrite
                # if key matches. These will be passed to the
                # subsequent merge_facts call.
                relevant_protected_facts = []
                for item in protected_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_protected_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # Key matches a protected fact and we are not overwriting
            # it so we will determine if it is okay to change this
            # fact.
            elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                # The master count (int) can only increase unless it
                # has been passed as a protected fact to overwrite.
                if key == 'master_count' and new[key] is not None and new[key] != '':
                    if int(value) <= int(new[key]):
                        facts[key] = copy.deepcopy(new[key])
                    else:
                        # pylint: disable=line-too-long
                        module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')  # noqa: F405
                # ha (bool) can not change unless it has been passed
                # as a protected fact to overwrite.
                if key == 'ha':
                    if safe_get_bool(value) != safe_get_bool(new[key]):
                        # pylint: disable=line-too-long
                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')  # noqa: F405
                    else:
                        facts[key] = value
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watch out for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], string_types):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts
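# Illustrative merge (assumed inputs, not executed here):
#   merge_facts({'master': {'ha': False, 'api_port': '8443'}},
#               {'master': {'api_port': '443'}}, [], [])
#   -> {'master': {'ha': False, 'api_port': '443'}}
# Ordinary keys take the new value, additive facts (named_certificates) are
# appended to, and protected facts (ha, master_count) may only change as
# allowed above unless listed in protected_facts_to_overwrite.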
def save_local_facts(filename, facts):
    """ Save local facts

        Args:
            filename (str): local facts file
            facts (dict): facts to set
    """
    try:
        fact_dir = os.path.dirname(filename)
        try:
            os.makedirs(fact_dir)  # try to make the directory
        except OSError as exception:
            if exception.errno != errno.EEXIST:  # but it is okay if it is already there
                raise  # pass any other exceptions up the chain
        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))  # noqa: F405
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )
def get_local_facts_from_file(filename):
    """ Retrieve local facts from fact file

        Args:
            filename (str): local facts file
        Returns:
            dict: the retrieved facts
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style
        ini_facts = ConfigParser.SafeConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict()
            for key, value in ini_facts.items(section):
                local_facts[section][key] = value

    except (ConfigParser.MissingSectionHeaderError,
            ConfigParser.ParsingError):
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass

    return local_facts
def sort_unique(alist):
    """ Sorts and de-dupes a list

        Args:
            alist (list): a list
        Returns:
            list: a sorted de-duped list
    """
    alist.sort()
    out = list()
    for i in alist:
        if i not in out:
            out.append(i)
    return out
def safe_get_bool(fact):
    """ Get a boolean fact safely.

        Args:
            fact: fact to convert
        Returns:
            bool: given fact as a bool
    """
    return bool(strtobool(str(fact)))
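# Illustrative conversions: safe_get_bool('True'), safe_get_bool('yes') and
# safe_get_bool(1) all return True; safe_get_bool('no') returns False. Note
# that strtobool() raises ValueError for strings it does not recognize.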
def set_proxy_facts(facts):
    """ Set global proxy facts and promote defaults from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'common' in facts:
        common = facts['common']
        if 'http_proxy' in common or 'https_proxy' in common:
            if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
                common['no_proxy'] = common['no_proxy'].split(",")
            elif 'no_proxy' not in common:
                common['no_proxy'] = []
            if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
                if 'no_proxy_internal_hostnames' in common:
                    common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
            # We always add local dns domain and ourselves no matter what
            common['no_proxy'].append('.' + common['dns_domain'])
            common['no_proxy'].append(common['hostname'])
            common['no_proxy'] = sort_unique(common['no_proxy'])
        facts['common'] = common

    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Copy values from common to builddefaults
        if 'http_proxy' not in builddefaults and 'http_proxy' in common:
            builddefaults['http_proxy'] = common['http_proxy']
        if 'https_proxy' not in builddefaults and 'https_proxy' in common:
            builddefaults['https_proxy'] = common['https_proxy']
        # make no_proxy into a list if it's not
        if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], string_types):
            builddefaults['no_proxy'] = builddefaults['no_proxy'].split(",")
        if 'no_proxy' not in builddefaults and 'no_proxy' in common:
            builddefaults['no_proxy'] = common['no_proxy']
        if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
            builddefaults['git_http_proxy'] = builddefaults['http_proxy']
        if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
            builddefaults['git_https_proxy'] = builddefaults['https_proxy']
        # If we're actually defining a proxy config then create admission_plugin_config
        # if it doesn't exist, then merge builddefaults[config] structure
        # into admission_plugin_config
        if 'config' in builddefaults and ('http_proxy' in builddefaults or
                                          'https_proxy' in builddefaults):
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
        facts['builddefaults'] = builddefaults

    return facts
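# Illustrative outcome (assumed values): with http_proxy set,
# generate_no_proxy_hosts enabled, dns_domain 'cluster.local' and hostname
# 'master.example.com', the resulting no_proxy list includes entries such as
# '.cluster.local' and 'master.example.com', sorted and de-duplicated.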
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated containerization
            facts
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type in ['enterprise', 'openshift-enterprise']:
        master_image = 'openshift3/ose'
        cli_image = master_image
        node_image = 'openshift3/node'
        ovs_image = 'openshift3/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift3/ose-pod'
        router_image = 'openshift3/ose-haproxy-router'
        registry_image = 'openshift3/ose-docker-registry'
        deployer_image = 'openshift3/ose-deployer'
    elif deployment_type == 'atomic-enterprise':
        master_image = 'aep3_beta/aep'
        cli_image = master_image
        node_image = 'aep3_beta/node'
        ovs_image = 'aep3_beta/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'aep3_beta/aep-pod'
        router_image = 'aep3_beta/aep-haproxy-router'
        registry_image = 'aep3_beta/aep-docker-registry'
        deployer_image = 'aep3_beta/aep-deployer'
    else:
        master_image = 'openshift/origin'
        cli_image = master_image
        node_image = 'openshift/node'
        ovs_image = 'openshift/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift/origin-pod'
        router_image = 'openshift/origin-haproxy-router'
        registry_image = 'openshift/origin-docker-registry'
        deployer_image = 'openshift/origin-deployer'

    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
    if 'is_containerized' not in facts['common']:
        facts['common']['is_containerized'] = facts['common']['is_atomic']
    if 'cli_image' not in facts['common']:
        facts['common']['cli_image'] = cli_image
    if 'pod_image' not in facts['common']:
        facts['common']['pod_image'] = pod_image
    if 'router_image' not in facts['common']:
        facts['common']['router_image'] = router_image
    if 'registry_image' not in facts['common']:
        facts['common']['registry_image'] = registry_image
    if 'deployer_image' not in facts['common']:
        facts['common']['deployer_image'] = deployer_image
    if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
        facts['etcd']['etcd_image'] = etcd_image
    if 'master' in facts and 'master_image' not in facts['master']:
        facts['master']['master_image'] = master_image
    if 'node' in facts:
        if 'node_image' not in facts['node']:
            facts['node']['node_image'] = node_image
        if 'ovs_image' not in facts['node']:
            facts['node']['ovs_image'] = ovs_image

    if safe_get_bool(facts['common']['is_containerized']):
        facts['common']['admin_binary'] = '/usr/local/bin/oadm'
        facts['common']['client_binary'] = '/usr/local/bin/oc'

    return facts
def set_installed_variant_rpm_facts(facts):
    """ Set RPM facts of installed variant

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with installed_variant_rpms
    """
    installed_rpms = []
    for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
        optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
        variant_rpms = [base_rpm] + \
                       ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
                       ['tuned-profiles-%s-node' % base_rpm]
        for rpm in variant_rpms:
            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])  # noqa: F405
            if exit_code == 0:
                installed_rpms.append(rpm)

    facts['common']['installed_variant_rpms'] = installed_rpms
    return facts
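# Illustrative probe list for base_rpm 'origin': 'origin', 'origin-master',
# 'origin-node', 'origin-clients', 'origin-sdn-ovs' and
# 'tuned-profiles-origin-node'; whichever of these rpm -q reports as installed
# are recorded in installed_variant_rpms.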
class OpenShiftFactsInternalError(Exception):
    """Origin Facts Error"""
    pass


class OpenShiftFactsUnsupportedRoleError(Exception):
    """Origin Facts Unsupported Role Error"""
    pass


class OpenShiftFactsFileWriteError(Exception):
    """Origin Facts File Write Error"""
    pass


class OpenShiftFactsMetadataUnavailableError(Exception):
    """Origin Facts Metadata Unavailable Error"""
    pass
class OpenShiftFacts(object):
    """ Origin Facts

        Attributes:
            facts (dict): facts for the host

        Args:
            module (AnsibleModule): an AnsibleModule object
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Raises:
            OpenShiftFactsUnsupportedRoleError:
    """
    known_roles = ['builddefaults',
                   'clock',
                   'cloudprovider',
                   'common',
                   'docker',
                   'etcd',
                   'hosted',
                   'master',
                   'node']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None,
                 openshift_env=None,
                 openshift_env_structures=None,
                 protected_facts_to_overwrite=None):
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        try:
            # ansible-2.1
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])  # noqa: F405
            for (k, v) in self.system_facts.items():
                self.system_facts["ansible_%s" % k.replace('-', '_')] = v
        except UnboundLocalError:
            # ansible-2.2
            self.system_facts = get_all_facts(module)['ansible_facts']  # noqa: F405

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite,
                                         openshift_env,
                                         openshift_env_structures,
                                         protected_facts_to_overwrite)
    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite,
                       openshift_env,
                       openshift_env_structures,
                       protected_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift_env facts for overriding generated defaults
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The generated facts
        """
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite,
                                            openshift_env,
                                            openshift_env_structures,
                                            protected_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite,
                            protected_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        facts = set_url_facts_if_unset(facts)
        facts = set_project_cfg_facts_if_unset(facts)
        facts = set_flannel_facts_if_unset(facts)
        facts = set_nuage_facts_if_unset(facts)
        facts = set_node_schedulability(facts)
        facts = set_selectors(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_dnsmasq_facts_if_unset(facts)
        facts = set_manageiq_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        facts = set_proxy_facts(facts)
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)
    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host
                deployment_type (str): deployment type for this host
                deployment_subtype (str): deployment subtype for this host

            Returns:
                dict: The generated default facts
        """
        defaults = {}
        ip_addr = self.system_facts['ansible_default_ipv4']['address']
        exit_code, output, _ = module.run_command(['hostname', '-f'])  # noqa: F405
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr)

        defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc', admin_binary='oadm',
                                  dns_domain='cluster.local',
                                  install_examples=True,
                                  debug_level=2)

        if 'master' in roles:
            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_etcd=True, embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='30s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)

        if 'docker' in roles:
            docker = dict(disable_push_dockerhub=False,
                          options='--log-driver=json-file --log-opt max-size=50m')
            # NOTE: This is a workaround for a dnf output racecondition that can occur in
            # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184
            if self.system_facts['ansible_pkg_mgr'] == 'dnf':
                rpm_rebuilddb()

            version_info = get_docker_version_info()
            if version_info is not None:
                docker['api_version'] = version_info['api_version']
                docker['version'] = version_info['version']
                docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10')

            hosted_registry_insecure = get_hosted_registry_insecure()
            if hosted_registry_insecure is not None:
                docker['hosted_registry_insecure'] = hosted_registry_insecure

            defaults['docker'] = docker

        if 'clock' in roles:
            exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])  # noqa: F405
            chrony_installed = bool(exit_code == 0)
            defaults['clock'] = dict(
                enabled=True,
                chrony_installed=chrony_installed)

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        if 'hosted' in roles or self.role == 'hosted':
            defaults['hosted'] = dict(
                metrics=dict(
                    deploy=False,
                    duration=7,
                    resolution='10s',
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='metrics',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access=dict(
                            modes=['ReadWriteOnce']
                        ),
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                logging=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='logging-es',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access=dict(
                            modes=['ReadWriteOnce']
                        ),
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                registry=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='registry',
                            size='5Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        host=None,
                        access=dict(
                            modes=['ReadWriteMany']
                        ),
                        create_pv=True,
                        create_pvc=True
                    )
                ),
                router=dict()
            )

        return defaults
    def guess_host_provider(self):
        """ Guess the host provider

            Returns:
                dict: The generated default facts for the detected provider
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['ansible_product_name']
        product_version = self.system_facts['ansible_product_version']
        virt_type = self.system_facts['ansible_virtualization_type']
        virt_role = self.system_facts['ansible_virtualization_role']
        provider = None
        metadata = None

        # TODO: this is not exposed through module_utils/facts.py in ansible,
        # need to create PR for ansible to expose it
        bios_vendor = get_file_content(  # noqa: F405
            '/sys/devices/virtual/dmi/id/bios_vendor'
        )
        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)
    def init_provider_facts(self):
        """ Initialize the provider facts

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts
    @staticmethod
    def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
        """ Split openshift_env facts based on openshift_env structures.

            Args:
                openshift_env_fact (string): the openshift_env fact to split
                                             ex: 'openshift_cloudprovider_openstack_auth_url'
                openshift_env_structures (list): a list of structures to determine fact keys
                                                 ex: ['openshift.cloudprovider.openstack.*']
            Returns:
                list: a list of keys that represent the fact
                      ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
        """
        # By default, we'll split an openshift_env fact by underscores.
        fact_keys = openshift_env_fact.split('_')

        # Determine if any of the provided variable structures match the fact.
        matching_structure = None
        if openshift_env_structures is not None:
            for structure in openshift_env_structures:
                if re.match(structure, openshift_env_fact):
                    matching_structure = structure
        # Fact didn't match any variable structures so return the default fact keys.
        if matching_structure is None:
            return fact_keys

        final_keys = []
        structure_keys = matching_structure.split('.')
        for structure_key in structure_keys:
            # Matched current key. Add to final keys.
            if structure_key == fact_keys[structure_keys.index(structure_key)]:
                final_keys.append(structure_key)
            # Wildcard means we will be taking everything from here to the end of the fact.
            elif structure_key == '*':
                final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
            # Shouldn't have gotten here, return the fact keys.
            else:
                return fact_keys
        return final_keys
    # Disabling too-many-branches and too-many-locals.
    # This should be cleaned up as a TODO item.
    # pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None,
                         openshift_env=None,
                         openshift_env_structures=None,
                         protected_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift env facts to set
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']

            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        if openshift_env != {} and openshift_env is not None:
            for fact, value in iteritems(openshift_env):
                oo_env_facts = dict()
                current_level = oo_env_facts
                keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]

                if len(keys) > 0 and keys[0] != self.role:
                    continue

                for key in keys:
                    if key == keys[-1]:
                        current_level[key] = value
                    elif key not in current_level:
                        current_level[key] = dict()
                        current_level = current_level[key]

                facts_to_set = merge_facts(orig=facts_to_set,
                                           new=oo_env_facts,
                                           additive_facts_to_overwrite=[],
                                           protected_facts_to_overwrite=[])

        local_facts = get_local_facts_from_file(self.filename)

        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite,
                                      protected_facts_to_overwrite)

        if 'docker' in new_local_facts:
            # remove duplicate and empty strings from registry lists
            for cat in ['additional', 'blocked', 'insecure']:
                key = '{0}_registries'.format(cat)
                if key in new_local_facts['docker']:
                    val = new_local_facts['docker'][key]
                    if isinstance(val, string_types):
                        val = [x.strip() for x in val.split(',')]
                    new_local_facts['docker'][key] = list(set(val) - set(['']))
            # Convert legacy log_options comma sep string to a list if present:
            if 'log_options' in new_local_facts['docker'] and \
                    isinstance(new_local_facts['docker']['log_options'], string_types):
                new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')

        new_local_facts = self.remove_empty_facts(new_local_facts)

        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:  # noqa: F405
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts
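    # Illustrative registry cleanup performed above (example value only):
    #   docker.insecure_registries = 'reg1.example.com, ,reg1.example.com'
    # becomes ['reg1.example.com'] - entries are whitespace-stripped, and
    # blanks and duplicates are removed.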
    def remove_empty_facts(self, facts=None):
        """ Remove empty facts

            Args:
                facts (dict): facts to clean
        """
        facts_to_remove = []
        for fact, value in iteritems(facts):
            if isinstance(facts[fact], dict):
                facts[fact] = self.remove_empty_facts(facts[fact])
            else:
                if value == "" or value == [""] or value is None:
                    facts_to_remove.append(fact)
        for fact in facts_to_remove:
            del facts[fact]
        return facts
    def validate_local_facts(self, facts=None):
        """ Validate local facts

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            msg = 'Invalid facts detected:\n'
            # pylint: disable=consider-iterating-dictionary
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            module.fail_json(msg=msg, changed=self.changed)  # noqa: F405
    # disabling pylint errors for line-too-long since we're dealing
    # with best effort reduction of error messages here.
    # disabling errors for too-many-branches since we require checking
    # many conditions.
    # pylint: disable=line-too-long, too-many-branches
    @staticmethod
    def validate_master_facts(facts, invalid_facts):
        """ Validate master facts

            Args:
                facts (dict): local facts to validate
                invalid_facts (dict): collected invalid_facts

            Returns:
                dict: Invalid facts
        """
        if 'master' in facts:
            # openshift.master.session_auth_secrets
            if 'session_auth_secrets' in facts['master']:
                session_auth_secrets = facts['master']['session_auth_secrets']
                if not issubclass(type(session_auth_secrets), list):
                    invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
                elif 'session_encryption_secrets' not in facts['master']:
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption_secrets must be set '
                                                             'if openshift_master_session_auth_secrets is provided.')
                elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
                                                             'openshift_master_session_encryption_secrets must be '
                                                             'equal length.')
                else:
                    for secret in session_auth_secrets:
                        if len(secret) < 32:
                            invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
                                                                     'Secrets must be at least 32 characters in length.')
            # openshift.master.session_encryption_secrets
            if 'session_encryption_secrets' in facts['master']:
                session_encryption_secrets = facts['master']['session_encryption_secrets']
                if not issubclass(type(session_encryption_secrets), list):
                    invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
                elif 'session_auth_secrets' not in facts['master']:
                    invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
                                                                   'set if openshift_master_session_encryption_secrets '
                                                                   'is provided.')
                else:
                    for secret in session_encryption_secrets:
                        if len(secret) not in [16, 24, 32]:
                            invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
                                                                           'Secrets must be 16, 24, or 32 characters in length.')
        return invalid_facts
def main():
    """ main """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(  # noqa: F405
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
            openshift_env=dict(default={}, type='dict', required=False),
            openshift_env_structures=dict(default=[], type='list', required=False),
            protected_facts_to_overwrite=dict(default=[], type='list', required=False)
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    if not HAVE_DBUS:
        module.fail_json(msg="This module requires dbus python bindings")  # noqa: F405

    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']  # noqa: F405
    module.params['gather_timeout'] = 10  # noqa: F405
    module.params['filter'] = '*'  # noqa: F405

    role = module.params['role']  # noqa: F405
    local_facts = module.params['local_facts']  # noqa: F405
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']  # noqa: F405
    openshift_env = module.params['openshift_env']  # noqa: F405
    openshift_env_structures = module.params['openshift_env_structures']  # noqa: F405
    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']  # noqa: F405

    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite,
                                     openshift_env,
                                     openshift_env_structures,
                                     protected_facts_to_overwrite)

    file_params = module.params.copy()  # noqa: F405
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)  # noqa: F405
    changed = module.set_fs_attributes_if_different(file_args,  # noqa: F405
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,  # noqa: F405
                            ansible_facts=openshift_facts.facts)


if __name__ == '__main__':
    main()