#!/usr/bin/python
# pylint: disable=too-many-lines
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.

"""Ansible module for retrieving and setting openshift related facts"""

# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
import errno
import json
import re
import io
import os
import yaml
import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
from six import string_types, text_type
from six.moves import configparser

# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *  # noqa: F403
from ansible.module_utils.facts import *  # noqa: F403
from ansible.module_utils.urls import *  # noqa: F403
from ansible.module_utils.six import iteritems, itervalues
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
from ansible.module_utils._text import to_native

HAVE_DBUS = False

try:
    from dbus import SystemBus, Interface
    from dbus.exceptions import DBusException
    HAVE_DBUS = True
except ImportError:
    pass

DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''


def migrate_docker_facts(facts):
    """ Apply migrations for docker facts """
    params = {
        'common': (
            'additional_registries',
            'insecure_registries',
            'blocked_registries',
            'options'
        ),
        'node': (
            'log_driver',
            'log_options'
        )
    }
    if 'docker' not in facts:
        facts['docker'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                old_param = 'docker_' + param
                if old_param in facts[role]:
                    facts['docker'][param] = facts[role].pop(old_param)
    if 'node' in facts and 'portal_net' in facts['node']:
        facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')
    # log_options was originally meant to be a comma separated string, but
    # we now prefer an actual list, with backward compatibility:
    if 'log_options' in facts['docker'] and \
            isinstance(facts['docker']['log_options'], string_types):
        facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
    return facts

# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
    """ Migrate facts from various roles into common """
    params = {
        'node': ('portal_net',),
        'master': ('portal_net',)
    }
    if 'common' not in facts:
        facts['common'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['common'][param] = facts[role].pop(param)
    return facts


def migrate_node_facts(facts):
    """ Migrate facts from various roles into node """
    params = {
        'common': ('dns_ip',),
    }
    if 'node' not in facts:
        facts['node'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['node'][param] = facts[role].pop(param)
    return facts

def migrate_hosted_facts(facts):
    """ Apply migrations for hosted facts """
    if 'master' in facts:
        if 'router_selector' in facts['master']:
            if 'hosted' not in facts:
                facts['hosted'] = {}
            if 'router' not in facts['hosted']:
                facts['hosted']['router'] = {}
            facts['hosted']['router']['selector'] = facts['master'].pop('router_selector')
        if 'registry_selector' in facts['master']:
            if 'hosted' not in facts:
                facts['hosted'] = {}
            if 'registry' not in facts['hosted']:
                facts['hosted']['registry'] = {}
            facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
    return facts

def migrate_admission_plugin_facts(facts):
    """ Apply migrations for admission plugin facts """
    if 'master' in facts:
        if 'kube_admission_plugin_config' in facts['master']:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            # Merge existing kube_admission_plugin_config with admission_plugin_config.
            facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
                                                                     facts['master']['kube_admission_plugin_config'],
                                                                     additive_facts_to_overwrite=[],
                                                                     protected_facts_to_overwrite=[])
            # Remove kube_admission_plugin_config fact
            facts['master'].pop('kube_admission_plugin_config', None)
    return facts


def migrate_local_facts(facts):
    """ Apply migrations of local facts """
    migrated_facts = copy.deepcopy(facts)
    migrated_facts = migrate_docker_facts(migrated_facts)
    migrated_facts = migrate_common_facts(migrated_facts)
    migrated_facts = migrate_node_facts(migrated_facts)
    migrated_facts = migrate_hosted_facts(migrated_facts)
    migrated_facts = migrate_admission_plugin_facts(migrated_facts)
    return migrated_facts


def first_ip(network):
    """ Return the first IPv4 address in network
        Args:
            network (str): network in CIDR format
        Returns:
            str: first IPv4 address
    """
    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]  # noqa: E731
    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))  # noqa: E731
    (address, netmask) = network.split('/')
    netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
    return itoa((atoi(address) & netmask_i) + 1)

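# Worked example for first_ip() above (added for clarity; not executed):
# for the common OpenShift service network '172.30.0.0/16',
# first_ip('172.30.0.0/16') -> '172.30.0.1'. The prefix length goes through
# the same inet_aton()-based atoi() helper as the address: inet_aton("16")
# parses as the integer 16, so the shift arithmetic works for both halves of
# the CIDR string.
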
def hostname_valid(hostname):
    """ Test if specified hostname should be considered valid
        Args:
            hostname (str): hostname to test
        Returns:
            bool: True if valid, otherwise False
    """
    if (not hostname or
            hostname.startswith('localhost') or
            hostname.endswith('localdomain') or
            hostname.endswith('novalocal') or
            len(hostname.split('.')) < 2):
        return False
    return True


def choose_hostname(hostnames=None, fallback=''):
    """ Choose a hostname from the provided hostnames
        Given a list of hostnames and a fallback value, choose a hostname to
        use. This function will prefer fqdns if they exist (excluding any that
        begin with localhost or end with localdomain) over ip addresses.
        Args:
            hostnames (list): list of hostnames
            fallback (str): default value to set if hostnames does not contain
                a valid hostname
        Returns:
            str: chosen hostname
    """
    hostname = fallback
    if hostnames is None:
        return hostname

    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
    ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
    hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]

    for host_list in (hosts, ips):
        for host in host_list:
            if hostname_valid(host):
                return host

    return hostname

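# Worked example for choose_hostname() above (illustrative, not executed):
# choose_hostname(['10.3.9.14', 'localhost.localdomain', 'node1.example.com'])
# returns 'node1.example.com' because non-IP names are tried before IPs and
# 'localhost*' / '*.localdomain' names fail hostname_valid(). If no name
# passes, the first valid IP is used, and finally the fallback value.
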
def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Return metadata from the provided metadata_url
        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict or list: metadata request result
    """
    result, info = fetch_url(module, metadata_url, headers=headers)  # noqa: F405
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        return module.from_json(to_native(result.read()))  # noqa: F405
    else:
        return [to_native(line.strip()) for line in result.readlines()]


def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Walk the metadata tree and return a dictionary of the entire tree
        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the result of walking the metadata tree
    """
    metadata = dict()
    for line in query_metadata(metadata_url, headers, expect_json):
        if line.endswith('/') and not line == 'public-keys/':
            key = line[:-1]
            metadata[key] = walk_metadata(metadata_url + line,
                                          headers, expect_json)
        else:
            results = query_metadata(metadata_url + line, headers,
                                     expect_json)
            if len(results) == 1:
                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                metadata[line] = results.pop()
            else:
                metadata[line] = results
    return metadata

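# Sketch of the walk_metadata() result for an EC2-style metadata tree
# (hypothetical paths, shown only to document the shape of the result):
#   'placement/'      -> trailing slash, so it is walked recursively into a
#                        nested dict under the key 'placement'
#   'local-ipv4'      -> single-line leaf, stored as a string
#   'security-groups' -> multi-line leaf, stored as a list of strings
# 'public-keys/' is queried like a leaf but deliberately not walked.
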
def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Retrieve the provider metadata
        Args:
            metadata_url (str): metadata url
            supports_recursive (bool): does the provider metadata api support
                recursion
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the provider metadata
    """
    try:
        if supports_recursive:
            metadata = query_metadata(metadata_url, headers,
                                      expect_json)
        else:
            metadata = walk_metadata(metadata_url, headers,
                                     expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        metadata = None
    return metadata


def normalize_gce_facts(metadata, facts):
    """ Normalize gce facts
        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                facts dict
    """
    for interface in metadata['instance']['networkInterfaces']:
        int_info = dict(ips=[interface['ip']], network_type='gce')
        int_info['public_ips'] = [ac['externalIp'] for ac
                                  in interface['accessConfigs']]
        int_info['public_ips'].extend(interface['forwardedIps'])
        _, _, network_id = interface['network'].rpartition('/')
        int_info['network_id'] = network_id
        facts['network']['interfaces'].append(int_info)
    _, _, zone = metadata['instance']['zone'].rpartition('/')
    facts['zone'] = zone

    # GCE currently only supports a single interface
    facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
    pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
    facts['network']['public_ip'] = pub_ip
    facts['network']['hostname'] = metadata['instance']['hostname']

    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']

    return facts


def normalize_aws_facts(metadata, facts):
    """ Normalize aws facts
        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                facts dict
    """
    for interface in sorted(
            metadata['network']['interfaces']['macs'].values(),
            key=lambda x: x['device-number']
    ):
        int_info = dict()
        var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
        for ips_var, int_var in iteritems(var_map):
            ips = interface.get(int_var)
            if isinstance(ips, string_types):
                int_info[ips_var] = [ips]
            else:
                int_info[ips_var] = ips
        if 'vpc-id' in interface:
            int_info['network_type'] = 'vpc'
        else:
            int_info['network_type'] = 'classic'
        if int_info['network_type'] == 'vpc':
            int_info['network_id'] = interface['subnet-id']
        else:
            int_info['network_id'] = None
        facts['network']['interfaces'].append(int_info)
    facts['zone'] = metadata['placement']['availability-zone']

    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')

    # TODO: verify that local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')

    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = metadata.get('public-hostname')

    return facts


def normalize_openstack_facts(metadata, facts):
    """ Normalize openstack facts
        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                facts dict
    """
    # openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api, should be updated if neutron exposes this.
    facts['zone'] = metadata['availability_zone']
    local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
    facts['network']['ip'] = local_ipv4
    facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']

    for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
                                 ('public_hostname', 'public-hostname', 'public-ipv4')]:
        try:
            if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
                facts['network'][f_var] = metadata['ec2_compat'][h_var]
            else:
                facts['network'][f_var] = metadata['ec2_compat'][ip_var]
        except socket.gaierror:
            facts['network'][f_var] = metadata['ec2_compat'][ip_var]

    return facts


def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts
        Args:
            provider (str): host provider
            metadata (dict): provider metadata
        Returns:
            dict: the normalized provider facts
    """
    if provider is None or metadata is None:
        return {}

    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available

    # TODO: add support for setting user_data if available

    facts = dict(name=provider, metadata=metadata,
                 network=dict(interfaces=[], ipv6_enabled=False))

    if provider == 'gce':
        facts = normalize_gce_facts(metadata, facts)
    elif provider == 'aws':
        facts = normalize_aws_facts(metadata, facts)
    elif provider == 'openstack':
        facts = normalize_openstack_facts(metadata, facts)
    return facts

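# Shape of the facts returned by normalize_provider_facts() above
# (illustrative values only, AWS case):
#   {'name': 'aws',
#    'zone': 'us-east-1d',
#    'metadata': {...raw provider metadata...},
#    'network': {'interfaces': [...], 'ipv6_enabled': False,
#                'ip': '10.0.0.12', 'public_ip': '54.1.2.3',
#                'hostname': 'ip-10-0-0-12.ec2.internal',
#                'public_hostname': 'ec2-54-1-2-3.compute-1.amazonaws.com'}}
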
def set_flannel_facts_if_unset(facts):
    """ Set flannel facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the flannel
                facts if they were not already present
    """
    if 'common' in facts:
        if 'use_flannel' not in facts['common']:
            use_flannel = False
            facts['common']['use_flannel'] = use_flannel
    return facts


def set_nuage_facts_if_unset(facts):
    """ Set nuage facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the nuage
                facts if they were not already present
    """
    if 'common' in facts:
        if 'use_nuage' not in facts['common']:
            use_nuage = False
            facts['common']['use_nuage'] = use_nuage
    return facts

def set_node_schedulability(facts):
    """ Set schedulable facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated schedulable
            facts if they were not already present
    """
    if 'node' in facts:
        if 'schedulable' not in facts['node']:
            if 'master' in facts:
                facts['node']['schedulable'] = False
            else:
                facts['node']['schedulable'] = True
    return facts


def set_selectors(facts):
    """ Set selectors facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated selectors
            facts if they were not already present
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type == 'online':
        selector = "type=infra"
    else:
        selector = "region=infra"

    if 'hosted' not in facts:
        facts['hosted'] = {}
    if 'router' not in facts['hosted']:
        facts['hosted']['router'] = {}
    if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']:
        facts['hosted']['router']['selector'] = selector
    if 'registry' not in facts['hosted']:
        facts['hosted']['registry'] = {}
    if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']:
        facts['hosted']['registry']['selector'] = selector
    if 'metrics' not in facts['hosted']:
        facts['hosted']['metrics'] = {}
    if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
        facts['hosted']['metrics']['selector'] = None
    if 'logging' not in facts['hosted']:
        facts['hosted']['logging'] = {}
    if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
        facts['hosted']['logging']['selector'] = None

    return facts

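# Example of the defaults applied by set_selectors() above (illustrative):
# with deployment_type='openshift-enterprise' and nothing set in inventory,
# hosted.router.selector and hosted.registry.selector both default to
# 'region=infra', while the metrics and logging selectors are left as None
# (no node selector) unless set elsewhere.
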
def set_dnsmasq_facts_if_unset(facts):
    """ Set dnsmasq facts if not already present in facts
    Args:
        facts (dict) existing facts
    Returns:
        facts (dict) updated facts with values set if not previously set
    """
    if 'common' in facts:
        if 'use_dnsmasq' not in facts['common']:
            facts['common']['use_dnsmasq'] = bool(safe_get_bool(facts['common']['version_gte_3_2_or_1_2']))
        if 'master' in facts and 'dns_port' not in facts['master']:
            if safe_get_bool(facts['common']['use_dnsmasq']):
                facts['master']['dns_port'] = 8053
            else:
                facts['master']['dns_port'] = 53

    return facts

def set_project_cfg_facts_if_unset(facts):
    """ Set Project Configuration facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated Project Configuration
            facts if they were not already present
    """
    config = {
        'default_node_selector': '',
        'project_request_message': '',
        'project_request_template': '',
        'mcs_allocator_range': 's0:/2',
        'mcs_labels_per_project': 5,
        'uid_allocator_range': '1000000000-1999999999/10000'
    }

    if 'master' in facts:
        for key, value in config.items():
            if key not in facts['master']:
                facts['master'][key] = value

    return facts

def set_identity_providers_if_unset(facts):
    """ Set identity_providers fact if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated identity providers
            facts if they were not already present
    """
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'identity_providers' not in facts['master']:
            identity_provider = dict(
                name='allow_all', challenge=True, login=True,
                kind='AllowAllPasswordIdentityProvider'
            )
            if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
                identity_provider = dict(
                    name='deny_all', challenge=True, login=True,
                    kind='DenyAllPasswordIdentityProvider'
                )
            facts['master']['identity_providers'] = [identity_provider]

    return facts


def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
                were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']

        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )

        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )

        etcd_urls = []
        if etcd_hosts != '':
            facts['master']['etcd_port'] = ports['etcd']
            facts['master']['embedded_etcd'] = False
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]

        facts['master'].setdefault('etcd_urls', etcd_urls)

        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]

        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))

        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)

        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]

        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))

    return facts

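# Worked example for set_url_facts_if_unset() above (illustrative values):
# with hostname='master.example.com', api_port='8443', api_use_ssl=True,
# console_port='8443', console_path='/console' and no cluster_hostname,
# the defaults come out as:
#   api_url     = 'https://master.example.com:8443'
#   console_url = 'https://master.example.com:8443/console'
#   loopback_cluster_name = 'master-example-com:8443'
# Everything is applied with setdefault(), so values already present in the
# facts are never overwritten.
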
def set_aggregate_facts(facts):
    """ Set aggregate facts
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with aggregated facts
    """
    all_hostnames = set()
    internal_hostnames = set()
    kube_svc_ip = first_ip(facts['common']['portal_net'])
    if 'common' in facts:
        all_hostnames.add(facts['common']['hostname'])
        all_hostnames.add(facts['common']['public_hostname'])
        all_hostnames.add(facts['common']['ip'])
        all_hostnames.add(facts['common']['public_ip'])
        facts['common']['kube_svc_ip'] = kube_svc_ip

        internal_hostnames.add(facts['common']['hostname'])
        internal_hostnames.add(facts['common']['ip'])

        cluster_domain = facts['common']['dns_domain']

        if 'master' in facts:
            if 'cluster_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_hostname'])
            if 'cluster_public_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_public_hostname'])
            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
            all_hostnames.update(svc_names)
            internal_hostnames.update(svc_names)
            all_hostnames.add(kube_svc_ip)
            internal_hostnames.add(kube_svc_ip)

        facts['common']['all_hostnames'] = list(all_hostnames)
        facts['common']['internal_hostnames'] = list(internal_hostnames)

    return facts

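# Illustrative result of set_aggregate_facts() above: with
# portal_net='172.30.0.0/16' and dns_domain='cluster.local', the first
# service IP '172.30.0.1' becomes common.kube_svc_ip and is added to
# all_hostnames/internal_hostnames along with service names such as
# 'kubernetes.default.svc.cluster.local', so later roles have every name the
# API may be reached by (for example when generating certificate SANs).
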
def set_etcd_facts_if_unset(facts):
    """
    If using embedded etcd, loads the data directory from master-config.yaml.
    If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
    If anything goes wrong parsing these, the fact will not be set.
    """
    if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        if 'etcd_data_dir' not in etcd_facts:
            try:
                # Parse master config to find actual etcd data dir:
                master_cfg_path = os.path.join(facts['common']['config_base'],
                                               'master/master-config.yaml')
                master_cfg_f = open(master_cfg_path, 'r')
                config = yaml.safe_load(master_cfg_f.read())
                master_cfg_f.close()

                etcd_facts['etcd_data_dir'] = \
                    config['etcdConfig']['storageDirectory']

                facts['etcd'] = etcd_facts

            # We don't want exceptions bubbling up here:
            # pylint: disable=broad-except
            except Exception:
                pass
    else:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
        try:
            # Add a fake section for parsing:
            ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = configparser.RawConfigParser()
            config.readfp(ini_fp)
            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
                etcd_data_dir = etcd_data_dir[1:-1]

            etcd_facts['etcd_data_dir'] = etcd_data_dir
            facts['etcd'] = etcd_facts

        # We don't want exceptions bubbling up here:
        # pylint: disable=broad-except
        except Exception:
            pass

    return facts

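# Example for the standalone-etcd branch above (illustrative): /etc/etcd/etcd.conf
# is plain KEY=value shell syntax, so a fake '[root]' section header is
# prepended to make configparser accept it. A line such as
#   ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
# yields etcd_data_dir='/var/lib/etcd/default.etcd' after the surrounding
# quotes are stripped.
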
def set_deployment_facts_if_unset(facts):
    """ Set Facts that vary based on deployment_type. This currently
        includes common.service_type, common.config_base, master.registry_url,
        node.registry_url, node.storage_plugin_deps
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated deployment_type
            facts
    """
    # disabled to avoid breaking up facts related to deployment type into
    # multiple methods for now.
    # pylint: disable=too-many-statements, too-many-branches
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'service_type' not in facts['common']:
            service_type = 'atomic-openshift'
            if deployment_type == 'origin':
                service_type = 'origin'
            elif deployment_type in ['enterprise']:
                service_type = 'openshift'
            facts['common']['service_type'] = service_type
        if 'config_base' not in facts['common']:
            config_base = '/etc/origin'
            if deployment_type in ['enterprise']:
                config_base = '/etc/openshift'
            # Handle upgrade scenarios when symlinks don't yet exist:
            if not os.path.exists(config_base) and os.path.exists('/etc/openshift'):
                config_base = '/etc/openshift'
            facts['common']['config_base'] = config_base
        if 'data_dir' not in facts['common']:
            data_dir = '/var/lib/origin'
            if deployment_type in ['enterprise']:
                data_dir = '/var/lib/openshift'
            # Handle upgrade scenarios when symlinks don't yet exist:
            if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'):
                data_dir = '/var/lib/openshift'
            facts['common']['data_dir'] = data_dir

    if 'docker' in facts:
        deployment_type = facts['common']['deployment_type']
        if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
            addtl_regs = facts['docker'].get('additional_registries', [])
            ent_reg = 'registry.access.redhat.com'
            if ent_reg not in addtl_regs:
                facts['docker']['additional_registries'] = addtl_regs + [ent_reg]

    for role in ('master', 'node'):
        if role in facts:
            deployment_type = facts['common']['deployment_type']
            if 'registry_url' not in facts[role]:
                registry_url = 'openshift/origin-${component}:${version}'
                if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
                    registry_url = 'openshift3/ose-${component}:${version}'
                elif deployment_type == 'atomic-enterprise':
                    registry_url = 'aep3_beta/aep-${component}:${version}'
                facts[role]['registry_url'] = registry_url

    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
        if 'disabled_features' in facts['master']:
            if deployment_type == 'atomic-enterprise':
                curr_disabled_features = set(facts['master']['disabled_features'])
                facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
        else:
            if facts['common']['deployment_subtype'] == 'registry':
                facts['master']['disabled_features'] = openshift_features

    if 'node' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'storage_plugin_deps' not in facts['node']:
            if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']:
                facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
            else:
                facts['node']['storage_plugin_deps'] = []

    return facts


def set_evacuate_or_drain_option(facts):
    """OCP before 1.5/3.5 used '--evacuate'. As of 1.5/3.5 OCP uses
    '--drain'. Let's make that a fact for easy reference later.
    """
    if facts['common']['version_gte_3_5_or_1_5']:
        # New-style
        facts['common']['evacuate_or_drain'] = '--drain'
    else:
        # Old-style
        facts['common']['evacuate_or_drain'] = '--evacuate'

    return facts


def set_version_facts_if_unset(facts):
    """ Set version facts. This currently includes common.version and
        common.version_gte_3_1_or_1_1.
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
    """
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_version = get_openshift_version(facts)
        if openshift_version:
            version = LooseVersion(openshift_version)
            facts['common']['version'] = openshift_version
            facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
            if deployment_type == 'origin':
                version_gte_3_1_or_1_1 = version >= LooseVersion('1.1.0')
                version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
                version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
                version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
                version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
                version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
                version_gte_3_6_or_1_6 = version >= LooseVersion('1.6.0')
            else:
                version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
                version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
                version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
                version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
                version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
                version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0')
                version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0')
        else:
            version_gte_3_1_or_1_1 = True
            version_gte_3_1_1_or_1_1_1 = True
            version_gte_3_2_or_1_2 = True
            version_gte_3_3_or_1_3 = True
            version_gte_3_4_or_1_4 = False
            version_gte_3_5_or_1_5 = False
            version_gte_3_6_or_1_6 = False
        facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
        facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
        facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
        facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
        facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
        facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
        facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6

        if version_gte_3_4_or_1_4:
            examples_content_version = 'v1.4'
        elif version_gte_3_3_or_1_3:
            examples_content_version = 'v1.3'
        elif version_gte_3_2_or_1_2:
            examples_content_version = 'v1.2'
        elif version_gte_3_1_or_1_1:
            examples_content_version = 'v1.1'
        else:
            examples_content_version = 'v1.0'

        facts['common']['examples_content_version'] = examples_content_version

    return facts

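# Worked example for set_version_facts_if_unset() above (illustrative): an
# enterprise host reporting version '3.5.5.31' gets short_version='3.5',
# version_gte_3_5_or_1_5=True, version_gte_3_6_or_1_6=False and
# examples_content_version='v1.4' (the newest content version this table
# knows about for 3.4+ clusters).
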
def set_manageiq_facts_if_unset(facts):
    """ Set manageiq facts. This currently includes common.use_manageiq.
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
        Raises:
            OpenShiftFactsInternalError:
    """
    if 'common' not in facts or 'version_gte_3_1_or_1_1' not in facts['common']:
        raise OpenShiftFactsInternalError(
            "Invalid invocation: The required facts are not set"
        )
    if 'use_manageiq' not in facts['common']:
        facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1']

    return facts

def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict
        Args:
            facts (dict): existing facts
            system_facts (dict): ansible_facts
        Returns:
            dict: the facts dict updated with the generated sdn facts if they
                were not already present
    """
    # pylint: disable=too-many-branches
    if 'common' in facts:
        use_sdn = facts['common']['use_openshift_sdn']
        if not (use_sdn == '' or isinstance(use_sdn, bool)):
            use_sdn = safe_get_bool(use_sdn)
            facts['common']['use_openshift_sdn'] = use_sdn
        if 'sdn_network_plugin_name' not in facts['common']:
            plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
            facts['common']['sdn_network_plugin_name'] = plugin

    if 'master' in facts:
        # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
        # these might be overridden if they exist in the master config file
        sdn_cluster_network_cidr = '10.128.0.0/14'
        sdn_host_subnet_length = '9'

        master_cfg_path = os.path.join(facts['common']['config_base'],
                                       'master/master-config.yaml')
        if os.path.isfile(master_cfg_path):
            with open(master_cfg_path, 'r') as master_cfg_f:
                config = yaml.safe_load(master_cfg_f.read())

            if 'networkConfig' in config:
                if 'clusterNetworkCIDR' in config['networkConfig']:
                    sdn_cluster_network_cidr = \
                        config['networkConfig']['clusterNetworkCIDR']
                if 'hostSubnetLength' in config['networkConfig']:
                    sdn_host_subnet_length = \
                        config['networkConfig']['hostSubnetLength']

        if 'sdn_cluster_network_cidr' not in facts['master']:
            facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
        if 'sdn_host_subnet_length' not in facts['master']:
            facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length

    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']

        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'

        for val in itervalues(system_facts):
            if isinstance(val, dict) and 'mtu' in val:
                mtu = val['mtu']

                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    facts['node']['sdn_mtu'] = str(mtu - 50)

    return facts

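# Illustrative MTU handling from set_sdn_facts_if_unset() above: if the
# interface owning facts['common']['ip'] reports mtu=1500 in the Ansible
# system facts, sdn_mtu becomes '1450', leaving 50 bytes of headroom for the
# SDN encapsulation; when no matching interface is found, the same '1450'
# default is kept.
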
def set_nodename(facts):
    """ set nodename """
    if 'node' in facts and 'common' in facts:
        if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
            facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
        elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
            facts['node']['nodename'] = facts['provider']['metadata']['hostname'].split('.')[0]
        else:
            facts['node']['nodename'] = facts['common']['hostname'].lower()
    return facts

def migrate_oauth_template_facts(facts):
    """
    Migrate an old oauth template fact to a newer format if it's present.

    The legacy 'oauth_template' fact was just a filename, and assumed you were
    setting the 'login' template.

    The new pluralized 'oauth_templates' fact is a dict mapping the template
    name to a filename.

    Simplify the code after this by merging the old fact into the new.
    """
    if 'master' in facts and 'oauth_template' in facts['master']:
        if 'oauth_templates' not in facts['master']:
            facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']}
        elif 'login' not in facts['master']['oauth_templates']:
            facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
    return facts


def format_url(use_ssl, hostname, port, path=''):
    """ Format url based on ssl flag, hostname, port and path
        Args:
            use_ssl (bool): is ssl enabled
            hostname (str): hostname
            port (str): port
            path (str): url path
        Returns:
            str: The generated url string
    """
    scheme = 'https' if use_ssl else 'http'
    netloc = hostname
    if (use_ssl and port != '443') or (not use_ssl and port != '80'):
        netloc += ":%s" % port
    try:
        url = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
    except AttributeError:
        # pylint: disable=undefined-variable
        url = urlunparse((scheme, netloc, path, '', '', ''))
    return url

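# Examples for format_url() above (illustrative, not executed):
#   format_url(True, 'master.example.com', '8443', 'console')
#       -> 'https://master.example.com:8443/console'
#   format_url(True, 'master.example.com', '443')
#       -> 'https://master.example.com'  (scheme-default ports are omitted)
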
def get_current_config(facts):
    """ Get current openshift config
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    roles = [role for role in facts if role not in ['common', 'provider']]

    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]

        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )

        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
            try:
                _, output, _ = module.run_command(  # noqa: F405
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)

                cad = 'certificate-authority-data'
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass

                current_config['kubeconfig'] = config

            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass

    return current_config

def build_kubelet_args(facts):
    """Build node kubelet_args

    In the node-config.yaml file, kubeletArguments sub-keys have their
    values provided as a list. Hence the gratuitous use of ['foo'] below.
    """
    cloud_cfg_path = os.path.join(
        facts['common']['config_base'],
        'cloudprovider')

    # We only have to do this stuff on hosts that are nodes
    if 'node' in facts:
        # Any changes to the kubeletArguments parameter are stored
        # here first.
        kubelet_args = {}

        if 'cloudprovider' in facts:
            # EVERY cloud is special <3
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    kubelet_args['cloud-provider'] = ['aws']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    kubelet_args['cloud-provider'] = ['openstack']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    kubelet_args['cloud-provider'] = ['gce']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']

        # Automatically add node-labels to the kubeletArguments
        # parameter. See BZ1359848 for additional details.
        #
        # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
        if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict):
            # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns
            # into ['foo=bar', 'a=b']
            #
            # On the openshift_node_labels inventory variable we loop
            # over each key-value tuple (from .items()) and join the
            # key to the value with an '=' character, this produces a
            # list.
            #
            # map() seems to be returning an itertools.imap object
            # instead of a list. We cast it to a list ourselves.
            # pylint: disable=unnecessary-lambda
            labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
            if labels_str:
                kubelet_args['node-labels'] = labels_str

        # If we've added items to the kubelet_args dict then we need
        # to merge the new items back into the main facts object.
        if kubelet_args != {}:
            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])

    return facts

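# Illustrative outcome of build_kubelet_args() for an AWS node with
# openshift_node_labels={'region': 'infra'} and config_base='/etc/origin'
# (example values only):
#   kubelet_args = {'cloud-provider': ['aws'],
#                   'cloud-config': ['/etc/origin/cloudprovider/aws.conf'],
#                   'node-labels': ['region=infra']}
# which is then merged back into facts as node.kubelet_args.
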
def build_controller_args(facts):
    """ Build master controller_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        controller_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    controller_args['cloud-provider'] = ['aws']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    controller_args['cloud-provider'] = ['openstack']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    controller_args['cloud-provider'] = ['gce']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
        if controller_args != {}:
            facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
    return facts


def build_api_server_args(facts):
    """ Build master api_server_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        api_server_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    api_server_args['cloud-provider'] = ['aws']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    api_server_args['cloud-provider'] = ['openstack']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    api_server_args['cloud-provider'] = ['gce']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
        if api_server_args != {}:
            facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
    return facts


def is_service_running(service):
    """ Queries systemd through dbus to see if the service is running """
    service_running = False
    try:
        bus = SystemBus()
        systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
        manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
        service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
        service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
        service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
        service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
        service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        if service_load_state == 'loaded' and service_active_state == 'active':
            service_running = True
    except DBusException:
        # TODO: do not swallow exception, as it may be hiding useful debugging
        # information.
        pass

    return service_running

def rpm_rebuilddb():
    """
    Runs rpm --rebuilddb to ensure the db is in good shape.
    """
    module.run_command(['/usr/bin/rpm', '--rebuilddb'])  # noqa: F405

def get_version_output(binary, version_cmd):
    """ runs and returns the version output for a command """
    cmd = []
    for item in (binary, version_cmd):
        if isinstance(item, list):
            cmd.extend(item)
        else:
            cmd.append(item)

    output = ''
    if os.path.isfile(cmd[0]):
        _, output, _ = module.run_command(cmd)  # noqa: F405
    return output


def get_docker_version_info():
    """ Parses and returns the docker version info """
    result = None
    if is_service_running('docker'):
        version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
        if 'Server' in version_info:
            result = {
                'api_version': version_info['Server']['API version'],
                'version': version_info['Server']['Version']
            }
    return result
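
# Representative sketch (not part of the module): on hosts where the docker
# service is active, `docker version` output from this era parses as YAML, e.g.
#
#   Server:
#    Version:      1.10.3
#    API version:  1.22
#
# which yields roughly {'api_version': 1.22, 'version': '1.10.3'}. The exact
# fields vary between Docker releases, so treat this purely as an illustration.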

def get_hosted_registry_insecure():
    """ Parses OPTIONS from /etc/sysconfig/docker to determine if the
        registry is currently insecure.
    """
    hosted_registry_insecure = None
    if os.path.exists('/etc/sysconfig/docker'):
        try:
            ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = configparser.RawConfigParser()
            config.readfp(ini_fp)
            options = config.get('root', 'OPTIONS')
            if 'insecure-registry' in options:
                hosted_registry_insecure = True
        except Exception:  # pylint: disable=broad-except
            pass
    return hosted_registry_insecure

def get_openshift_version(facts):
    """ Get current version of openshift on the host.

        Checks a variety of ways ranging from fastest to slowest.

        Args:
            facts (dict): existing facts

        Returns:
            version: the current openshift version
    """
    version = None

    # No need to run this method repeatedly on a system if we already know the
    # version
    # TODO: We need a way to force reload this after upgrading bits.
    if 'common' in facts:
        if 'version' in facts['common'] and facts['common']['version'] is not None:
            return chomp_commit_offset(facts['common']['version'])

    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)
    elif 'common' in facts and 'is_containerized' in facts['common']:
        version = get_container_openshift_version(facts)

    # Handle containerized masters that have not yet been configured as a node.
    # This can be very slow and may get re-run multiple times, so we only use this
    # if other methods failed to find a version.
    if not version and os.path.isfile('/usr/local/bin/openshift'):
        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)

    return chomp_commit_offset(version)

def chomp_commit_offset(version):
    """ Chomp any "+git.foo" commit offset string from the given `version`
        and return the modified version string.

        Ex:
        - chomp_commit_offset(None)                 => None
        - chomp_commit_offset(1337)                 => "1337"
        - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
        - chomp_commit_offset("v3.4.0.15")          => "v3.4.0.15"
        - chomp_commit_offset("v1.3.0+52492b4")     => "v1.3.0"
    """
    if version is None:
        return version
    else:
        # Stringify, just in case it's a Number type. Split by '+' and
        # return the first split. No concerns about strings without a
        # '+', .split() returns an array of the original string.
        return str(version).split('+')[0]

def get_container_openshift_version(facts):
    """
    If containerized, see if we can determine the installed version via the
    systemd environment files.
    """
    for filename in ['/etc/sysconfig/%s-master', '/etc/sysconfig/%s-node']:
        env_path = filename % facts['common']['service_type']
        if not os.path.exists(env_path):
            continue

        with open(env_path) as env_file:
            for line in env_file:
                if line.startswith("IMAGE_VERSION="):
                    tag = line[len("IMAGE_VERSION="):].strip()
                    # Remove leading "v" and any trailing release info, we just want
                    # a version number here:
                    version = tag[1:].split("-")[0]
                    return version
    return None
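
# Illustrative example (file contents are made up): if /etc/sysconfig/origin-node
# contains the line `IMAGE_VERSION=v1.3.0-1`, the leading "v" and the "-1"
# release suffix are stripped and the function returns '1.3.0'.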

def parse_openshift_version(output):
    """ Parse the version number from the output of 'openshift version'

        Args:
            output (string): output of 'openshift version'
        Returns:
            string: the version number
    """
    versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
    ver = versions.get('openshift', '')
    # Remove trailing build number and commit hash from older versions, we need to return a straight
    # w.x.y.z version here for use as openshift_version throughout the playbooks/roles. (i.e. 3.1.1.6-64-g80b61da)
    ver = ver.split('-')[0]
    return ver
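
# Illustrative example (the output below is representative, not captured from a
# real host):
#
#   output = ("openshift v3.3.0.32\n"
#             "kubernetes v1.3.0+52492b4\n"
#             "etcd 2.3.0+git")
#   parse_openshift_version(output)  # -> '3.3.0.32'
#
# Older builds report something like 'openshift v3.1.1.6-64-g80b61da'; the
# trailing '-64-g80b61da' build/commit suffix is dropped by the split('-').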

def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to supplied facts dict

        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts
    """
    if not provider_facts:
        return facts

    common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
    for h_var, ip_var in common_vars:
        ip_value = provider_facts['network'].get(ip_var)
        if ip_value:
            facts['common'][ip_var] = ip_value

        facts['common'][h_var] = choose_hostname(
            [provider_facts['network'].get(h_var)],
            facts['common'][h_var]
        )

    facts['provider'] = provider_facts
    return facts

# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
    """ Recursively merge facts dicts

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    protected_facts = ['ha', 'master_count']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config',
                            "builddefaults",
                            "buildoverrides"]

    facts = dict()
    for key, value in iteritems(orig):
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watchout for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], string_types):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)

                # Collect the subset of protected facts to overwrite
                # if key matches. These will be passed to the
                # subsequent merge_facts call.
                relevant_protected_facts = []
                for item in protected_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_protected_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # Key matches a protected fact and we are not overwriting
            # it so we will determine if it is okay to change this
            # fact.
            elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                # The master count (int) can only increase unless it
                # has been passed as a protected fact to overwrite.
                if key == 'master_count' and new[key] is not None and new[key] != '':
                    if int(value) <= int(new[key]):
                        facts[key] = copy.deepcopy(new[key])
                    else:
                        # pylint: disable=line-too-long
                        module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')  # noqa: F405
                # ha (bool) can not change unless it has been passed
                # as a protected fact to overwrite.
                if key == 'ha':
                    if safe_get_bool(value) != safe_get_bool(new[key]):
                        # pylint: disable=line-too-long
                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')  # noqa: F405
                    else:
                        facts[key] = value
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)

    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watchout for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], string_types):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts
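
# Illustrative example (not part of the module): 'named_certificates' is an
# additive fact, so list values are unioned rather than replaced, while plain
# facts are simply overwritten by the new value:
#
#   orig = {'master': {'named_certificates': [{'certfile': 'a.crt'}],
#                      'debug_level': 2}}
#   new = {'master': {'named_certificates': [{'certfile': 'b.crt'}],
#                     'debug_level': 4}}
#   merge_facts(orig, new, [], [])
#   # -> {'master': {'named_certificates': [{'certfile': 'a.crt'},
#   #                                       {'certfile': 'b.crt'}],
#   #                'debug_level': 4}}
#
# Passing ['master.named_certificates'] as additive_facts_to_overwrite would
# replace the list with the new value instead of appending to it.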

def save_local_facts(filename, facts):
    """ Save local facts

        Args:
            filename (str): local facts file
            facts (dict): facts to set
    """
    try:
        fact_dir = os.path.dirname(filename)
        try:
            os.makedirs(fact_dir)  # try to make the directory
        except OSError as exception:
            if exception.errno != errno.EEXIST:  # but it is okay if it is already there
                raise  # pass any other exceptions up the chain
        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))  # noqa: F405
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )

def get_local_facts_from_file(filename):
    """ Retrieve local facts from fact file

        Args:
            filename (str): local facts file
        Returns:
            dict: the retrieved facts
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style
        ini_facts = configparser.SafeConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict()
            for key, value in ini_facts.items(section):
                local_facts[section][key] = value

    except (configparser.MissingSectionHeaderError,
            configparser.ParsingError):
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass

    return local_facts

def sort_unique(alist):
    """ Sorts and de-dupes a list

        Args:
            alist (list): the list to sort and de-dupe
        Returns:
            list: a sorted de-duped list
    """
    alist.sort()
    out = list()
    for i in alist:
        if i not in out:
            out.append(i)

    return out

def safe_get_bool(fact):
    """ Get a boolean fact safely.

        Args:
            fact: fact to convert
        Returns:
            bool: given fact as a bool
    """
    return bool(strtobool(str(fact)))
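
# Illustrative example: strtobool accepts the usual truthy/falsey spellings, so
# safe_get_bool('yes'), safe_get_bool('True') and safe_get_bool(1) all return
# True, while safe_get_bool('no') and safe_get_bool(0) return False. Anything
# else (e.g. 'maybe') raises ValueError from strtobool.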

def set_proxy_facts(facts):
    """ Set global proxy facts

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'common' in facts:
        common = facts['common']
        if 'http_proxy' in common or 'https_proxy' in common:
            if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
                common['no_proxy'] = common['no_proxy'].split(",")
            elif 'no_proxy' not in common:
                common['no_proxy'] = []
            if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
                if 'no_proxy_internal_hostnames' in common:
                    common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
            # We always add local dns domain and ourselves no matter what
            common['no_proxy'].append('.' + common['dns_domain'])
            common['no_proxy'].append(common['hostname'])
            common['no_proxy'] = sort_unique(common['no_proxy'])
        facts['common'] = common
    return facts
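
# Illustrative example (hostnames are made up): with an http_proxy set and
# generate_no_proxy_hosts enabled, the no_proxy list is normalized, extended
# with the local dns domain and the host itself, then sorted and de-duped:
#
#   facts = {'common': {'http_proxy': 'http://proxy.example.com:3128',
#                       'no_proxy': 'registry.example.com',
#                       'generate_no_proxy_hosts': True,
#                       'dns_domain': 'cluster.local',
#                       'hostname': 'node1.example.com'}}
#   set_proxy_facts(facts)['common']['no_proxy']
#   # -> ['.cluster.local', 'node1.example.com', 'registry.example.com']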

def set_builddefaults_facts(facts):
    """ Set build defaults including setting proxy values from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Copy values from common to builddefaults
        if 'http_proxy' not in builddefaults and 'http_proxy' in common:
            builddefaults['http_proxy'] = common['http_proxy']
        if 'https_proxy' not in builddefaults and 'https_proxy' in common:
            builddefaults['https_proxy'] = common['https_proxy']
        if 'no_proxy' not in builddefaults and 'no_proxy' in common:
            builddefaults['no_proxy'] = common['no_proxy']

        # Create git specific facts from generic values, if git specific values are
        # not defined.
        if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
            builddefaults['git_http_proxy'] = builddefaults['http_proxy']
        if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
            builddefaults['git_https_proxy'] = builddefaults['https_proxy']
        if 'git_no_proxy' not in builddefaults and 'no_proxy' in builddefaults:
            builddefaults['git_no_proxy'] = builddefaults['no_proxy']

        # If we're actually defining a builddefaults config then create admission_plugin_config
        # then merge builddefaults[config] structure into admission_plugin_config
        if 'config' in builddefaults:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
            # if the user didn't actually provide proxy values, delete the proxy env variable defaults.
            delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])

    return facts
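
# Illustrative sketch (not part of the module; the apiVersion/kind values are
# assumptions): when the inventory supplies a builddefaults 'config' dict, it is
# merged into master.admission_plugin_config and env entries with empty values
# are dropped, e.g.
#
#   facts['builddefaults']['config'] = {
#       'BuildDefaults': {'configuration': {
#           'apiVersion': 'v1', 'kind': 'BuildDefaultsConfig',
#           'env': [{'name': 'HTTP_PROXY', 'value': 'http://proxy.example.com:3128'},
#                   {'name': 'NO_PROXY', 'value': ''}]}}}
#
# After set_builddefaults_facts(facts), the empty NO_PROXY entry is removed by
# delete_empty_keys() and the remaining structure lands under
# facts['master']['admission_plugin_config']['BuildDefaults'].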

def delete_empty_keys(keylist):
    """ Delete dictionary elements from keylist where "value" is empty.

        Args:
            keylist(list): A list of builddefault configuration envs.

        Returns:
            none

        Example:
            keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                       {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                       {'name': 'NO_PROXY', 'value': ''}]

            After calling delete_empty_keys the provided list is modified to become:

            [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
             {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}]
    """
    count = 0
    for i in range(0, len(keylist)):
        if len(keylist[i - count]['value']) == 0:
            del keylist[i - count]
            count += 1

def set_buildoverrides_facts(facts):
    """ Set build overrides

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'buildoverrides' in facts:
        buildoverrides = facts['buildoverrides']
        # If we're actually defining a buildoverrides config then create admission_plugin_config
        # then merge buildoverrides[config] structure into admission_plugin_config
        if 'config' in buildoverrides:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            facts['master']['admission_plugin_config'].update(buildoverrides['config'])

    return facts

# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated containerization
            facts
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type in ['enterprise', 'openshift-enterprise']:
        master_image = 'openshift3/ose'
        cli_image = master_image
        node_image = 'openshift3/node'
        ovs_image = 'openshift3/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift3/ose-pod'
        router_image = 'openshift3/ose-haproxy-router'
        registry_image = 'openshift3/ose-docker-registry'
        deployer_image = 'openshift3/ose-deployer'
    elif deployment_type == 'atomic-enterprise':
        master_image = 'aep3_beta/aep'
        cli_image = master_image
        node_image = 'aep3_beta/node'
        ovs_image = 'aep3_beta/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'aep3_beta/aep-pod'
        router_image = 'aep3_beta/aep-haproxy-router'
        registry_image = 'aep3_beta/aep-docker-registry'
        deployer_image = 'aep3_beta/aep-deployer'
    else:
        master_image = 'openshift/origin'
        cli_image = master_image
        node_image = 'openshift/node'
        ovs_image = 'openshift/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift/origin-pod'
        router_image = 'openshift/origin-haproxy-router'
        registry_image = 'openshift/origin-docker-registry'
        deployer_image = 'openshift/origin-deployer'

    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
    if 'is_containerized' not in facts['common']:
        facts['common']['is_containerized'] = facts['common']['is_atomic']
    if 'cli_image' not in facts['common']:
        facts['common']['cli_image'] = cli_image
    if 'pod_image' not in facts['common']:
        facts['common']['pod_image'] = pod_image
    if 'router_image' not in facts['common']:
        facts['common']['router_image'] = router_image
    if 'registry_image' not in facts['common']:
        facts['common']['registry_image'] = registry_image
    if 'deployer_image' not in facts['common']:
        facts['common']['deployer_image'] = deployer_image
    if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
        facts['etcd']['etcd_image'] = etcd_image
    if 'master' in facts and 'master_image' not in facts['master']:
        facts['master']['master_image'] = master_image
    if 'node' in facts:
        if 'node_image' not in facts['node']:
            facts['node']['node_image'] = node_image
        if 'ovs_image' not in facts['node']:
            facts['node']['ovs_image'] = ovs_image

    if safe_get_bool(facts['common']['is_containerized']):
        facts['common']['admin_binary'] = '/usr/local/bin/oadm'
        facts['common']['client_binary'] = '/usr/local/bin/oc'

    return facts

def set_installed_variant_rpm_facts(facts):
    """ Set RPM facts of installed variant

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with installed_variant_rpms
    """
    installed_rpms = []
    for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
        optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
        variant_rpms = [base_rpm] + \
                       ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
                       ['tuned-profiles-%s-node' % base_rpm]
        for rpm in variant_rpms:
            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])  # noqa: F405
            if exit_code == 0:
                installed_rpms.append(rpm)

    facts['common']['installed_variant_rpms'] = installed_rpms
    return facts
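
# Illustrative example (installed package set is made up): on an RPM-installed
# Origin master, `rpm -q` would typically succeed for 'origin', 'origin-master'
# and 'origin-node', so facts['common']['installed_variant_rpms'] ends up as
# something like ['origin', 'origin-master', 'origin-node'].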

class OpenShiftFactsInternalError(Exception):
    """Origin Facts Error"""
    pass


class OpenShiftFactsUnsupportedRoleError(Exception):
    """Origin Facts Unsupported Role Error"""
    pass


class OpenShiftFactsFileWriteError(Exception):
    """Origin Facts File Write Error"""
    pass


class OpenShiftFactsMetadataUnavailableError(Exception):
    """Origin Facts Metadata Unavailable Error"""
    pass

class OpenShiftFacts(object):
    """ Origin Facts

        Attributes:
            facts (dict): facts for the host

        Args:
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Raises:
            OpenShiftFactsUnsupportedRoleError:
    """
    known_roles = ['builddefaults',
                   'buildoverrides',
                   'clock',
                   'cloudprovider',
                   'common',
                   'docker',
                   'etcd',
                   'hosted',
                   'master',
                   'node']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None,
                 openshift_env=None,
                 openshift_env_structures=None,
                 protected_facts_to_overwrite=None):
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        try:
            # ansible-2.1
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])  # noqa: F405
            for (k, v) in self.system_facts.items():
                self.system_facts["ansible_%s" % k.replace('-', '_')] = v
        except UnboundLocalError:
            # ansible-2.2
            self.system_facts = get_all_facts(module)['ansible_facts']  # noqa: F405

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite,
                                         openshift_env,
                                         openshift_env_structures,
                                         protected_facts_to_overwrite)

    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite,
                       openshift_env,
                       openshift_env_structures,
                       protected_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift_env facts for overriding generated defaults
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The generated facts
        """
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite,
                                            openshift_env,
                                            openshift_env_structures,
                                            protected_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite,
                            protected_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        facts = set_url_facts_if_unset(facts)
        facts = set_project_cfg_facts_if_unset(facts)
        facts = set_flannel_facts_if_unset(facts)
        facts = set_nuage_facts_if_unset(facts)
        facts = set_node_schedulability(facts)
        facts = set_selectors(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_evacuate_or_drain_option(facts)
        facts = set_dnsmasq_facts_if_unset(facts)
        facts = set_manageiq_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        facts = set_proxy_facts(facts)
        facts = set_builddefaults_facts(facts)
        facts = set_buildoverrides_facts(facts)
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)

    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host
                deployment_type (str): deployment type for the cluster
                deployment_subtype (str): deployment subtype for the cluster

            Returns:
                dict: The generated default facts
        """
        defaults = {}
        ip_addr = self.system_facts['ansible_default_ipv4']['address']
        exit_code, output, _ = module.run_command(['hostname', '-f'])  # noqa: F405
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr)

        defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc', admin_binary='oadm',
                                  dns_domain='cluster.local',
                                  install_examples=True,
                                  debug_level=2)

        if 'master' in roles:
            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_etcd=True, embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='30s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)

        if 'docker' in roles:
            docker = dict(disable_push_dockerhub=False,
                          options='--log-driver=json-file --log-opt max-size=50m')
            # NOTE: This is a workaround for a dnf output racecondition that can occur in
            # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184
            if self.system_facts['ansible_pkg_mgr'] == 'dnf':
                rpm_rebuilddb()

            version_info = get_docker_version_info()
            if version_info is not None:
                docker['api_version'] = version_info['api_version']
                docker['version'] = version_info['version']
                docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10')

            hosted_registry_insecure = get_hosted_registry_insecure()
            if hosted_registry_insecure is not None:
                docker['hosted_registry_insecure'] = hosted_registry_insecure

            defaults['docker'] = docker

        if 'clock' in roles:
            exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])  # noqa: F405
            chrony_installed = bool(exit_code == 0)
            defaults['clock'] = dict(
                enabled=True,
                chrony_installed=chrony_installed)

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        if 'hosted' in roles or self.role == 'hosted':
            defaults['hosted'] = dict(
                metrics=dict(
                    deploy=False,
                    duration=7,
                    resolution='10s',
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='metrics',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access=dict(
                            modes=['ReadWriteOnce']
                        ),
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                logging=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='logging-es',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access=dict(
                            modes=['ReadWriteOnce']
                        ),
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                registry=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='registry',
                            size='5Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        host=None,
                        access=dict(
                            modes=['ReadWriteMany']
                        ),
                        create_pv=True,
                        create_pvc=True
                    )
                ),
                router=dict()
            )

        return defaults

    def guess_host_provider(self):
        """ Guess the host provider

            Returns:
                dict: The generated default facts for the detected provider
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['ansible_product_name']
        product_version = self.system_facts['ansible_product_version']
        virt_type = self.system_facts['ansible_virtualization_type']
        virt_role = self.system_facts['ansible_virtualization_role']
        provider = None
        metadata = None

        # TODO: this is not exposed through module_utils/facts.py in ansible,
        # need to create PR for ansible to expose it
        bios_vendor = get_file_content(  # noqa: F405
            '/sys/devices/virtual/dmi/id/bios_vendor'
        )
        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)

    def init_provider_facts(self):
        """ Initialize the provider facts

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts

    @staticmethod
    def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
        """ Split openshift_env facts based on openshift_env structures.

            Args:
                openshift_env_fact (string): the openshift_env fact to split
                                             ex: 'openshift_cloudprovider_openstack_auth_url'
                openshift_env_structures (list): a list of structures to determine fact keys
                                                 ex: ['openshift.cloudprovider.openstack.*']
            Returns:
                list: a list of keys that represent the fact
                      ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
        """
        # By default, we'll split an openshift_env fact by underscores.
        fact_keys = openshift_env_fact.split('_')

        # Determine if any of the provided variable structures match the fact.
        matching_structure = None
        if openshift_env_structures is not None:
            for structure in openshift_env_structures:
                if re.match(structure, openshift_env_fact):
                    matching_structure = structure
        # Fact didn't match any variable structures so return the default fact keys.
        if matching_structure is None:
            return fact_keys

        final_keys = []
        structure_keys = matching_structure.split('.')
        for structure_key in structure_keys:
            # Matched current key. Add to final keys.
            if structure_key == fact_keys[structure_keys.index(structure_key)]:
                final_keys.append(structure_key)
            # Wildcard means we will be taking everything from here to the end of the fact.
            elif structure_key == '*':
                final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
            # Shouldn't have gotten here, return the fact keys.
            else:
                return fact_keys
        return final_keys

    # Disabling too-many-branches and too-many-locals.
    # This should be cleaned up as a TODO item.
    # pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None,
                         openshift_env=None,
                         openshift_env_structures=None,
                         protected_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift env facts to set
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']

            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        if openshift_env != {} and openshift_env is not None:
            for fact, value in iteritems(openshift_env):
                oo_env_facts = dict()
                current_level = oo_env_facts
                keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]

                if len(keys) > 0 and keys[0] != self.role:
                    continue

                for key in keys:
                    if key == keys[-1]:
                        current_level[key] = value
                    elif key not in current_level:
                        current_level[key] = dict()
                        current_level = current_level[key]

                facts_to_set = merge_facts(orig=facts_to_set,
                                           new=oo_env_facts,
                                           additive_facts_to_overwrite=[],
                                           protected_facts_to_overwrite=[])

        local_facts = get_local_facts_from_file(self.filename)

        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite,
                                      protected_facts_to_overwrite)

        if 'docker' in new_local_facts:
            # remove duplicate and empty strings from registry lists
            for cat in ['additional', 'blocked', 'insecure']:
                key = '{0}_registries'.format(cat)
                if key in new_local_facts['docker']:
                    val = new_local_facts['docker'][key]
                    if isinstance(val, string_types):
                        val = [x.strip() for x in val.split(',')]
                    new_local_facts['docker'][key] = list(set(val) - set(['']))

            # Convert legacy log_options comma sep string to a list if present:
            if 'log_options' in new_local_facts['docker'] and \
                    isinstance(new_local_facts['docker']['log_options'], string_types):
                new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')

        new_local_facts = self.remove_empty_facts(new_local_facts)

        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:  # noqa: F405
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts

    def remove_empty_facts(self, facts=None):
        """ Remove empty facts

            Args:
                facts (dict): facts to clean
        """
        facts_to_remove = []
        for fact, value in iteritems(facts):
            if isinstance(facts[fact], dict):
                facts[fact] = self.remove_empty_facts(facts[fact])
            else:
                if value == "" or value == [""] or value is None:
                    facts_to_remove.append(fact)
        for fact in facts_to_remove:
            del facts[fact]
        return facts

    def validate_local_facts(self, facts=None):
        """ Validate local facts

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            msg = 'Invalid facts detected:\n'
            # pylint: disable=consider-iterating-dictionary
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            module.fail_json(msg=msg, changed=self.changed)  # noqa: F405

    # disabling pylint errors for line-too-long since we're dealing
    # with best effort reduction of error messages here.
    # disabling errors for too-many-branches since we require checking
    # many conditions.
    # pylint: disable=line-too-long, too-many-branches
    @staticmethod
    def validate_master_facts(facts, invalid_facts):
        """ Validate master facts

            Args:
                facts (dict): local facts to validate
                invalid_facts (dict): collected invalid_facts

            Returns:
                dict: Invalid facts
        """
        if 'master' in facts:
            # openshift.master.session_auth_secrets
            if 'session_auth_secrets' in facts['master']:
                session_auth_secrets = facts['master']['session_auth_secrets']
                if not issubclass(type(session_auth_secrets), list):
                    invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
                elif 'session_encryption_secrets' not in facts['master']:
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption_secrets must be set '
                                                             'if openshift_master_session_auth_secrets is provided.')
                elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
                                                             'openshift_master_session_encryption_secrets must be '
                                                             'equal length.')
                else:
                    for secret in session_auth_secrets:
                        if len(secret) < 32:
                            invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
                                                                     'Secrets must be at least 32 characters in length.')
            # openshift.master.session_encryption_secrets
            if 'session_encryption_secrets' in facts['master']:
                session_encryption_secrets = facts['master']['session_encryption_secrets']
                if not issubclass(type(session_encryption_secrets), list):
                    invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
                elif 'session_auth_secrets' not in facts['master']:
                    invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
                                                                   'set if openshift_master_session_encryption_secrets '
                                                                   'is provided.')
                else:
                    for secret in session_encryption_secrets:
                        if len(secret) not in [16, 24, 32]:
                            invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
                                                                           'Secrets must be 16, 24, or 32 characters in length.')
        return invalid_facts

def main():
    """ main """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(  # noqa: F405
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
            openshift_env=dict(default={}, type='dict', required=False),
            openshift_env_structures=dict(default=[], type='list', required=False),
            protected_facts_to_overwrite=dict(default=[], type='list', required=False)
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    if not HAVE_DBUS:
        module.fail_json(msg="This module requires dbus python bindings")  # noqa: F405

    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']  # noqa: F405
    module.params['gather_timeout'] = 10  # noqa: F405
    module.params['filter'] = '*'  # noqa: F405

    role = module.params['role']  # noqa: F405
    local_facts = module.params['local_facts']  # noqa: F405
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']  # noqa: F405
    openshift_env = module.params['openshift_env']  # noqa: F405
    openshift_env_structures = module.params['openshift_env_structures']  # noqa: F405
    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']  # noqa: F405

    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite,
                                     openshift_env,
                                     openshift_env_structures,
                                     protected_facts_to_overwrite)

    file_params = module.params.copy()  # noqa: F405
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)  # noqa: F405
    changed = module.set_fs_attributes_if_different(file_args,  # noqa: F405
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,  # noqa: F405
                            ansible_facts=openshift_facts.facts)


if __name__ == '__main__':
    main()
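
# Usage sketch (an assumption, not taken from this repository's docs): the module
# is normally driven by the openshift_facts role, but it can also be exercised
# ad hoc against the inventory, e.g.
#
#   ansible masters -m openshift_facts \
#       -a '{"role": "master", "local_facts": {"ha": true}}'
#
# which writes or updates /etc/ansible/facts.d/openshift.fact on each host and
# returns the generated facts under ansible_facts.openshift.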