openshift_facts.py 105 KB

#!/usr/bin/python
# pylint: disable=too-many-lines
# -*- coding: utf-8 -*-
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.
"""Ansible module for retrieving and setting openshift related facts"""
# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
import errno
import json
import re
import io
import os
import yaml
import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
from ansible.module_utils.six import string_types, text_type
from ansible.module_utils.six.moves import configparser

# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *  # noqa: F403
from ansible.module_utils.facts import *  # noqa: F403
from ansible.module_utils.urls import *  # noqa: F403
from ansible.module_utils.six import iteritems, itervalues
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
from ansible.module_utils._text import to_native

HAVE_DBUS = False
try:
    from dbus import SystemBus, Interface
    from dbus.exceptions import DBusException
    HAVE_DBUS = True
except ImportError:
    pass

DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''

EXAMPLES = '''
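# The module is normally driven by the openshift_facts role rather than called
# directly. A minimal sketch of a direct invocation follows; the 'role' and
# 'local_facts' parameter names are assumptions drawn from how the rest of
# openshift-ansible calls this module, not a complete reference.
- name: Gather OpenShift cluster facts (illustrative only)
  openshift_facts:
    role: common
    local_facts:
      hostname: "node1.example.com"
      public_hostname: "node1.example.com"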
'''


def migrate_docker_facts(facts):
    """ Apply migrations for docker facts """
    params = {
        'common': (
            'additional_registries',
            'insecure_registries',
            'blocked_registries',
            'options'
        ),
        'node': (
            'log_driver',
            'log_options'
        )
    }
    if 'docker' not in facts:
        facts['docker'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                old_param = 'docker_' + param
                if old_param in facts[role]:
                    facts['docker'][param] = facts[role].pop(old_param)
    if 'node' in facts and 'portal_net' in facts['node']:
        facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')
    # log_options was originally meant to be a comma separated string, but
    # we now prefer an actual list, with backward compatibility:
    if 'log_options' in facts['docker'] and \
            isinstance(facts['docker']['log_options'], string_types):
        facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
    return facts


# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
    """ Migrate facts from various roles into common """
    params = {
        'node': ('portal_net',),
        'master': ('portal_net',)
    }
    if 'common' not in facts:
        facts['common'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['common'][param] = facts[role].pop(param)
    return facts


def migrate_node_facts(facts):
    """ Migrate facts from various roles into node """
    params = {
        'common': ('dns_ip',),
    }
    if 'node' not in facts:
        facts['node'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['node'][param] = facts[role].pop(param)
    return facts


def migrate_hosted_facts(facts):
    """ Migrate router and registry selector facts from master into hosted """
    if 'master' in facts:
        if 'router_selector' in facts['master']:
            if 'hosted' not in facts:
                facts['hosted'] = {}
            if 'router' not in facts['hosted']:
                facts['hosted']['router'] = {}
            facts['hosted']['router']['selector'] = facts['master'].pop('router_selector')
        if 'registry_selector' in facts['master']:
            if 'hosted' not in facts:
                facts['hosted'] = {}
            if 'registry' not in facts['hosted']:
                facts['hosted']['registry'] = {}
            facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
    return facts


def migrate_admission_plugin_facts(facts):
    """ Apply migrations for admission plugin facts """
    if 'master' in facts:
        if 'kube_admission_plugin_config' in facts['master']:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            # Merge existing kube_admission_plugin_config with admission_plugin_config.
            facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
                                                                     facts['master']['kube_admission_plugin_config'],
                                                                     additive_facts_to_overwrite=[],
                                                                     protected_facts_to_overwrite=[])
            # Remove kube_admission_plugin_config fact
            facts['master'].pop('kube_admission_plugin_config', None)
    return facts


def migrate_local_facts(facts):
    """ Apply migrations of local facts """
    migrated_facts = copy.deepcopy(facts)
    migrated_facts = migrate_docker_facts(migrated_facts)
    migrated_facts = migrate_common_facts(migrated_facts)
    migrated_facts = migrate_node_facts(migrated_facts)
    migrated_facts = migrate_hosted_facts(migrated_facts)
    migrated_facts = migrate_admission_plugin_facts(migrated_facts)
    return migrated_facts


def first_ip(network):
    """ Return the first IPv4 address in network
    Args:
        network (str): network in CIDR format
    Returns:
        str: first IPv4 address
    """
    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]  # noqa: E731
    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))  # noqa: E731

    (address, netmask) = network.split('/')
    netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
    return itoa((atoi(address) & netmask_i) + 1)
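# Worked example (added for illustration, not part of the original module): for
# the default OpenShift service network '172.30.0.0/16', first_ip() masks the
# address to 172.30.0.0 and adds one, so first_ip('172.30.0.0/16') returns
# '172.30.0.1' -- the address later stored as common.kube_svc_ip.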


def hostname_valid(hostname):
    """ Test if specified hostname should be considered valid
    Args:
        hostname (str): hostname to test
    Returns:
        bool: True if valid, otherwise False
    """
    if (not hostname or
            hostname.startswith('localhost') or
            hostname.endswith('localdomain')):
        return False
    return True


def choose_hostname(hostnames=None, fallback=''):
    """ Choose a hostname from the provided hostnames
    Given a list of hostnames and a fallback value, choose a hostname to
    use. This function will prefer fqdns if they exist (excluding any that
    begin with localhost or end with localdomain) over ip addresses.
    Args:
        hostnames (list): list of hostnames
        fallback (str): default value to set if hostnames does not contain
                        a valid hostname
    Returns:
        str: chosen hostname
    """
    hostname = fallback
    if hostnames is None:
        return hostname

    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
    ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
    hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]

    for host_list in (hosts, ips):
        for host in host_list:
            if hostname_valid(host):
                return host

    return hostname


def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Return metadata from the provided metadata_url
    Args:
        metadata_url (str): metadata url
        headers (dict): headers to set for metadata request
        expect_json (bool): does the metadata_url return json
    Returns:
        dict or list: metadata request result
    """
    result, info = fetch_url(module, metadata_url, headers=headers)  # noqa: F405
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        return module.from_json(to_native(result.read()))  # noqa: F405
    else:
        return [to_native(line.strip()) for line in result.readlines()]


def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Walk the metadata tree and return a dictionary of the entire tree
    Args:
        metadata_url (str): metadata url
        headers (dict): headers to set for metadata request
        expect_json (bool): does the metadata_url return json
    Returns:
        dict: the result of walking the metadata tree
    """
    metadata = dict()

    for line in query_metadata(metadata_url, headers, expect_json):
        if line.endswith('/') and not line == 'public-keys/':
            key = line[:-1]
            metadata[key] = walk_metadata(metadata_url + line,
                                          headers, expect_json)
        else:
            results = query_metadata(metadata_url + line, headers,
                                     expect_json)
            if len(results) == 1:
                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                metadata[line] = results.pop()
            else:
                metadata[line] = results
    return metadata


def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Retrieve the provider metadata
    Args:
        metadata_url (str): metadata url
        supports_recursive (bool): does the provider metadata api support
                                   recursion
        headers (dict): headers to set for metadata request
        expect_json (bool): does the metadata_url return json
    Returns:
        dict: the provider metadata
    """
    try:
        if supports_recursive:
            metadata = query_metadata(metadata_url, headers,
                                      expect_json)
        else:
            metadata = walk_metadata(metadata_url, headers,
                                     expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        metadata = None
    return metadata


def normalize_gce_facts(metadata, facts):
    """ Normalize gce facts
    Args:
        metadata (dict): provider metadata
        facts (dict): facts to update
    Returns:
        dict: the result of adding the normalized metadata to the provided
              facts dict
    """
    for interface in metadata['instance']['networkInterfaces']:
        int_info = dict(ips=[interface['ip']], network_type='gce')
        int_info['public_ips'] = [ac['externalIp'] for ac
                                  in interface['accessConfigs']]
        int_info['public_ips'].extend(interface['forwardedIps'])
        _, _, network_id = interface['network'].rpartition('/')
        int_info['network_id'] = network_id
        facts['network']['interfaces'].append(int_info)

    _, _, zone = metadata['instance']['zone'].rpartition('/')
    facts['zone'] = zone

    # GCE currently only supports a single interface
    facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
    pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
    facts['network']['public_ip'] = pub_ip
    # Split instance hostname from GCE metadata to use the short instance name
    facts['network']['hostname'] = metadata['instance']['hostname'].split('.')[0]

    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']

    return facts


def normalize_aws_facts(metadata, facts):
    """ Normalize aws facts
    Args:
        metadata (dict): provider metadata
        facts (dict): facts to update
    Returns:
        dict: the result of adding the normalized metadata to the provided
              facts dict
    """
    for interface in sorted(
            metadata['network']['interfaces']['macs'].values(),
            key=lambda x: x['device-number']
    ):
        int_info = dict()
        var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
        for ips_var, int_var in iteritems(var_map):
            ips = interface.get(int_var)
            if isinstance(ips, string_types):
                int_info[ips_var] = [ips]
            else:
                int_info[ips_var] = ips
        if 'vpc-id' in interface:
            int_info['network_type'] = 'vpc'
        else:
            int_info['network_type'] = 'classic'
        if int_info['network_type'] == 'vpc':
            int_info['network_id'] = interface['subnet-id']
        else:
            int_info['network_id'] = None
        facts['network']['interfaces'].append(int_info)

    facts['zone'] = metadata['placement']['availability-zone']

    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')

    # TODO: verify that local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')

    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = metadata.get('public-hostname')

    return facts


def normalize_openstack_facts(metadata, facts):
    """ Normalize openstack facts
    Args:
        metadata (dict): provider metadata
        facts (dict): facts to update
    Returns:
        dict: the result of adding the normalized metadata to the provided
              facts dict
    """
    # openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api, should be updated if neutron exposes this.

    facts['zone'] = metadata['availability_zone']
    local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
    facts['network']['ip'] = local_ipv4
    facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']

    for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
                                 ('public_hostname', 'public-hostname', 'public-ipv4')]:
        try:
            if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
                facts['network'][f_var] = metadata['ec2_compat'][h_var]
            else:
                facts['network'][f_var] = metadata['ec2_compat'][ip_var]
        except socket.gaierror:
            facts['network'][f_var] = metadata['ec2_compat'][ip_var]

    return facts


def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts
    Args:
        provider (str): host provider
        metadata (dict): provider metadata
    Returns:
        dict: the normalized provider facts
    """
    if provider is None or metadata is None:
        return {}

    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available
    # TODO: add support for setting user_data if available
    facts = dict(name=provider, metadata=metadata,
                 network=dict(interfaces=[], ipv6_enabled=False))

    if provider == 'gce':
        facts = normalize_gce_facts(metadata, facts)
    elif provider == 'aws':
        facts = normalize_aws_facts(metadata, facts)
    elif provider == 'openstack':
        facts = normalize_openstack_facts(metadata, facts)
    return facts


def set_flannel_facts_if_unset(facts):
    """ Set flannel facts if not already present in facts dict
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the flannel
              facts if they were not already present
    """
    if 'common' in facts:
        if 'use_flannel' not in facts['common']:
            use_flannel = False
            facts['common']['use_flannel'] = use_flannel
    return facts


def set_calico_facts_if_unset(facts):
    """ Set calico facts if not already present in facts dict
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the calico
              facts if they were not already present
    """
    if 'common' in facts:
        if 'use_calico' not in facts['common']:
            use_calico = False
            facts['common']['use_calico'] = use_calico
    return facts


def set_nuage_facts_if_unset(facts):
    """ Set nuage facts if not already present in facts dict
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the nuage
              facts if they were not already present
    """
    if 'common' in facts:
        if 'use_nuage' not in facts['common']:
            use_nuage = False
            facts['common']['use_nuage'] = use_nuage
    return facts


def set_contiv_facts_if_unset(facts):
    """ Set contiv facts if not already present in facts dict
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the contiv
              facts if they were not already present
    """
    if 'common' in facts:
        if 'use_contiv' not in facts['common']:
            use_contiv = False
            facts['common']['use_contiv'] = use_contiv
    return facts


def set_node_schedulability(facts):
    """ Set schedulable facts if not already present in facts dict
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the generated schedulable
              facts if they were not already present
    """
    if 'node' in facts:
        if 'schedulable' not in facts['node']:
            if 'master' in facts:
                facts['node']['schedulable'] = False
            else:
                facts['node']['schedulable'] = True
    return facts


# pylint: disable=too-many-branches
def set_selectors(facts):
    """ Set selectors facts if not already present in facts dict
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the generated selectors
              facts if they were not already present
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type == 'online':
        selector = "type=infra"
    else:
        selector = "region=infra"

    if 'hosted' not in facts:
        facts['hosted'] = {}
    if 'router' not in facts['hosted']:
        facts['hosted']['router'] = {}
    if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']:
        facts['hosted']['router']['selector'] = selector
    if 'registry' not in facts['hosted']:
        facts['hosted']['registry'] = {}
    if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']:
        facts['hosted']['registry']['selector'] = selector
    if 'metrics' not in facts['hosted']:
        facts['hosted']['metrics'] = {}
    if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
        facts['hosted']['metrics']['selector'] = None
    if 'logging' not in facts['hosted']:
        facts['hosted']['logging'] = {}
    if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
        facts['hosted']['logging']['selector'] = None
    if 'etcd' not in facts['hosted']:
        facts['hosted']['etcd'] = {}
    if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
        facts['hosted']['etcd']['selector'] = None

    return facts


def set_dnsmasq_facts_if_unset(facts):
    """ Set dnsmasq facts if not already present in facts
    Args:
        facts (dict): existing facts
    Returns:
        facts (dict): updated facts with values set if not previously set
    """
    if 'common' in facts:
        if 'use_dnsmasq' not in facts['common']:
            facts['common']['use_dnsmasq'] = bool(safe_get_bool(facts['common']['version_gte_3_2_or_1_2']))
        if 'master' in facts and 'dns_port' not in facts['master']:
            if safe_get_bool(facts['common']['use_dnsmasq']):
                facts['master']['dns_port'] = 8053
            else:
                facts['master']['dns_port'] = 53

    return facts


def set_project_cfg_facts_if_unset(facts):
    """ Set Project Configuration facts if not already present in facts dict
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the generated Project Configuration
              facts if they were not already present
    """
    config = {
        'default_node_selector': '',
        'project_request_message': '',
        'project_request_template': '',
        'mcs_allocator_range': 's0:/2',
        'mcs_labels_per_project': 5,
        'uid_allocator_range': '1000000000-1999999999/10000'
    }

    if 'master' in facts:
        for key, value in config.items():
            if key not in facts['master']:
                facts['master'][key] = value

    return facts


def set_identity_providers_if_unset(facts):
    """ Set identity_providers fact if not already present in facts dict
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the generated identity providers
              facts if they were not already present
    """
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'identity_providers' not in facts['master']:
            identity_provider = dict(
                name='allow_all', challenge=True, login=True,
                kind='AllowAllPasswordIdentityProvider'
            )
            if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
                identity_provider = dict(
                    name='deny_all', challenge=True, login=True,
                    kind='DenyAllPasswordIdentityProvider'
                )
            facts['master']['identity_providers'] = [identity_provider]

    return facts


def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the generated url facts if they
              were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']

        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )

        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )

        etcd_urls = []
        if etcd_hosts != '':
            facts['master']['etcd_port'] = ports['etcd']
            facts['master']['embedded_etcd'] = False
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host, ports['etcd']))
        else:
            etcd_urls = [format_url(use_ssl['etcd'], hostname, ports['etcd'])]
        facts['master'].setdefault('etcd_urls', etcd_urls)

        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]

        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))

        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)

        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))

    return facts


def set_aggregate_facts(facts):
    """ Set aggregate facts
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with aggregated facts
    """
    all_hostnames = set()
    internal_hostnames = set()
    kube_svc_ip = first_ip(facts['common']['portal_net'])
    if 'common' in facts:
        all_hostnames.add(facts['common']['hostname'])
        all_hostnames.add(facts['common']['public_hostname'])
        all_hostnames.add(facts['common']['ip'])
        all_hostnames.add(facts['common']['public_ip'])
        facts['common']['kube_svc_ip'] = kube_svc_ip

        internal_hostnames.add(facts['common']['hostname'])
        internal_hostnames.add(facts['common']['ip'])

        cluster_domain = facts['common']['dns_domain']

        if 'master' in facts:
            if 'cluster_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_hostname'])
            if 'cluster_public_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_public_hostname'])
            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
            all_hostnames.update(svc_names)
            internal_hostnames.update(svc_names)
            all_hostnames.add(kube_svc_ip)
            internal_hostnames.add(kube_svc_ip)

        facts['common']['all_hostnames'] = list(all_hostnames)
        facts['common']['internal_hostnames'] = list(internal_hostnames)

    return facts


def set_etcd_facts_if_unset(facts):
    """
    If using embedded etcd, loads the data directory from master-config.yaml.
    If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
    If anything goes wrong parsing these, the fact will not be set.
    """
    if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        if 'etcd_data_dir' not in etcd_facts:
            try:
                # Parse master config to find actual etcd data dir:
                master_cfg_path = os.path.join(facts['common']['config_base'],
                                               'master/master-config.yaml')
                master_cfg_f = open(master_cfg_path, 'r')
                config = yaml.safe_load(master_cfg_f.read())
                master_cfg_f.close()

                etcd_facts['etcd_data_dir'] = \
                    config['etcdConfig']['storageDirectory']

                facts['etcd'] = etcd_facts

            # We don't want exceptions bubbling up here:
            # pylint: disable=broad-except
            except Exception:
                pass
    else:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
        try:
            # Add a fake section for parsing:
            ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = configparser.RawConfigParser()
            config.readfp(ini_fp)
            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
                etcd_data_dir = etcd_data_dir[1:-1]

            etcd_facts['etcd_data_dir'] = etcd_data_dir
            facts['etcd'] = etcd_facts

        # We don't want exceptions bubbling up here:
        # pylint: disable=broad-except
        except Exception:
            pass

    return facts


def set_deployment_facts_if_unset(facts):
    """ Set Facts that vary based on deployment_type. This currently
        includes common.service_type, master.registry_url, node.registry_url,
        node.storage_plugin_deps
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the generated deployment_type
              facts
    """
    # disabled to avoid breaking up facts related to deployment type into
    # multiple methods for now.
    # pylint: disable=too-many-statements, too-many-branches
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'service_type' not in facts['common']:
            service_type = 'atomic-openshift'
            if deployment_type == 'origin':
                service_type = 'origin'
            elif deployment_type in ['enterprise']:
                service_type = 'openshift'
            facts['common']['service_type'] = service_type

    if 'docker' in facts:
        deployment_type = facts['common']['deployment_type']
        if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
            addtl_regs = facts['docker'].get('additional_registries', [])
            ent_reg = 'registry.access.redhat.com'
            if ent_reg not in addtl_regs:
                facts['docker']['additional_registries'] = addtl_regs + [ent_reg]

    for role in ('master', 'node'):
        if role in facts:
            deployment_type = facts['common']['deployment_type']
            if 'registry_url' not in facts[role]:
                registry_url = 'openshift/origin-${component}:${version}'
                if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
                    registry_url = 'openshift3/ose-${component}:${version}'
                elif deployment_type == 'atomic-enterprise':
                    registry_url = 'aep3_beta/aep-${component}:${version}'
                facts[role]['registry_url'] = registry_url

    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
        if 'disabled_features' in facts['master']:
            if deployment_type == 'atomic-enterprise':
                curr_disabled_features = set(facts['master']['disabled_features'])
                facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
        else:
            if facts['common']['deployment_subtype'] == 'registry':
                facts['master']['disabled_features'] = openshift_features

    if 'node' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'storage_plugin_deps' not in facts['node']:
            if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']:
                facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
            else:
                facts['node']['storage_plugin_deps'] = []

    return facts


# pylint: disable=too-many-statements
def set_version_facts_if_unset(facts):
    """ Set version facts. This currently includes common.version and
        common.version_gte_3_1_or_1_1.
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with version facts.
    """
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_version = get_openshift_version(facts)
        if openshift_version and openshift_version != "latest":
            version = LooseVersion(openshift_version)
            facts['common']['version'] = openshift_version
            facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
            if deployment_type == 'origin':
                version_gte_3_1_or_1_1 = version >= LooseVersion('1.1.0')
                version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('1.1.1')
                version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
                version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
                version_gte_3_4_or_1_4 = version >= LooseVersion('1.4')
                version_gte_3_5_or_1_5 = version >= LooseVersion('1.5')
                version_gte_3_6 = version >= LooseVersion('3.6')
            else:
                version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
                version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
                version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
                version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
                version_gte_3_4_or_1_4 = version >= LooseVersion('3.4')
                version_gte_3_5_or_1_5 = version >= LooseVersion('3.5')
                version_gte_3_6 = version >= LooseVersion('3.6')
        else:
            # 'Latest' version is set to True, 'Next' versions set to False
            version_gte_3_1_or_1_1 = True
            version_gte_3_1_1_or_1_1_1 = True
            version_gte_3_2_or_1_2 = True
            version_gte_3_3_or_1_3 = True
            version_gte_3_4_or_1_4 = True
            version_gte_3_5_or_1_5 = True
            version_gte_3_6 = True
        facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
        facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
        facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
        facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
        facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
        facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
        facts['common']['version_gte_3_6'] = version_gte_3_6

        if version_gte_3_6:
            examples_content_version = 'v3.6'
        elif version_gte_3_5_or_1_5:
            examples_content_version = 'v1.5'
        elif version_gte_3_4_or_1_4:
            examples_content_version = 'v1.4'
        elif version_gte_3_3_or_1_3:
            examples_content_version = 'v1.3'
        elif version_gte_3_2_or_1_2:
            examples_content_version = 'v1.2'
        elif version_gte_3_1_or_1_1:
            examples_content_version = 'v1.1'
        else:
            examples_content_version = 'v1.0'

        facts['common']['examples_content_version'] = examples_content_version

    return facts


def set_manageiq_facts_if_unset(facts):
    """ Set manageiq facts. This currently includes common.use_manageiq.
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the manageiq facts.
    Raises:
        OpenShiftFactsInternalError:
    """
    if 'common' not in facts or 'version_gte_3_1_or_1_1' not in facts['common']:
        raise OpenShiftFactsInternalError(
            "Invalid invocation: The required facts are not set"
        )
    if 'use_manageiq' not in facts['common']:
        facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1']

    return facts


def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict
    Args:
        facts (dict): existing facts
        system_facts (dict): ansible_facts
    Returns:
        dict: the facts dict updated with the generated sdn facts if they
              were not already present
    """
    # pylint: disable=too-many-branches
    if 'common' in facts:
        use_sdn = facts['common']['use_openshift_sdn']
        if not (use_sdn == '' or isinstance(use_sdn, bool)):
            use_sdn = safe_get_bool(use_sdn)
            facts['common']['use_openshift_sdn'] = use_sdn
        if 'sdn_network_plugin_name' not in facts['common']:
            plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
            facts['common']['sdn_network_plugin_name'] = plugin

    if 'master' in facts:
        # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
        # these might be overridden if they exist in the master config file
        sdn_cluster_network_cidr = '10.128.0.0/14'
        sdn_host_subnet_length = '9'

        master_cfg_path = os.path.join(facts['common']['config_base'],
                                       'master/master-config.yaml')
        if os.path.isfile(master_cfg_path):
            with open(master_cfg_path, 'r') as master_cfg_f:
                config = yaml.safe_load(master_cfg_f.read())

            if 'networkConfig' in config:
                if 'clusterNetworkCIDR' in config['networkConfig']:
                    sdn_cluster_network_cidr = \
                        config['networkConfig']['clusterNetworkCIDR']
                if 'hostSubnetLength' in config['networkConfig']:
                    sdn_host_subnet_length = \
                        config['networkConfig']['hostSubnetLength']

        if 'sdn_cluster_network_cidr' not in facts['master']:
            facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
        if 'sdn_host_subnet_length' not in facts['master']:
            facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length

    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']

        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'

        for val in itervalues(system_facts):
            if isinstance(val, dict) and 'mtu' in val:
                mtu = val['mtu']

                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    facts['node']['sdn_mtu'] = str(mtu - 50)

    return facts


def set_nodename(facts):
    """ set nodename """
    if 'node' in facts and 'common' in facts:
        if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
            facts['node']['nodename'] = facts['provider']['metadata']['instance']['hostname'].split('.')[0]

        # TODO: The openstack cloudprovider nodename setting was too opinionated.
        # It needs to be generalized before it can be enabled again.
        # elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
        #     facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
        else:
            facts['node']['nodename'] = facts['common']['hostname'].lower()
    return facts


def migrate_oauth_template_facts(facts):
    """
    Migrate an old oauth template fact to a newer format if it's present.

    The legacy 'oauth_template' fact was just a filename, and assumed you were
    setting the 'login' template.

    The new pluralized 'oauth_templates' fact is a dict mapping the template
    name to a filename.

    Simplify the code after this by merging the old fact into the new.
    """
    if 'master' in facts and 'oauth_template' in facts['master']:
        if 'oauth_templates' not in facts['master']:
            facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']}
        elif 'login' not in facts['master']['oauth_templates']:
            facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
    return facts


def format_url(use_ssl, hostname, port, path=''):
    """ Format url based on ssl flag, hostname, port and path
    Args:
        use_ssl (bool): is ssl enabled
        hostname (str): hostname
        port (str): port
        path (str): url path
    Returns:
        str: The generated url string
    """
    scheme = 'https' if use_ssl else 'http'
    netloc = hostname
    if (use_ssl and port != '443') or (not use_ssl and port != '80'):
        netloc += ":%s" % port
    try:
        url = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
    except AttributeError:
        # pylint: disable=undefined-variable
        url = urlunparse((scheme, netloc, path, '', '', ''))
    return url
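# Worked example (added for illustration, not part of the original module):
# format_url(True, 'master.example.com', '8443') builds the netloc
# 'master.example.com:8443' (the port is kept because it is not the scheme
# default) and returns 'https://master.example.com:8443'.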


def get_current_config(facts):
    """ Get current openshift config
    Args:
        facts (dict): existing facts
    Returns:
        dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]

        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )

        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
            try:
                _, output, _ = module.run_command(  # noqa: F405
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)

                cad = 'certificate-authority-data'
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass

                current_config['kubeconfig'] = config

            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass

    return current_config


def build_kubelet_args(facts):
    """Build node kubelet_args

    In the node-config.yaml file, kubeletArgument sub-keys have their
    values provided as a list. Hence the gratuitous use of ['foo'] below.
    """
    cloud_cfg_path = os.path.join(
        facts['common']['config_base'],
        'cloudprovider')

    # We only have to do this stuff on hosts that are nodes
    if 'node' in facts:
        # Any changes to the kubeletArguments parameter are stored
        # here first.
        kubelet_args = {}

        if 'cloudprovider' in facts:
            # EVERY cloud is special <3
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    kubelet_args['cloud-provider'] = ['aws']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    kubelet_args['cloud-provider'] = ['openstack']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    kubelet_args['cloud-provider'] = ['gce']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']

        # Automatically add node-labels to the kubeletArguments
        # parameter. See BZ1359848 for additional details.
        #
        # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
        if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict):
            # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns
            # into ['foo=bar', 'a=b']
            #
            # On the openshift_node_labels inventory variable we loop
            # over each key-value tuple (from .items()) and join the
            # key to the value with an '=' character, this produces a
            # list.
            #
            # map() seems to be returning an itertools.imap object
            # instead of a list. We cast it to a list ourselves.
            # pylint: disable=unnecessary-lambda
            labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
            if labels_str != '':
                kubelet_args['node-labels'] = labels_str

        # If we've added items to the kubelet_args dict then we need
        # to merge the new items back into the main facts object.
        if kubelet_args != {}:
            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])

    return facts


def build_controller_args(facts):
    """ Build master controller_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        controller_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    controller_args['cloud-provider'] = ['aws']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    controller_args['cloud-provider'] = ['openstack']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    controller_args['cloud-provider'] = ['gce']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
        if controller_args != {}:
            facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
    return facts


def build_api_server_args(facts):
    """ Build master api_server_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        api_server_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    api_server_args['cloud-provider'] = ['aws']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    api_server_args['cloud-provider'] = ['openstack']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    api_server_args['cloud-provider'] = ['gce']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
        if api_server_args != {}:
            facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
    return facts
def is_service_running(service):
    """ Queries systemd through dbus to see if the service is running """
    service_running = False
    try:
        bus = SystemBus()
        systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
        manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
        service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
        service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
        service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
        service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
        service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        if service_load_state == 'loaded' and service_active_state == 'active':
            service_running = True
    except DBusException:
        # TODO: do not swallow exception, as it may be hiding useful debugging
        # information.
        pass

    return service_running


def rpm_rebuilddb():
    """
    Runs rpm --rebuilddb to ensure the db is in good shape.
    """
    module.run_command(['/usr/bin/rpm', '--rebuilddb'])  # noqa: F405


def get_version_output(binary, version_cmd):
    """ runs and returns the version output for a command """
    cmd = []
    for item in (binary, version_cmd):
        if isinstance(item, list):
            cmd.extend(item)
        else:
            cmd.append(item)

    # Default to empty output so callers get a string even if the binary is missing.
    output = ''
    if os.path.isfile(cmd[0]):
        _, output, _ = module.run_command(cmd)  # noqa: F405
    return output


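# get_docker_version_info() below feeds the output of 'docker version' through
# yaml.safe_load(); on the docker releases this module targets that output is
# YAML-parsable and contains a 'Server' mapping with 'Version' and
# 'API version' keys, roughly (values made up for illustration):
#   Server:
#    Version:      1.12.6
#    API version:  1.24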
def get_docker_version_info():
    """ Parses and returns the docker version info """
    result = None
    if is_service_running('docker') or is_service_running('container-engine'):
        version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
        if 'Server' in version_info:
            result = {
                'api_version': version_info['Server']['API version'],
                'version': version_info['Server']['Version']
            }
    return result


def get_hosted_registry_insecure():
    """ Parses OPTIONS from /etc/sysconfig/docker to determine if the
        registry is currently insecure.
    """
    hosted_registry_insecure = None
    if os.path.exists('/etc/sysconfig/docker'):
        try:
            ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = configparser.RawConfigParser()
            config.readfp(ini_fp)
            options = config.get('root', 'OPTIONS')
            if 'insecure-registry' in options:
                hosted_registry_insecure = True
        except Exception:  # pylint: disable=broad-except
            pass
    return hosted_registry_insecure


def get_openshift_version(facts):
    """ Get current version of openshift on the host.

        Checks a variety of ways ranging from fastest to slowest.

        Args:
            facts (dict): existing facts

        Returns:
            version: the current openshift version
    """
    version = None

    # No need to run this method repeatedly on a system if we already know the
    # version
    # TODO: We need a way to force reload this after upgrading bits.
    if 'common' in facts:
        if 'version' in facts['common'] and facts['common']['version'] is not None:
            return chomp_commit_offset(facts['common']['version'])

    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)
    elif 'common' in facts and 'is_containerized' in facts['common']:
        version = get_container_openshift_version(facts)

    # Handle containerized masters that have not yet been configured as a node.
    # This can be very slow and may get re-run multiple times, so we only use this
    # if other methods failed to find a version.
    if not version and os.path.isfile('/usr/local/bin/openshift'):
        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)

    return chomp_commit_offset(version)


def chomp_commit_offset(version):
    """Chomp any "+git.foo" commit offset string from the given `version`
    and return the modified version string.

    Ex:
    - chomp_commit_offset(None)                 => None
    - chomp_commit_offset(1337)                 => "1337"
    - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
    - chomp_commit_offset("v3.4.0.15")          => "v3.4.0.15"
    - chomp_commit_offset("v1.3.0+52492b4")     => "v1.3.0"
    """
    if version is None:
        return version
    else:
        # Stringify, just in case it's a Number type. Split by '+' and
        # return the first split. No concerns about strings without a
        # '+': .split() returns a list containing the original string.
        return str(version).split('+')[0]


def get_container_openshift_version(facts):
    """
    If containerized, see if we can determine the installed version via the
    systemd environment files.
    """
    for filename in ['/etc/sysconfig/%s-master', '/etc/sysconfig/%s-node']:
        env_path = filename % facts['common']['service_type']
        if not os.path.exists(env_path):
            continue

        with open(env_path) as env_file:
            for line in env_file:
                if line.startswith("IMAGE_VERSION="):
                    tag = line[len("IMAGE_VERSION="):].strip()
                    # Remove leading "v" and any trailing release info; we just want
                    # a version number here:
                    no_v_version = tag[1:] if tag[0] == 'v' else tag
                    version = no_v_version.split("-")[0]
                    return version
    return None


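# parse_openshift_version() below expects 'openshift version' output of the
# form (version numbers are made up for illustration):
#   openshift v3.6.0+c4dd4cf
#   kubernetes v1.6.1+5115d708d7
# Each line is split on ' v', so versions.get('openshift') would yield
# '3.6.0+c4dd4cf' in this example; the '-NN-gHASH' build suffix seen on older
# versions is then stripped by the split('-') in the function body.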
def parse_openshift_version(output):
    """ Parse the OpenShift version number from 'openshift version' output

        Args:
            output (string): output of 'openshift version'
        Returns:
            string: the version number
    """
    versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
    ver = versions.get('openshift', '')
    # Remove trailing build number and commit hash from older versions, we need to return a straight
    # w.x.y.z version here for use as openshift_version throughout the playbooks/roles. (i.e. 3.1.1.6-64-g80b61da)
    ver = ver.split('-')[0]
    return ver


def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to supplied facts dict

        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts
    """
    if not provider_facts:
        return facts

    common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
    for h_var, ip_var in common_vars:
        ip_value = provider_facts['network'].get(ip_var)
        if ip_value:
            facts['common'][ip_var] = ip_value

        facts['common'][h_var] = choose_hostname(
            [provider_facts['network'].get(h_var)],
            facts['common'][h_var]
        )

    facts['provider'] = provider_facts
    return facts


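# A minimal illustration of merge_facts() semantics (values made up):
#   merge_facts({'master': {'named_certificates': [{'certfile': 'a.crt'}]}},
#               {'master': {'named_certificates': [{'certfile': 'b.crt'}]}},
#               [], [])
# keeps both certificate entries because 'named_certificates' is additive,
# whereas passing ['master.named_certificates'] as additive_facts_to_overwrite
# would replace the old list with the new one.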
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
    """ Recursively merge facts dicts

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    protected_facts = ['ha']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config',
                            "builddefaults",
                            "buildoverrides"]

    facts = dict()
    for key, value in iteritems(orig):
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watch out for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], string_types):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)

                # Collect the subset of protected facts to overwrite
                # if key matches. These will be passed to the
                # subsequent merge_facts call.
                relevant_protected_facts = []
                for item in protected_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_protected_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # Key matches a protected fact and we are not overwriting
            # it so we will determine if it is okay to change this
            # fact.
            elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                # ha (bool) can not change unless it has been passed
                # as a protected fact to overwrite.
                if key == 'ha':
                    if safe_get_bool(value) != safe_get_bool(new[key]):
                        # pylint: disable=line-too-long
                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')  # noqa: F405
                    else:
                        facts[key] = value
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watch out for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], string_types):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts


def save_local_facts(filename, facts):
    """ Save local facts

        Args:
            filename (str): local facts file
            facts (dict): facts to set
    """
    try:
        fact_dir = os.path.dirname(filename)
        try:
            os.makedirs(fact_dir)  # try to make the directory
        except OSError as exception:
            if exception.errno != errno.EEXIST:  # but it is okay if it is already there
                raise  # pass any other exceptions up the chain
        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))  # noqa: F405
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )


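# Note: the fact file written by save_local_facts() above is JSON, e.g.
#   {"common": {"deployment_type": "origin"}}
# The INI branch in get_local_facts_from_file() below appears to exist only to
# convert fact files written in the older INI style.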
def get_local_facts_from_file(filename):
    """ Retrieve local facts from fact file

        Args:
            filename (str): local facts file
        Returns:
            dict: the retrieved facts
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style
        ini_facts = configparser.SafeConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict()
            for key, value in ini_facts.items(section):
                local_facts[section][key] = value

    except (configparser.MissingSectionHeaderError,
            configparser.ParsingError):
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass

    return local_facts


def sort_unique(alist):
    """ Sorts and de-dupes a list

        Args:
            alist (list): a list
        Returns:
            list: a sorted de-duped list
    """
    return sorted(list(set(alist)))


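# safe_get_bool() funnels the fact through strtobool(), so in addition to real
# booleans it accepts strings such as 'true'/'false', 'yes'/'no', 'on'/'off'
# and '1'/'0' (case-insensitive); anything else raises ValueError.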
def safe_get_bool(fact):
    """ Get a boolean fact safely.

        Args:
            fact: fact to convert
        Returns:
            bool: given fact as a bool
    """
    return bool(strtobool(str(fact)))


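# Illustrative outcome of set_proxy_facts() below: with made-up values
# dns_domain='cluster.local' and hostname='master.example.com', and with
# generate_no_proxy_hosts left at its default of True, common['no_proxy']
# ends up as a sorted, de-duped, comma-joined string along the lines of
#   .cluster.local,.svc,master.example.com,<cluster host FQDNs>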
def set_proxy_facts(facts):
    """ Set global proxy facts

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'common' in facts:
        common = facts['common']

        ######################################################################
        # We can exit early now if we don't need to set any proxy facts
        proxy_params = ['no_proxy', 'https_proxy', 'http_proxy']
        # If any of the known proxy params (pp) are defined
        proxy_settings_defined = any(
            [True for pp in proxy_params if pp in common]
        )
        if not proxy_settings_defined:
            return facts

        # As of 3.6, if ANY of the proxy parameters are defined in the
        # inventory then we MUST add certain domains to the NO_PROXY
        # environment variable.

        ######################################################################
        # Spot to build up some data we may insert later
        raw_no_proxy_list = []

        # Automatic 3.6 NO_PROXY additions if a proxy is in use
        svc_cluster_name = ['.svc', '.' + common['dns_domain'], common['hostname']]

        # auto_hosts: Added to the NO_PROXY list if any proxy params are
        # set in the inventory. This is a list of the FQDNs of all
        # cluster hosts:
        auto_hosts = common['no_proxy_internal_hostnames'].split(',')

        # custom_no_proxy_hosts: If you define openshift_no_proxy in the
        # inventory we automatically add those hosts to the list:
        if 'no_proxy' in common:
            custom_no_proxy_hosts = common['no_proxy'].split(',')
        else:
            custom_no_proxy_hosts = []

        # This should exist no matter what. Defaults to true.
        if 'generate_no_proxy_hosts' in common:
            generate_no_proxy_hosts = safe_get_bool(common['generate_no_proxy_hosts'])
        else:
            generate_no_proxy_hosts = True

        ######################################################################
        # You set a proxy var. Now we are obliged to add some things
        raw_no_proxy_list = svc_cluster_name + custom_no_proxy_hosts

        # You did not turn openshift_generate_no_proxy_hosts to False
        if generate_no_proxy_hosts:
            raw_no_proxy_list.extend(auto_hosts)

        ######################################################################
        # Was anything actually added? There should be something by now.
        processed_no_proxy_list = sort_unique(raw_no_proxy_list)
        if processed_no_proxy_list != list():
            common['no_proxy'] = ','.join(processed_no_proxy_list)
        else:
            # Somehow we got an empty list. This should have been
            # skipped by now in the 'return' earlier. If
            # common['no_proxy'] is DEFINED it will cause unexpected
            # behavior and bad templating. Ensure it does not
            # exist. Even an empty list or string will have undesired
            # side-effects.
            del common['no_proxy']

    ######################################################################
    # In case you were wondering: because 'common' is a reference
    # to the object facts['common'], there is no need to re-assign
    # it.
    return facts


def set_builddefaults_facts(facts):
    """ Set build defaults including setting proxy values from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Copy values from common to builddefaults
        if 'http_proxy' not in builddefaults and 'http_proxy' in common:
            builddefaults['http_proxy'] = common['http_proxy']
        if 'https_proxy' not in builddefaults and 'https_proxy' in common:
            builddefaults['https_proxy'] = common['https_proxy']
        if 'no_proxy' not in builddefaults and 'no_proxy' in common:
            builddefaults['no_proxy'] = common['no_proxy']

        # Create git specific facts from generic values, if git specific values are
        # not defined.
        if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
            builddefaults['git_http_proxy'] = builddefaults['http_proxy']
        if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
            builddefaults['git_https_proxy'] = builddefaults['https_proxy']
        if 'git_no_proxy' not in builddefaults and 'no_proxy' in builddefaults:
            builddefaults['git_no_proxy'] = builddefaults['no_proxy']

        # If we're actually defining a builddefaults config then create admission_plugin_config
        # then merge builddefaults[config] structure into admission_plugin_config
        if 'config' in builddefaults:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
            # if the user didn't actually provide proxy values, delete the proxy env variable defaults.
            delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])

    return facts


def delete_empty_keys(keylist):
    """ Delete dictionary elements from keylist where "value" is empty.

        Args:
          keylist(list): A list of builddefault configuration envs.

        Returns:
          none

        Example:
          keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                     {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                     {'name': 'NO_PROXY', 'value': ''}]

          After calling delete_empty_keys the provided list is modified to become:

          [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
           {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}]
    """
    count = 0
    for i in range(0, len(keylist)):
        if len(keylist[i - count]['value']) == 0:
            del keylist[i - count]
            count += 1


def set_buildoverrides_facts(facts):
    """ Set build overrides

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'buildoverrides' in facts:
        buildoverrides = facts['buildoverrides']
        # If we're actually defining a buildoverrides config then create admission_plugin_config
        # then merge buildoverrides[config] structure into admission_plugin_config
        if 'config' in buildoverrides:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            facts['master']['admission_plugin_config'].update(buildoverrides['config'])

    return facts


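# Example (illustrative): for the default 'origin' deployment type,
# set_container_facts_if_unset() below leaves facts['common']['cli_image'] at
# 'openshift/origin' unless the inventory already supplied a value.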
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated containerization
            facts
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type in ['enterprise', 'openshift-enterprise']:
        master_image = 'openshift3/ose'
        cli_image = master_image
        node_image = 'openshift3/node'
        ovs_image = 'openshift3/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift3/ose-pod'
        router_image = 'openshift3/ose-haproxy-router'
        registry_image = 'openshift3/ose-docker-registry'
        deployer_image = 'openshift3/ose-deployer'
    elif deployment_type == 'atomic-enterprise':
        master_image = 'aep3_beta/aep'
        cli_image = master_image
        node_image = 'aep3_beta/node'
        ovs_image = 'aep3_beta/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'aep3_beta/aep-pod'
        router_image = 'aep3_beta/aep-haproxy-router'
        registry_image = 'aep3_beta/aep-docker-registry'
        deployer_image = 'aep3_beta/aep-deployer'
    else:
        master_image = 'openshift/origin'
        cli_image = master_image
        node_image = 'openshift/node'
        ovs_image = 'openshift/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift/origin-pod'
        router_image = 'openshift/origin-haproxy-router'
        registry_image = 'openshift/origin-docker-registry'
        deployer_image = 'openshift/origin-deployer'

    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')

    # If openshift_docker_use_system_container is set and is True ....
    if 'use_system_container' in list(facts['docker'].keys()):
        if facts['docker']['use_system_container']:
            # ... set the service name to container-engine
            facts['docker']['service_name'] = 'container-engine'

    if 'is_containerized' not in facts['common']:
        facts['common']['is_containerized'] = facts['common']['is_atomic']
    if 'cli_image' not in facts['common']:
        facts['common']['cli_image'] = cli_image
    if 'pod_image' not in facts['common']:
        facts['common']['pod_image'] = pod_image
    if 'router_image' not in facts['common']:
        facts['common']['router_image'] = router_image
    if 'registry_image' not in facts['common']:
        facts['common']['registry_image'] = registry_image
    if 'deployer_image' not in facts['common']:
        facts['common']['deployer_image'] = deployer_image
    if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
        facts['etcd']['etcd_image'] = etcd_image
    if 'master' in facts and 'master_image' not in facts['master']:
        facts['master']['master_image'] = master_image
        facts['master']['master_system_image'] = master_image
    if 'node' in facts:
        if 'node_image' not in facts['node']:
            facts['node']['node_image'] = node_image
            facts['node']['node_system_image'] = node_image
        if 'ovs_image' not in facts['node']:
            facts['node']['ovs_image'] = ovs_image
            facts['node']['ovs_system_image'] = ovs_image

    if safe_get_bool(facts['common']['is_containerized']):
        facts['common']['admin_binary'] = '/usr/local/bin/oadm'
        facts['common']['client_binary'] = '/usr/local/bin/oc'

    return facts


def set_installed_variant_rpm_facts(facts):
    """ Set RPM facts of installed variant

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with installed_variant_rpms
    """
    installed_rpms = []
    for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
        optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
        variant_rpms = [base_rpm] + \
                       ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
                       ['tuned-profiles-%s-node' % base_rpm]
        for rpm in variant_rpms:
            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])  # noqa: F405
            if exit_code == 0:
                installed_rpms.append(rpm)

    facts['common']['installed_variant_rpms'] = installed_rpms
    return facts


class OpenShiftFactsInternalError(Exception):
    """Origin Facts Error"""
    pass


class OpenShiftFactsUnsupportedRoleError(Exception):
    """Origin Facts Unsupported Role Error"""
    pass


class OpenShiftFactsFileWriteError(Exception):
    """Origin Facts File Write Error"""
    pass


class OpenShiftFactsMetadataUnavailableError(Exception):
    """Origin Facts Metadata Unavailable Error"""
    pass


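# Typical construction (illustrative; mirrors how main() wires the module
# parameters through at the bottom of this file):
#   facts_obj = OpenShiftFacts('common', '/etc/ansible/facts.d/openshift.fact',
#                              {'deployment_type': 'origin'})
#   facts_obj.facts   # -> {'openshift': {...generated facts...}}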
class OpenShiftFacts(object):
    """ Origin Facts

        Attributes:
            facts (dict): facts for the host

        Args:
            module (AnsibleModule): an AnsibleModule object
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Raises:
            OpenShiftFactsUnsupportedRoleError:
    """
    known_roles = ['builddefaults',
                   'buildoverrides',
                   'clock',
                   'cloudprovider',
                   'common',
                   'docker',
                   'etcd',
                   'hosted',
                   'master',
                   'node']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None,
                 openshift_env=None,
                 openshift_env_structures=None,
                 protected_facts_to_overwrite=None):
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        # Collect system facts and preface each fact with 'ansible_'.
        try:
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])  # noqa: F405
            additional_facts = {}
            for (k, v) in self.system_facts.items():
                additional_facts["ansible_%s" % k.replace('-', '_')] = v
            self.system_facts.update(additional_facts)
        except UnboundLocalError:
            # ansible-2.2,2.3
            self.system_facts = get_all_facts(module)['ansible_facts']  # noqa: F405

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite,
                                         openshift_env,
                                         openshift_env_structures,
                                         protected_facts_to_overwrite)

    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite,
                       openshift_env,
                       openshift_env_structures,
                       protected_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift_env facts for overriding generated defaults
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The generated facts
        """
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite,
                                            openshift_env,
                                            openshift_env_structures,
                                            protected_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite,
                            protected_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        facts = set_url_facts_if_unset(facts)
        facts = set_project_cfg_facts_if_unset(facts)
        facts = set_flannel_facts_if_unset(facts)
        facts = set_calico_facts_if_unset(facts)
        facts = set_nuage_facts_if_unset(facts)
        facts = set_contiv_facts_if_unset(facts)
        facts = set_node_schedulability(facts)
        facts = set_selectors(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_dnsmasq_facts_if_unset(facts)
        facts = set_manageiq_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        facts = set_proxy_facts(facts)
        facts = set_builddefaults_facts(facts)
        facts = set_buildoverrides_facts(facts)
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)

    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host

            Returns:
                dict: The generated default facts
        """
        defaults = {}
        ip_addr = self.system_facts['ansible_default_ipv4']['address']
        exit_code, output, _ = module.run_command(['hostname', '-f'])  # noqa: F405
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr)

        defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc', admin_binary='oadm',
                                  dns_domain='cluster.local',
                                  install_examples=True,
                                  debug_level=2,
                                  config_base='/etc/origin',
                                  data_dir='/var/lib/origin')

        if 'master' in roles:
            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_etcd=True, embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='30s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)

        if 'docker' in roles:
            docker = dict(disable_push_dockerhub=False,
                          options='--log-driver=journald')
            # NOTE: This is a workaround for a dnf output racecondition that can occur in
            # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184
            if self.system_facts['ansible_pkg_mgr'] == 'dnf':
                rpm_rebuilddb()

            version_info = get_docker_version_info()
            if version_info is not None:
                docker['api_version'] = version_info['api_version']
                docker['version'] = version_info['version']
                docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10')

            hosted_registry_insecure = get_hosted_registry_insecure()
            if hosted_registry_insecure is not None:
                docker['hosted_registry_insecure'] = hosted_registry_insecure

            docker['service_name'] = 'docker'
            defaults['docker'] = docker

        if 'clock' in roles:
            exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])  # noqa: F405
            chrony_installed = bool(exit_code == 0)
            defaults['clock'] = dict(
                enabled=True,
                chrony_installed=chrony_installed)

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        if 'hosted' in roles or self.role == 'hosted':
            defaults['hosted'] = dict(
                metrics=dict(
                    deploy=False,
                    duration=7,
                    resolution='10s',
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='metrics',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access=dict(
                            modes=['ReadWriteOnce']
                        ),
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                loggingops=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='logging-es-ops',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access=dict(
                            modes=['ReadWriteOnce']
                        ),
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                logging=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='logging-es',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access=dict(
                            modes=['ReadWriteOnce']
                        ),
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                etcd=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='etcd',
                            size='1Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access=dict(
                            modes=['ReadWriteOnce']
                        ),
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                registry=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='registry',
                            size='5Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        glusterfs=dict(
                            endpoints='glusterfs-registry-endpoints',
                            path='glusterfs-registry-volume',
                            readOnly=False,
                            swap=False,
                            swapcopy=True),
                        host=None,
                        access=dict(
                            modes=['ReadWriteMany']
                        ),
                        create_pv=True,
                        create_pvc=True
                    )
                ),
                router=dict()
            )

        return defaults

    def guess_host_provider(self):
        """ Guess the host provider

            Returns:
                dict: The generated default facts for the detected provider
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['ansible_product_name']
        product_version = self.system_facts['ansible_product_version']
        virt_type = self.system_facts['ansible_virtualization_type']
        virt_role = self.system_facts['ansible_virtualization_role']
        provider = None
        metadata = None

        # TODO: this is not exposed through module_utils/facts.py in ansible,
        # need to create PR for ansible to expose it
        bios_vendor = get_file_content(  # noqa: F405
            '/sys/devices/virtual/dmi/id/bios_vendor'
        )
        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)

    def init_provider_facts(self):
        """ Initialize the provider facts

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts

    @staticmethod
    def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
        """ Split openshift_env facts based on openshift_env structures.

            Args:
                openshift_env_fact (string): the openshift_env fact to split
                                             ex: 'openshift_cloudprovider_openstack_auth_url'
                openshift_env_structures (list): a list of structures to determine fact keys
                                                 ex: ['openshift.cloudprovider.openstack.*']
            Returns:
                list: a list of keys that represent the fact
                      ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
        """
        # By default, we'll split an openshift_env fact by underscores.
        fact_keys = openshift_env_fact.split('_')

        # Determine if any of the provided variable structures match the fact.
        matching_structure = None
        if openshift_env_structures is not None:
            for structure in openshift_env_structures:
                if re.match(structure, openshift_env_fact):
                    matching_structure = structure
        # Fact didn't match any variable structures so return the default fact keys.
        if matching_structure is None:
            return fact_keys

        final_keys = []
        structure_keys = matching_structure.split('.')
        for structure_key in structure_keys:
            # Matched current key. Add to final keys.
            if structure_key == fact_keys[structure_keys.index(structure_key)]:
                final_keys.append(structure_key)
            # Wildcard means we will be taking everything from here to the end of the fact.
            elif structure_key == '*':
                final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
            # Shouldn't have gotten here, return the fact keys.
            else:
                return fact_keys
        return final_keys

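    # Illustrative openshift_env handling in init_local_facts() below: for a
    # role of 'cloudprovider', an entry such as
    #   {'openshift_cloudprovider_kind': 'aws'}
    # is split into keys ['cloudprovider', 'kind'] (the leading 'openshift'
    # component is dropped) and merged into the local facts as
    #   {'cloudprovider': {'kind': 'aws'}}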
    # Disabling too-many-branches and too-many-locals.
    # This should be cleaned up as a TODO item.
    # pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None,
                         openshift_env=None,
                         openshift_env_structures=None,
                         protected_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift env facts to set
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']

            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        if openshift_env != {} and openshift_env is not None:
            for fact, value in iteritems(openshift_env):
                oo_env_facts = dict()
                current_level = oo_env_facts
                keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]

                if len(keys) > 0 and keys[0] != self.role:
                    continue

                for key in keys:
                    if key == keys[-1]:
                        current_level[key] = value
                    elif key not in current_level:
                        current_level[key] = dict()
                    current_level = current_level[key]

                facts_to_set = merge_facts(orig=facts_to_set,
                                           new=oo_env_facts,
                                           additive_facts_to_overwrite=[],
                                           protected_facts_to_overwrite=[])

        local_facts = get_local_facts_from_file(self.filename)

        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite,
                                      protected_facts_to_overwrite)

        if 'docker' in new_local_facts:
            # remove duplicate and empty strings from registry lists, preserving order
            for cat in ['additional', 'blocked', 'insecure']:
                key = '{0}_registries'.format(cat)
                if key in new_local_facts['docker']:
                    val = new_local_facts['docker'][key]
                    if isinstance(val, string_types):
                        val = [x.strip() for x in val.split(',')]
                    seen = set()
                    new_local_facts['docker'][key] = list()
                    for registry in val:
                        if registry not in seen and registry != '':
                            seen.add(registry)
                            new_local_facts['docker'][key].append(registry)

            # Convert legacy log_options comma sep string to a list if present:
            if 'log_options' in new_local_facts['docker'] and \
                    isinstance(new_local_facts['docker']['log_options'], string_types):
                new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')

        new_local_facts = self.remove_empty_facts(new_local_facts)

        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:  # noqa: F405
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts

    def remove_empty_facts(self, facts=None):
        """ Remove empty facts

            Args:
                facts (dict): facts to clean
        """
        facts_to_remove = []
        for fact, value in iteritems(facts):
            if isinstance(facts[fact], dict):
                facts[fact] = self.remove_empty_facts(facts[fact])
            else:
                if value == "" or value == [""] or value is None:
                    facts_to_remove.append(fact)
        for fact in facts_to_remove:
            del facts[fact]
        return facts

    def validate_local_facts(self, facts=None):
        """ Validate local facts

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            msg = 'Invalid facts detected:\n'
            # pylint: disable=consider-iterating-dictionary
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            module.fail_json(msg=msg, changed=self.changed)  # noqa: F405

    # disabling pylint errors for line-too-long since we're dealing
    # with best effort reduction of error messages here.
    # disabling errors for too-many-branches since we require checking
    # many conditions.
    # pylint: disable=line-too-long, too-many-branches
    @staticmethod
    def validate_master_facts(facts, invalid_facts):
        """ Validate master facts

            Args:
                facts (dict): local facts to validate
                invalid_facts (dict): collected invalid_facts

            Returns:
                dict: Invalid facts
        """
        if 'master' in facts:
            # openshift.master.session_auth_secrets
            if 'session_auth_secrets' in facts['master']:
                session_auth_secrets = facts['master']['session_auth_secrets']
                if not issubclass(type(session_auth_secrets), list):
                    invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
                elif 'session_encryption_secrets' not in facts['master']:
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption_secrets must be set '
                                                             'if openshift_master_session_auth_secrets is provided.')
                elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
                                                             'openshift_master_session_encryption_secrets must be '
                                                             'equal length.')
                else:
                    for secret in session_auth_secrets:
                        if len(secret) < 32:
                            invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
                                                                     'Secrets must be at least 32 characters in length.')
            # openshift.master.session_encryption_secrets
            if 'session_encryption_secrets' in facts['master']:
                session_encryption_secrets = facts['master']['session_encryption_secrets']
                if not issubclass(type(session_encryption_secrets), list):
                    invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
                elif 'session_auth_secrets' not in facts['master']:
                    invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
                                                                   'set if openshift_master_session_encryption_secrets '
                                                                   'is provided.')
                else:
                    for secret in session_encryption_secrets:
                        if len(secret) not in [16, 24, 32]:
                            invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
                                                                           'Secrets must be 16, 24, or 32 characters in length.')
        return invalid_facts


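# A minimal (illustrative) task invocation of this module; the parameter names
# mirror the argument_spec defined in main() below:
#
#   - openshift_facts:
#       role: common
#       local_facts:
#         deployment_type: origin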
def main():
    """ main """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(  # noqa: F405
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
            openshift_env=dict(default={}, type='dict', required=False),
            openshift_env_structures=dict(default=[], type='list', required=False),
            protected_facts_to_overwrite=dict(default=[], type='list', required=False)
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    if not HAVE_DBUS:
        module.fail_json(msg="This module requires dbus python bindings")  # noqa: F405

    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']  # noqa: F405
    module.params['gather_timeout'] = 10  # noqa: F405
    module.params['filter'] = '*'  # noqa: F405

    role = module.params['role']  # noqa: F405
    local_facts = module.params['local_facts']  # noqa: F405
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']  # noqa: F405
    openshift_env = module.params['openshift_env']  # noqa: F405
    openshift_env_structures = module.params['openshift_env_structures']  # noqa: F405
    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']  # noqa: F405

    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite,
                                     openshift_env,
                                     openshift_env_structures,
                                     protected_facts_to_overwrite)

    file_params = module.params.copy()  # noqa: F405
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)  # noqa: F405
    changed = module.set_fs_attributes_if_different(file_args,  # noqa: F405
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,  # noqa: F405
                            ansible_facts=openshift_facts.facts)


if __name__ == '__main__':
    main()