openshift_facts.py 86 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138
  1. #!/usr/bin/python
  2. # pylint: disable=too-many-lines
  3. # -*- coding: utf-8 -*-
  4. # vim: expandtab:tabstop=4:shiftwidth=4
  5. # Reason: Disable pylint too-many-lines because we don't want to split up this file.
  6. # Status: Permanently disabled to keep this module as self-contained as possible.
  7. """Ansible module for retrieving and setting openshift related facts"""
  8. DOCUMENTATION = '''
  9. ---
  10. module: openshift_facts
  11. short_description: Cluster Facts
  12. author: Jason DeTiberus
  13. requirements: [ ]
  14. '''
  15. EXAMPLES = '''
  16. '''
import ConfigParser
import copy
import io
import os
import re
import socket
import struct
import yaml
from distutils.util import strtobool
from distutils.version import LooseVersion
from dbus import SystemBus, Interface
from dbus.exceptions import DBusException
  28. def migrate_docker_facts(facts):
  29. """ Apply migrations for docker facts """
  30. params = {
  31. 'common': (
  32. 'additional_registries',
  33. 'insecure_registries',
  34. 'blocked_registries',
  35. 'options'
  36. ),
  37. 'node': (
  38. 'log_driver',
  39. 'log_options'
  40. )
  41. }
  42. if 'docker' not in facts:
  43. facts['docker'] = {}
  44. for role in params.keys():
  45. if role in facts:
  46. for param in params[role]:
  47. old_param = 'docker_' + param
  48. if old_param in facts[role]:
  49. facts['docker'][param] = facts[role].pop(old_param)
  50. if 'node' in facts and 'portal_net' in facts['node']:
  51. facts['docker']['hosted_registry_insecure'] = True
  52. facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')
  53. # log_options was originally meant to be a comma separated string, but
  54. # we now prefer an actual list, with backward compatability:
  55. if 'log_options' in facts['docker'] and \
  56. isinstance(facts['docker']['log_options'], basestring):
  57. facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
  58. return facts
  59. # TODO: We should add a generic migration function that takes source and destination
  60. # paths and does the right thing rather than one function for common, one for node, etc.
  61. def migrate_common_facts(facts):
  62. """ Migrate facts from various roles into common """
  63. params = {
  64. 'node': ('portal_net'),
  65. 'master': ('portal_net')
  66. }
  67. if 'common' not in facts:
  68. facts['common'] = {}
  69. for role in params.keys():
  70. if role in facts:
  71. for param in params[role]:
  72. if param in facts[role]:
  73. facts['common'][param] = facts[role].pop(param)
  74. return facts
  75. def migrate_node_facts(facts):
  76. """ Migrate facts from various roles into node """
  77. params = {
  78. 'common': ('dns_ip'),
  79. }
  80. if 'node' not in facts:
  81. facts['node'] = {}
  82. for role in params.keys():
  83. if role in facts:
  84. for param in params[role]:
  85. if param in facts[role]:
  86. facts['node'][param] = facts[role].pop(param)
  87. return facts
  88. def migrate_local_facts(facts):
  89. """ Apply migrations of local facts """
  90. migrated_facts = copy.deepcopy(facts)
  91. migrated_facts = migrate_docker_facts(migrated_facts)
  92. migrated_facts = migrate_common_facts(migrated_facts)
  93. migrated_facts = migrate_node_facts(migrated_facts)
  94. migrated_facts = migrate_hosted_facts(migrated_facts)
  95. return migrated_facts
  96. def migrate_hosted_facts(facts):
  97. """ Apply migrations for master facts """
  98. if 'master' in facts:
  99. if 'router_selector' in facts['master']:
  100. if 'hosted' not in facts:
  101. facts['hosted'] = {}
  102. if 'router' not in facts['hosted']:
  103. facts['hosted']['router'] = {}
  104. facts['hosted']['router']['selector'] = facts['master'].pop('router_selector')
  105. return facts
  106. def first_ip(network):
  107. """ Return the first IPv4 address in network
  108. Args:
  109. network (str): network in CIDR format
  110. Returns:
  111. str: first IPv4 address
  112. """
  113. atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
  114. itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
  115. (address, netmask) = network.split('/')
  116. netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
  117. return itoa((atoi(address) & netmask_i) + 1)
  118. def hostname_valid(hostname):
  119. """ Test if specified hostname should be considered valid
  120. Args:
  121. hostname (str): hostname to test
  122. Returns:
  123. bool: True if valid, otherwise False
  124. """
  125. if (not hostname or
  126. hostname.startswith('localhost') or
  127. hostname.endswith('localdomain') or
  128. len(hostname.split('.')) < 2):
  129. return False
  130. return True
  131. def choose_hostname(hostnames=None, fallback=''):
  132. """ Choose a hostname from the provided hostnames
  133. Given a list of hostnames and a fallback value, choose a hostname to
  134. use. This function will prefer fqdns if they exist (excluding any that
  135. begin with localhost or end with localdomain) over ip addresses.
  136. Args:
  137. hostnames (list): list of hostnames
  138. fallback (str): default value to set if hostnames does not contain
  139. a valid hostname
  140. Returns:
  141. str: chosen hostname
  142. """
  143. hostname = fallback
  144. if hostnames is None:
  145. return hostname
  146. ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
  147. ips = [i for i in hostnames
  148. if (i is not None and isinstance(i, basestring)
  149. and re.match(ip_regex, i))]
  150. hosts = [i for i in hostnames
  151. if i is not None and i != '' and i not in ips]
  152. for host_list in (hosts, ips):
  153. for host in host_list:
  154. if hostname_valid(host):
  155. return host
  156. return hostname
  157. def query_metadata(metadata_url, headers=None, expect_json=False):
  158. """ Return metadata from the provided metadata_url
  159. Args:
  160. metadata_url (str): metadata url
  161. headers (dict): headers to set for metadata request
  162. expect_json (bool): does the metadata_url return json
  163. Returns:
  164. dict or list: metadata request result
  165. """
  166. result, info = fetch_url(module, metadata_url, headers=headers)
  167. if info['status'] != 200:
  168. raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
  169. if expect_json:
  170. return module.from_json(result.read())
  171. else:
  172. return [line.strip() for line in result.readlines()]
  173. def walk_metadata(metadata_url, headers=None, expect_json=False):
  174. """ Walk the metadata tree and return a dictionary of the entire tree
  175. Args:
  176. metadata_url (str): metadata url
  177. headers (dict): headers to set for metadata request
  178. expect_json (bool): does the metadata_url return json
  179. Returns:
  180. dict: the result of walking the metadata tree
  181. """
  182. metadata = dict()
  183. for line in query_metadata(metadata_url, headers, expect_json):
  184. if line.endswith('/') and not line == 'public-keys/':
  185. key = line[:-1]
  186. metadata[key] = walk_metadata(metadata_url + line,
  187. headers, expect_json)
  188. else:
  189. results = query_metadata(metadata_url + line, headers,
  190. expect_json)
  191. if len(results) == 1:
  192. # disable pylint maybe-no-member because overloaded use of
  193. # the module name causes pylint to not detect that results
  194. # is an array or hash
  195. # pylint: disable=maybe-no-member
  196. metadata[line] = results.pop()
  197. else:
  198. metadata[line] = results
  199. return metadata
  200. def get_provider_metadata(metadata_url, supports_recursive=False,
  201. headers=None, expect_json=False):
  202. """ Retrieve the provider metadata
  203. Args:
  204. metadata_url (str): metadata url
  205. supports_recursive (bool): does the provider metadata api support
  206. recursion
  207. headers (dict): headers to set for metadata request
  208. expect_json (bool): does the metadata_url return json
  209. Returns:
  210. dict: the provider metadata
  211. """
  212. try:
  213. if supports_recursive:
  214. metadata = query_metadata(metadata_url, headers,
  215. expect_json)
  216. else:
  217. metadata = walk_metadata(metadata_url, headers,
  218. expect_json)
  219. except OpenShiftFactsMetadataUnavailableError:
  220. metadata = None
  221. return metadata
  222. def normalize_gce_facts(metadata, facts):
  223. """ Normalize gce facts
  224. Args:
  225. metadata (dict): provider metadata
  226. facts (dict): facts to update
  227. Returns:
  228. dict: the result of adding the normalized metadata to the provided
  229. facts dict
  230. """
  231. for interface in metadata['instance']['networkInterfaces']:
  232. int_info = dict(ips=[interface['ip']], network_type='gce')
  233. int_info['public_ips'] = [ac['externalIp'] for ac
  234. in interface['accessConfigs']]
  235. int_info['public_ips'].extend(interface['forwardedIps'])
  236. _, _, network_id = interface['network'].rpartition('/')
  237. int_info['network_id'] = network_id
  238. facts['network']['interfaces'].append(int_info)
  239. _, _, zone = metadata['instance']['zone'].rpartition('/')
  240. facts['zone'] = zone
  241. # GCE currently only supports a single interface
  242. facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
  243. pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
  244. facts['network']['public_ip'] = pub_ip
  245. facts['network']['hostname'] = metadata['instance']['hostname']
  246. # TODO: attempt to resolve public_hostname
  247. facts['network']['public_hostname'] = facts['network']['public_ip']
  248. return facts
  249. def normalize_aws_facts(metadata, facts):
  250. """ Normalize aws facts
  251. Args:
  252. metadata (dict): provider metadata
  253. facts (dict): facts to update
  254. Returns:
  255. dict: the result of adding the normalized metadata to the provided
  256. facts dict
  257. """
  258. for interface in sorted(
  259. metadata['network']['interfaces']['macs'].values(),
  260. key=lambda x: x['device-number']
  261. ):
  262. int_info = dict()
  263. var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
  264. for ips_var, int_var in var_map.iteritems():
  265. ips = interface.get(int_var)
  266. if isinstance(ips, basestring):
  267. int_info[ips_var] = [ips]
  268. else:
  269. int_info[ips_var] = ips
  270. if 'vpc-id' in interface:
  271. int_info['network_type'] = 'vpc'
  272. else:
  273. int_info['network_type'] = 'classic'
  274. if int_info['network_type'] == 'vpc':
  275. int_info['network_id'] = interface['subnet-id']
  276. else:
  277. int_info['network_id'] = None
  278. facts['network']['interfaces'].append(int_info)
  279. facts['zone'] = metadata['placement']['availability-zone']
  280. # TODO: actually attempt to determine default local and public ips
  281. # by using the ansible default ip fact and the ipv4-associations
  282. # from the ec2 metadata
  283. facts['network']['ip'] = metadata.get('local-ipv4')
  284. facts['network']['public_ip'] = metadata.get('public-ipv4')
  285. # TODO: verify that local hostname makes sense and is resolvable
  286. facts['network']['hostname'] = metadata.get('local-hostname')
  287. # TODO: verify that public hostname makes sense and is resolvable
  288. facts['network']['public_hostname'] = metadata.get('public-hostname')
  289. return facts
  290. def normalize_openstack_facts(metadata, facts):
  291. """ Normalize openstack facts
  292. Args:
  293. metadata (dict): provider metadata
  294. facts (dict): facts to update
  295. Returns:
  296. dict: the result of adding the normalized metadata to the provided
  297. facts dict
  298. """
  299. # openstack ec2 compat api does not support network interfaces and
  300. # the version tested on did not include the info in the openstack
  301. # metadata api, should be updated if neutron exposes this.
  302. facts['zone'] = metadata['availability_zone']
  303. local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
  304. facts['network']['ip'] = local_ipv4
  305. facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
  306. # TODO: verify local hostname makes sense and is resolvable
  307. facts['network']['hostname'] = metadata['hostname']
  308. # TODO: verify that public hostname makes sense and is resolvable
  309. pub_h = metadata['ec2_compat']['public-hostname']
  310. facts['network']['public_hostname'] = pub_h
  311. return facts
  312. def normalize_provider_facts(provider, metadata):
  313. """ Normalize provider facts
  314. Args:
  315. provider (str): host provider
  316. metadata (dict): provider metadata
  317. Returns:
  318. dict: the normalized provider facts
  319. """
  320. if provider is None or metadata is None:
  321. return {}
  322. # TODO: test for ipv6_enabled where possible (gce, aws do not support)
  323. # and configure ipv6 facts if available
  324. # TODO: add support for setting user_data if available
  325. facts = dict(name=provider, metadata=metadata,
  326. network=dict(interfaces=[], ipv6_enabled=False))
  327. if provider == 'gce':
  328. facts = normalize_gce_facts(metadata, facts)
  329. elif provider == 'aws':
  330. facts = normalize_aws_facts(metadata, facts)
  331. elif provider == 'openstack':
  332. facts = normalize_openstack_facts(metadata, facts)
  333. return facts
  334. def set_flannel_facts_if_unset(facts):
  335. """ Set flannel facts if not already present in facts dict
  336. dict: the facts dict updated with the flannel facts if
  337. missing
  338. Args:
  339. facts (dict): existing facts
  340. Returns:
  341. dict: the facts dict updated with the flannel
  342. facts if they were not already present
  343. """
  344. if 'common' in facts:
  345. if 'use_flannel' not in facts['common']:
  346. use_flannel = False
  347. facts['common']['use_flannel'] = use_flannel
  348. return facts
  349. def set_nuage_facts_if_unset(facts):
  350. """ Set nuage facts if not already present in facts dict
  351. dict: the facts dict updated with the nuage facts if
  352. missing
  353. Args:
  354. facts (dict): existing facts
  355. Returns:
  356. dict: the facts dict updated with the nuage
  357. facts if they were not already present
  358. """
  359. if 'common' in facts:
  360. if 'use_nuage' not in facts['common']:
  361. use_nuage = False
  362. facts['common']['use_nuage'] = use_nuage
  363. return facts
  364. def set_node_schedulability(facts):
  365. """ Set schedulable facts if not already present in facts dict
  366. Args:
  367. facts (dict): existing facts
  368. Returns:
  369. dict: the facts dict updated with the generated schedulable
  370. facts if they were not already present
  371. """
  372. if 'node' in facts:
  373. if 'schedulable' not in facts['node']:
  374. if 'master' in facts:
  375. facts['node']['schedulable'] = False
  376. else:
  377. facts['node']['schedulable'] = True
  378. return facts
  379. def set_selectors(facts):
  380. """ Set selectors facts if not already present in facts dict
  381. Args:
  382. facts (dict): existing facts
  383. Returns:
  384. dict: the facts dict updated with the generated selectors
  385. facts if they were not already present
  386. """
  387. deployment_type = facts['common']['deployment_type']
  388. if deployment_type == 'online':
  389. selector = "type=infra"
  390. else:
  391. selector = "region=infra"
  392. if 'hosted' not in facts:
  393. facts['hosted'] = {}
  394. if 'router' not in facts['hosted']:
  395. facts['hosted']['router'] = {}
  396. if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']:
  397. facts['hosted']['router']['selector'] = selector
  398. if 'master' in facts:
  399. if 'infra_nodes' in facts['master']:
  400. if 'registry_selector' not in facts['master']:
  401. facts['master']['registry_selector'] = selector
  402. return facts
  403. def set_metrics_facts_if_unset(facts):
  404. """ Set cluster metrics facts if not already present in facts dict
  405. dict: the facts dict updated with the generated cluster metrics facts if
  406. missing
  407. Args:
  408. facts (dict): existing facts
  409. Returns:
  410. dict: the facts dict updated with the generated cluster metrics
  411. facts if they were not already present
  412. """
  413. if 'common' in facts:
  414. if 'use_cluster_metrics' not in facts['common']:
  415. use_cluster_metrics = False
  416. facts['common']['use_cluster_metrics'] = use_cluster_metrics
  417. return facts
  418. def set_dnsmasq_facts_if_unset(facts):
  419. """ Set dnsmasq facts if not already present in facts
  420. Args:
  421. facts (dict) existing facts
  422. Returns:
  423. facts (dict) updated facts with values set if not previously set
  424. """
  425. if 'common' in facts:
  426. if 'use_dnsmasq' not in facts['common'] and safe_get_bool(facts['common']['version_gte_3_2_or_1_2']):
  427. facts['common']['use_dnsmasq'] = True
  428. else:
  429. facts['common']['use_dnsmasq'] = False
  430. if 'master' in facts and 'dns_port' not in facts['master']:
  431. if safe_get_bool(facts['common']['use_dnsmasq']):
  432. facts['master']['dns_port'] = 8053
  433. else:
  434. facts['master']['dns_port'] = 53
  435. return facts
  436. def set_project_cfg_facts_if_unset(facts):
  437. """ Set Project Configuration facts if not already present in facts dict
  438. dict:
  439. Args:
  440. facts (dict): existing facts
  441. Returns:
  442. dict: the facts dict updated with the generated Project Configuration
  443. facts if they were not already present
  444. """
  445. config = {
  446. 'default_node_selector': '',
  447. 'project_request_message': '',
  448. 'project_request_template': '',
  449. 'mcs_allocator_range': 's0:/2',
  450. 'mcs_labels_per_project': 5,
  451. 'uid_allocator_range': '1000000000-1999999999/10000'
  452. }
  453. if 'master' in facts:
  454. for key, value in config.items():
  455. if key not in facts['master']:
  456. facts['master'][key] = value
  457. return facts
  458. def set_identity_providers_if_unset(facts):
  459. """ Set identity_providers fact if not already present in facts dict
  460. Args:
  461. facts (dict): existing facts
  462. Returns:
  463. dict: the facts dict updated with the generated identity providers
  464. facts if they were not already present
  465. """
  466. if 'master' in facts:
  467. deployment_type = facts['common']['deployment_type']
  468. if 'identity_providers' not in facts['master']:
  469. identity_provider = dict(
  470. name='allow_all', challenge=True, login=True,
  471. kind='AllowAllPasswordIdentityProvider'
  472. )
  473. if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
  474. identity_provider = dict(
  475. name='deny_all', challenge=True, login=True,
  476. kind='DenyAllPasswordIdentityProvider'
  477. )
  478. facts['master']['identity_providers'] = [identity_provider]
  479. return facts
def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict

        Derives api/console/etcd/loopback URLs from the master's host,
        port and SSL facts, preferring cluster_(public_)hostname over the
        per-host names. Every fact is set via setdefault, so values that
        are already present are never overwritten.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
                were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        # Cluster-level names (load balancer / VIP) win over host names.
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']
        # Per-endpoint SSL flags; loopback/public api share the api flag.
        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )
        # Per-endpoint ports, keyed the same way as use_ssl.
        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )
        etcd_urls = []
        if etcd_hosts != '':
            # External etcd cluster: one URL per configured host, and
            # embedded etcd is forced off.
            facts['master']['etcd_port'] = ports['etcd']
            facts['master']['embedded_etcd'] = False
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            # Embedded etcd: single URL pointing at this master.
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]
        facts['master'].setdefault('etcd_urls', etcd_urls)
        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))
        # Loopback kubeconfig identifiers: dots are replaced with dashes
        # to form valid cluster/user names.
        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)
        # Console URLs additionally carry the console path component.
        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))
    return facts
  543. def set_aggregate_facts(facts):
  544. """ Set aggregate facts
  545. Args:
  546. facts (dict): existing facts
  547. Returns:
  548. dict: the facts dict updated with aggregated facts
  549. """
  550. all_hostnames = set()
  551. internal_hostnames = set()
  552. kube_svc_ip = first_ip(facts['common']['portal_net'])
  553. if 'common' in facts:
  554. all_hostnames.add(facts['common']['hostname'])
  555. all_hostnames.add(facts['common']['public_hostname'])
  556. all_hostnames.add(facts['common']['ip'])
  557. all_hostnames.add(facts['common']['public_ip'])
  558. facts['common']['kube_svc_ip'] = kube_svc_ip
  559. internal_hostnames.add(facts['common']['hostname'])
  560. internal_hostnames.add(facts['common']['ip'])
  561. cluster_domain = facts['common']['dns_domain']
  562. if 'master' in facts:
  563. if 'cluster_hostname' in facts['master']:
  564. all_hostnames.add(facts['master']['cluster_hostname'])
  565. if 'cluster_public_hostname' in facts['master']:
  566. all_hostnames.add(facts['master']['cluster_public_hostname'])
  567. svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
  568. 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
  569. 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
  570. all_hostnames.update(svc_names)
  571. internal_hostnames.update(svc_names)
  572. all_hostnames.add(kube_svc_ip)
  573. internal_hostnames.add(kube_svc_ip)
  574. facts['common']['all_hostnames'] = list(all_hostnames)
  575. facts['common']['internal_hostnames'] = list(internal_hostnames)
  576. return facts
def set_etcd_facts_if_unset(facts):
    """
    If using embedded etcd, loads the data directory from master-config.yaml.

    If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.

    If anything goes wrong parsing these, the fact will not be set.
    """
    if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        if 'etcd_data_dir' not in etcd_facts:
            try:
                # Parse master config to find actual etcd data dir:
                master_cfg_path = os.path.join(facts['common']['config_base'],
                                               'master/master-config.yaml')
                master_cfg_f = open(master_cfg_path, 'r')
                config = yaml.safe_load(master_cfg_f.read())
                master_cfg_f.close()

                # Embedded etcd stores its data under the master config's
                # storageDirectory setting.
                etcd_facts['etcd_data_dir'] = \
                    config['etcdConfig']['storageDirectory']

                facts['etcd'] = etcd_facts

            # We don't want exceptions bubbling up here:
            # pylint: disable=broad-except
            except Exception:
                pass
    else:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
        try:
            # Add a fake section for parsing, since etcd.conf is a
            # sectionless key=value file and ConfigParser requires one:
            ini_str = unicode('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = ConfigParser.RawConfigParser()
            config.readfp(ini_fp)
            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
            # Strip surrounding double quotes, if present.
            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
                etcd_data_dir = etcd_data_dir[1:-1]

            etcd_facts['etcd_data_dir'] = etcd_data_dir
            facts['etcd'] = etcd_facts

        # We don't want exceptions bubbling up here:
        # pylint: disable=broad-except
        except Exception:
            pass

    return facts
  619. def set_deployment_facts_if_unset(facts):
  620. """ Set Facts that vary based on deployment_type. This currently
  621. includes common.service_type, common.config_base, master.registry_url,
  622. node.registry_url, node.storage_plugin_deps
  623. Args:
  624. facts (dict): existing facts
  625. Returns:
  626. dict: the facts dict updated with the generated deployment_type
  627. facts
  628. """
  629. # disabled to avoid breaking up facts related to deployment type into
  630. # multiple methods for now.
  631. # pylint: disable=too-many-statements, too-many-branches
  632. if 'common' in facts:
  633. deployment_type = facts['common']['deployment_type']
  634. if 'service_type' not in facts['common']:
  635. service_type = 'atomic-openshift'
  636. if deployment_type == 'origin':
  637. service_type = 'origin'
  638. elif deployment_type in ['enterprise']:
  639. service_type = 'openshift'
  640. facts['common']['service_type'] = service_type
  641. if 'config_base' not in facts['common']:
  642. config_base = '/etc/origin'
  643. if deployment_type in ['enterprise']:
  644. config_base = '/etc/openshift'
  645. # Handle upgrade scenarios when symlinks don't yet exist:
  646. if not os.path.exists(config_base) and os.path.exists('/etc/openshift'):
  647. config_base = '/etc/openshift'
  648. facts['common']['config_base'] = config_base
  649. if 'data_dir' not in facts['common']:
  650. data_dir = '/var/lib/origin'
  651. if deployment_type in ['enterprise']:
  652. data_dir = '/var/lib/openshift'
  653. # Handle upgrade scenarios when symlinks don't yet exist:
  654. if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'):
  655. data_dir = '/var/lib/openshift'
  656. facts['common']['data_dir'] = data_dir
  657. if 'docker' in facts:
  658. deployment_type = facts['common']['deployment_type']
  659. if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
  660. addtl_regs = facts['docker'].get('additional_registries', [])
  661. ent_reg = 'registry.access.redhat.com'
  662. if ent_reg not in addtl_regs:
  663. facts['docker']['additional_registries'] = addtl_regs + [ent_reg]
  664. for role in ('master', 'node'):
  665. if role in facts:
  666. deployment_type = facts['common']['deployment_type']
  667. if 'registry_url' not in facts[role]:
  668. registry_url = 'openshift/origin-${component}:${version}'
  669. if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
  670. registry_url = 'openshift3/ose-${component}:${version}'
  671. elif deployment_type == 'atomic-enterprise':
  672. registry_url = 'aep3_beta/aep-${component}:${version}'
  673. facts[role]['registry_url'] = registry_url
  674. if 'master' in facts:
  675. deployment_type = facts['common']['deployment_type']
  676. openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
  677. if 'disabled_features' in facts['master']:
  678. if deployment_type == 'atomic-enterprise':
  679. curr_disabled_features = set(facts['master']['disabled_features'])
  680. facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
  681. else:
  682. if deployment_type == 'atomic-enterprise':
  683. facts['master']['disabled_features'] = openshift_features
  684. if 'node' in facts:
  685. deployment_type = facts['common']['deployment_type']
  686. if 'storage_plugin_deps' not in facts['node']:
  687. if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']:
  688. facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
  689. else:
  690. facts['node']['storage_plugin_deps'] = []
  691. return facts
  692. def set_version_facts_if_unset(facts):
  693. """ Set version facts. This currently includes common.version and
  694. common.version_gte_3_1_or_1_1.
  695. Args:
  696. facts (dict): existing facts
  697. Returns:
  698. dict: the facts dict updated with version facts.
  699. """
  700. if 'common' in facts:
  701. deployment_type = facts['common']['deployment_type']
  702. version = get_openshift_version(facts)
  703. if version is not None:
  704. facts['common']['version'] = version
  705. if deployment_type == 'origin':
  706. version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0')
  707. version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1')
  708. version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0')
  709. version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('1.3.0')
  710. else:
  711. version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905')
  712. version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1')
  713. version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901')
  714. version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('3.3.0')
  715. else:
  716. version_gte_3_1_or_1_1 = True
  717. version_gte_3_1_1_or_1_1_1 = True
  718. version_gte_3_2_or_1_2 = True
  719. version_gte_3_3_or_1_3 = False
  720. facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
  721. facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
  722. facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
  723. facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
  724. if version_gte_3_3_or_1_3:
  725. examples_content_version = 'v1.3'
  726. elif version_gte_3_2_or_1_2:
  727. examples_content_version = 'v1.2'
  728. elif version_gte_3_1_or_1_1:
  729. examples_content_version = 'v1.1'
  730. else:
  731. examples_content_version = 'v1.0'
  732. facts['common']['examples_content_version'] = examples_content_version
  733. return facts
  734. def set_manageiq_facts_if_unset(facts):
  735. """ Set manageiq facts. This currently includes common.use_manageiq.
  736. Args:
  737. facts (dict): existing facts
  738. Returns:
  739. dict: the facts dict updated with version facts.
  740. Raises:
  741. OpenShiftFactsInternalError:
  742. """
  743. if 'common' not in facts:
  744. if 'version_gte_3_1_or_1_1' not in facts['common']:
  745. raise OpenShiftFactsInternalError(
  746. "Invalid invocation: The required facts are not set"
  747. )
  748. if 'use_manageiq' not in facts['common']:
  749. facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1']
  750. return facts
  751. def set_sdn_facts_if_unset(facts, system_facts):
  752. """ Set sdn facts if not already present in facts dict
  753. Args:
  754. facts (dict): existing facts
  755. system_facts (dict): ansible_facts
  756. Returns:
  757. dict: the facts dict updated with the generated sdn facts if they
  758. were not already present
  759. """
  760. if 'common' in facts:
  761. use_sdn = facts['common']['use_openshift_sdn']
  762. if not (use_sdn == '' or isinstance(use_sdn, bool)):
  763. use_sdn = safe_get_bool(use_sdn)
  764. facts['common']['use_openshift_sdn'] = use_sdn
  765. if 'sdn_network_plugin_name' not in facts['common']:
  766. plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
  767. facts['common']['sdn_network_plugin_name'] = plugin
  768. if 'master' in facts:
  769. if 'sdn_cluster_network_cidr' not in facts['master']:
  770. facts['master']['sdn_cluster_network_cidr'] = '10.1.0.0/16'
  771. if 'sdn_host_subnet_length' not in facts['master']:
  772. facts['master']['sdn_host_subnet_length'] = '8'
  773. if 'node' in facts and 'sdn_mtu' not in facts['node']:
  774. node_ip = facts['common']['ip']
  775. # default MTU if interface MTU cannot be detected
  776. facts['node']['sdn_mtu'] = '1450'
  777. for val in system_facts.itervalues():
  778. if isinstance(val, dict) and 'mtu' in val:
  779. mtu = val['mtu']
  780. if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
  781. facts['node']['sdn_mtu'] = str(mtu - 50)
  782. return facts
  783. def migrate_oauth_template_facts(facts):
  784. """
  785. Migrate an old oauth template fact to a newer format if it's present.
  786. The legacy 'oauth_template' fact was just a filename, and assumed you were
  787. setting the 'login' template.
  788. The new pluralized 'oauth_templates' fact is a dict mapping the template
  789. name to a filename.
  790. Simplify the code after this by merging the old fact into the new.
  791. """
  792. if 'master' in facts and 'oauth_template' in facts['master']:
  793. if 'oauth_templates' not in facts['master']:
  794. facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']}
  795. elif 'login' not in facts['master']['oauth_templates']:
  796. facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
  797. return facts
  798. def format_url(use_ssl, hostname, port, path=''):
  799. """ Format url based on ssl flag, hostname, port and path
  800. Args:
  801. use_ssl (bool): is ssl enabled
  802. hostname (str): hostname
  803. port (str): port
  804. path (str): url path
  805. Returns:
  806. str: The generated url string
  807. """
  808. scheme = 'https' if use_ssl else 'http'
  809. netloc = hostname
  810. if (use_ssl and port != '443') or (not use_ssl and port != '80'):
  811. netloc += ":%s" % port
  812. return urlparse.urlunparse((scheme, netloc, path, '', '', ''))
def get_current_config(facts):
    """ Get current openshift config

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    # Every fact key other than 'common'/'provider' is treated as a role.
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]

        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            # Node kubeconfigs live in a per-host subdirectory.
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )

        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        # Only attempt the query when both the binary and the kubeconfig exist.
        if (os.path.isfile('/usr/bin/openshift')
                and os.path.isfile(kubeconfig_path)):
            try:
                _, output, _ = module.run_command(
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)

                # Redact embedded credentials before storing the parsed
                # kubeconfig in the facts.
                cad = 'certificate-authority-data'
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass

                # NOTE(review): written once per role iteration, so the last
                # role's kubeconfig wins — confirm this is intended.
                current_config['kubeconfig'] = config

            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass

    return current_config
  868. def build_kubelet_args(facts):
  869. """ Build node kubelet_args """
  870. cloud_cfg_path = os.path.join(facts['common']['config_base'],
  871. 'cloudprovider')
  872. if 'node' in facts:
  873. kubelet_args = {}
  874. if 'cloudprovider' in facts:
  875. if 'kind' in facts['cloudprovider']:
  876. if facts['cloudprovider']['kind'] == 'aws':
  877. kubelet_args['cloud-provider'] = ['aws']
  878. kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
  879. if facts['cloudprovider']['kind'] == 'openstack':
  880. kubelet_args['cloud-provider'] = ['openstack']
  881. kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
  882. if kubelet_args != {}:
  883. facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
  884. return facts
  885. def build_controller_args(facts):
  886. """ Build master controller_args """
  887. cloud_cfg_path = os.path.join(facts['common']['config_base'],
  888. 'cloudprovider')
  889. if 'master' in facts:
  890. controller_args = {}
  891. if 'cloudprovider' in facts:
  892. if 'kind' in facts['cloudprovider']:
  893. if facts['cloudprovider']['kind'] == 'aws':
  894. controller_args['cloud-provider'] = ['aws']
  895. controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
  896. if facts['cloudprovider']['kind'] == 'openstack':
  897. controller_args['cloud-provider'] = ['openstack']
  898. controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
  899. if controller_args != {}:
  900. facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
  901. return facts
  902. def build_api_server_args(facts):
  903. """ Build master api_server_args """
  904. cloud_cfg_path = os.path.join(facts['common']['config_base'],
  905. 'cloudprovider')
  906. if 'master' in facts:
  907. api_server_args = {}
  908. if 'cloudprovider' in facts:
  909. if 'kind' in facts['cloudprovider']:
  910. if facts['cloudprovider']['kind'] == 'aws':
  911. api_server_args['cloud-provider'] = ['aws']
  912. api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
  913. if facts['cloudprovider']['kind'] == 'openstack':
  914. api_server_args['cloud-provider'] = ['openstack']
  915. api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
  916. if api_server_args != {}:
  917. facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
  918. return facts
  919. def is_service_running(service):
  920. """ Queries systemd through dbus to see if the service is running """
  921. service_running = False
  922. bus = SystemBus()
  923. systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
  924. manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
  925. try:
  926. service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
  927. service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
  928. service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
  929. service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
  930. service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
  931. if service_load_state == 'loaded' and service_active_state == 'active':
  932. service_running = True
  933. except DBusException:
  934. pass
  935. return service_running
  936. def get_version_output(binary, version_cmd):
  937. """ runs and returns the version output for a command """
  938. cmd = []
  939. for item in (binary, version_cmd):
  940. if isinstance(item, list):
  941. cmd.extend(item)
  942. else:
  943. cmd.append(item)
  944. if os.path.isfile(cmd[0]):
  945. _, output, _ = module.run_command(cmd)
  946. return output
  947. def get_docker_version_info():
  948. """ Parses and returns the docker version info """
  949. result = None
  950. if is_service_running('docker'):
  951. version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
  952. if 'Server' in version_info:
  953. result = {
  954. 'api_version': version_info['Server']['API version'],
  955. 'version': version_info['Server']['Version']
  956. }
  957. return result
  958. def get_openshift_version(facts):
  959. """ Get current version of openshift on the host
  960. Args:
  961. facts (dict): existing facts
  962. optional cli_image for pulling the version number
  963. Returns:
  964. version: the current openshift version
  965. """
  966. version = None
  967. # No need to run this method repeatedly on a system if we already know the
  968. # version
  969. if 'common' in facts:
  970. if 'version' in facts['common'] and facts['common']['version'] is not None:
  971. return facts['common']['version']
  972. if os.path.isfile('/usr/bin/openshift'):
  973. _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
  974. version = parse_openshift_version(output)
  975. # openshift_facts runs before openshift_docker_facts. However, it will be
  976. # called again and set properly throughout the playbook run. This could be
  977. # refactored to simply set the openshift.common.version in the
  978. # openshift_docker_facts role but it would take reworking some assumptions
  979. # on how get_openshift_version is called.
  980. if 'is_containerized' in facts['common'] and safe_get_bool(facts['common']['is_containerized']):
  981. if 'docker' in facts and 'openshift_version' in facts['docker']:
  982. version = facts['docker']['openshift_version']
  983. return version
  984. def parse_openshift_version(output):
  985. """ Apply provider facts to supplied facts dict
  986. Args:
  987. string: output of 'openshift version'
  988. Returns:
  989. string: the version number
  990. """
  991. versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
  992. return versions.get('openshift', '')
  993. def apply_provider_facts(facts, provider_facts):
  994. """ Apply provider facts to supplied facts dict
  995. Args:
  996. facts (dict): facts dict to update
  997. provider_facts (dict): provider facts to apply
  998. roles: host roles
  999. Returns:
  1000. dict: the merged facts
  1001. """
  1002. if not provider_facts:
  1003. return facts
  1004. common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
  1005. for h_var, ip_var in common_vars:
  1006. ip_value = provider_facts['network'].get(ip_var)
  1007. if ip_value:
  1008. facts['common'][ip_var] = ip_value
  1009. facts['common'][h_var] = choose_hostname(
  1010. [provider_facts['network'].get(h_var)],
  1011. facts['common'][ip_var]
  1012. )
  1013. facts['provider'] = provider_facts
  1014. return facts
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
    """ Recursively merge facts dicts

        Precedence rules, in order: inventory JSON facts are always replaced
        wholesale by the new value; nested dicts recurse; additive facts are
        unioned (unless explicitly listed for overwrite); protected facts may
        only change in allowed directions (unless explicitly listed for
        overwrite); anything else takes the new value.

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']
        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    protected_facts = ['ha', 'master_count']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config']

    facts = dict()
    for key, value in orig.iteritems():
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watchout for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], basestring):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)

                # Collect the subset of protected facts to overwrite
                # if key matches. These will be passed to the
                # subsequent merge_facts call.
                relevant_protected_facts = []
                for item in protected_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_protected_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                # Union the two lists, preserving order of first appearance.
                # NOTE(review): if either side is not a list, the fact is
                # silently dropped from the merged output here.
                if isinstance(value, list) and isinstance(new[key], list):
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # Key matches a protected fact and we are not overwriting
            # it so we will determine if it is okay to change this
            # fact.
            elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                # The master count (int) can only increase unless it
                # has been passed as a protected fact to overwrite.
                if key == 'master_count':
                    if int(value) <= int(new[key]):
                        facts[key] = copy.deepcopy(new[key])
                    else:
                        module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')
                # ha (bool) can not change unless it has been passed
                # as a protected fact to overwrite.
                if key == 'ha':
                    if safe_get_bool(value) != safe_get_bool(new[key]):
                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')
                    else:
                        facts[key] = value
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)

    # Keys present only in the new facts are copied over unconditionally.
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watchout for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], basestring):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts
  1109. def save_local_facts(filename, facts):
  1110. """ Save local facts
  1111. Args:
  1112. filename (str): local facts file
  1113. facts (dict): facts to set
  1114. """
  1115. try:
  1116. fact_dir = os.path.dirname(filename)
  1117. if not os.path.exists(fact_dir):
  1118. os.makedirs(fact_dir)
  1119. with open(filename, 'w') as fact_file:
  1120. fact_file.write(module.jsonify(facts))
  1121. os.chmod(filename, 0o600)
  1122. except (IOError, OSError) as ex:
  1123. raise OpenShiftFactsFileWriteError(
  1124. "Could not create fact file: %s, error: %s" % (filename, ex)
  1125. )
  1126. def get_local_facts_from_file(filename):
  1127. """ Retrieve local facts from fact file
  1128. Args:
  1129. filename (str): local facts file
  1130. Returns:
  1131. dict: the retrieved facts
  1132. """
  1133. local_facts = dict()
  1134. try:
  1135. # Handle conversion of INI style facts file to json style
  1136. ini_facts = ConfigParser.SafeConfigParser()
  1137. ini_facts.read(filename)
  1138. for section in ini_facts.sections():
  1139. local_facts[section] = dict()
  1140. for key, value in ini_facts.items(section):
  1141. local_facts[section][key] = value
  1142. except (ConfigParser.MissingSectionHeaderError,
  1143. ConfigParser.ParsingError):
  1144. try:
  1145. with open(filename, 'r') as facts_file:
  1146. local_facts = json.load(facts_file)
  1147. except (ValueError, IOError):
  1148. pass
  1149. return local_facts
  1150. def sort_unique(alist):
  1151. """ Sorts and de-dupes a list
  1152. Args:
  1153. list: a list
  1154. Returns:
  1155. list: a sorted de-duped list
  1156. """
  1157. alist.sort()
  1158. out = list()
  1159. for i in alist:
  1160. if i not in out:
  1161. out.append(i)
  1162. return out
  1163. def safe_get_bool(fact):
  1164. """ Get a boolean fact safely.
  1165. Args:
  1166. facts: fact to convert
  1167. Returns:
  1168. bool: given fact as a bool
  1169. """
  1170. return bool(strtobool(str(fact)))
  1171. def set_proxy_facts(facts):
  1172. """ Set global proxy facts and promote defaults from http_proxy, https_proxy,
  1173. no_proxy to the more specific builddefaults and builddefaults_git vars.
  1174. 1. http_proxy, https_proxy, no_proxy
  1175. 2. builddefaults_*
  1176. 3. builddefaults_git_*
  1177. Args:
  1178. facts(dict): existing facts
  1179. Returns:
  1180. facts(dict): Updated facts with missing values
  1181. """
  1182. if 'common' in facts:
  1183. common = facts['common']
  1184. if 'http_proxy' in common or 'https_proxy' in common:
  1185. if 'no_proxy' in common and \
  1186. isinstance(common['no_proxy'], basestring):
  1187. common['no_proxy'] = common['no_proxy'].split(",")
  1188. elif 'no_proxy' not in common:
  1189. common['no_proxy'] = []
  1190. if 'generate_no_proxy_hosts' in common and \
  1191. safe_get_bool(common['generate_no_proxy_hosts']):
  1192. if 'no_proxy_internal_hostnames' in common:
  1193. common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
  1194. common['no_proxy'].append('.' + common['dns_domain'])
  1195. # We always add ourselves no matter what
  1196. common['no_proxy'].append(common['hostname'])
  1197. common['no_proxy'] = sort_unique(common['no_proxy'])
  1198. facts['common'] = common
  1199. if 'builddefaults' in facts:
  1200. builddefaults = facts['builddefaults']
  1201. common = facts['common']
  1202. # Copy values from common to builddefaults
  1203. if 'http_proxy' not in builddefaults and 'http_proxy' in common:
  1204. builddefaults['http_proxy'] = common['http_proxy']
  1205. if 'https_proxy' not in builddefaults and 'https_proxy' in common:
  1206. builddefaults['https_proxy'] = common['https_proxy']
  1207. if 'no_proxy' not in builddefaults and 'no_proxy' in common:
  1208. builddefaults['no_proxy'] = common['no_proxy']
  1209. if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
  1210. builddefaults['git_http_proxy'] = builddefaults['http_proxy']
  1211. if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
  1212. builddefaults['git_https_proxy'] = builddefaults['https_proxy']
  1213. # If we're actually defining a proxy config then create kube_admission_plugin_config
  1214. # if it doesn't exist, then merge builddefaults[config] structure
  1215. # into kube_admission_plugin_config
  1216. if 'kube_admission_plugin_config' not in facts['master']:
  1217. facts['master']['kube_admission_plugin_config'] = dict()
  1218. if 'config' in builddefaults and ('http_proxy' in builddefaults or \
  1219. 'https_proxy' in builddefaults):
  1220. facts['master']['kube_admission_plugin_config'].update(builddefaults['config'])
  1221. facts['builddefaults'] = builddefaults
  1222. return facts
  1223. # pylint: disable=too-many-statements
  1224. def set_container_facts_if_unset(facts):
  1225. """ Set containerized facts.
  1226. Args:
  1227. facts (dict): existing facts
  1228. Returns:
  1229. dict: the facts dict updated with the generated containerization
  1230. facts
  1231. """
  1232. deployment_type = facts['common']['deployment_type']
  1233. if deployment_type in ['enterprise', 'openshift-enterprise']:
  1234. master_image = 'openshift3/ose'
  1235. cli_image = master_image
  1236. node_image = 'openshift3/node'
  1237. ovs_image = 'openshift3/openvswitch'
  1238. etcd_image = 'registry.access.redhat.com/rhel7/etcd'
  1239. pod_image = 'openshift3/ose-pod'
  1240. router_image = 'openshift3/ose-haproxy-router'
  1241. registry_image = 'openshift3/ose-docker-registry'
  1242. deployer_image = 'openshift3/ose-deployer'
  1243. elif deployment_type == 'atomic-enterprise':
  1244. master_image = 'aep3_beta/aep'
  1245. cli_image = master_image
  1246. node_image = 'aep3_beta/node'
  1247. ovs_image = 'aep3_beta/openvswitch'
  1248. etcd_image = 'registry.access.redhat.com/rhel7/etcd'
  1249. pod_image = 'aep3_beta/aep-pod'
  1250. router_image = 'aep3_beta/aep-haproxy-router'
  1251. registry_image = 'aep3_beta/aep-docker-registry'
  1252. deployer_image = 'aep3_beta/aep-deployer'
  1253. else:
  1254. master_image = 'openshift/origin'
  1255. cli_image = master_image
  1256. node_image = 'openshift/node'
  1257. ovs_image = 'openshift/openvswitch'
  1258. etcd_image = 'registry.access.redhat.com/rhel7/etcd'
  1259. pod_image = 'openshift/origin-pod'
  1260. router_image = 'openshift/origin-haproxy-router'
  1261. registry_image = 'openshift/origin-docker-registry'
  1262. deployer_image = 'openshift/origin-deployer'
  1263. facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
  1264. if 'is_containerized' not in facts['common']:
  1265. facts['common']['is_containerized'] = facts['common']['is_atomic']
  1266. if 'cli_image' not in facts['common']:
  1267. facts['common']['cli_image'] = cli_image
  1268. if 'pod_image' not in facts['common']:
  1269. facts['common']['pod_image'] = pod_image
  1270. if 'router_image' not in facts['common']:
  1271. facts['common']['router_image'] = router_image
  1272. if 'registry_image' not in facts['common']:
  1273. facts['common']['registry_image'] = registry_image
  1274. if 'deployer_image' not in facts['common']:
  1275. facts['common']['deployer_image'] = deployer_image
  1276. if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
  1277. facts['etcd']['etcd_image'] = etcd_image
  1278. if 'master' in facts and 'master_image' not in facts['master']:
  1279. facts['master']['master_image'] = master_image
  1280. if 'node' in facts:
  1281. if 'node_image' not in facts['node']:
  1282. facts['node']['node_image'] = node_image
  1283. if 'ovs_image' not in facts['node']:
  1284. facts['node']['ovs_image'] = ovs_image
  1285. if safe_get_bool(facts['common']['is_containerized']):
  1286. facts['common']['admin_binary'] = '/usr/local/bin/oadm'
  1287. facts['common']['client_binary'] = '/usr/local/bin/oc'
  1288. return facts
  1289. def set_installed_variant_rpm_facts(facts):
  1290. """ Set RPM facts of installed variant
  1291. Args:
  1292. facts (dict): existing facts
  1293. Returns:
  1294. dict: the facts dict updated with installed_variant_rpms
  1295. """
  1296. installed_rpms = []
  1297. for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
  1298. optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
  1299. variant_rpms = [base_rpm] + \
  1300. ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
  1301. ['tuned-profiles-%s-node' % base_rpm]
  1302. for rpm in variant_rpms:
  1303. exit_code, _, _ = module.run_command(['rpm', '-q', rpm])
  1304. if exit_code == 0:
  1305. installed_rpms.append(rpm)
  1306. facts['common']['installed_variant_rpms'] = installed_rpms
  1307. return facts
  1308. class OpenShiftFactsInternalError(Exception):
  1309. """Origin Facts Error"""
  1310. pass
  1311. class OpenShiftFactsUnsupportedRoleError(Exception):
  1312. """Origin Facts Unsupported Role Error"""
  1313. pass
  1314. class OpenShiftFactsFileWriteError(Exception):
  1315. """Origin Facts File Write Error"""
  1316. pass
  1317. class OpenShiftFactsMetadataUnavailableError(Exception):
  1318. """Origin Facts Metadata Unavailable Error"""
  1319. pass
class OpenShiftFacts(object):
    """ Origin Facts

        Gathers system facts, guesses the cloud provider, merges local
        (user-supplied) facts with generated defaults, validates them, and
        persists the result to a local facts file.

        Attributes:
            facts (dict): facts for the host
        Args:
            module (AnsibleModule): an AnsibleModule object
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                '.' notation ex: ['master.master_count']
        Raises:
            OpenShiftFactsUnsupportedRoleError:
    """
    # Roles this module knows how to generate defaults for; the `role`
    # constructor argument must be one of these.
    known_roles = ['builddefaults',
                   'clock',
                   'cloudprovider',
                   'common',
                   'docker',
                   'etcd',
                   'hosted',
                   'loadbalancer',
                   'master',
                   'node']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None,
                 openshift_env=None,
                 openshift_env_structures=None,
                 protected_facts_to_overwrite=None):
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role
        try:
            # ansible-2.1: ansible_facts accepts a gather_subset argument,
            # so limit collection to the subsets this module actually uses.
            # pylint: disable=too-many-function-args
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])
        except TypeError:
            # ansible-1.9.x,ansible-2.0.x: single-argument signature.
            self.system_facts = ansible_facts(module)
        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite,
                                         openshift_env,
                                         openshift_env_structures,
                                         protected_facts_to_overwrite)

    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite,
                       openshift_env,
                       openshift_env_structures,
                       protected_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift_env facts for overriding generated defaults
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The generated facts
        """
        # Merge user-supplied facts into the persisted local facts first;
        # this also writes the updated local facts file as a side effect.
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite,
                                            openshift_env,
                                            openshift_env_structures,
                                            protected_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        # Layering order: defaults < provider facts < local facts, then a
        # pipeline of fact-derivation helpers that each fill in unset values.
        defaults = self.get_defaults(roles, deployment_type)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite,
                            protected_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        facts = set_url_facts_if_unset(facts)
        facts = set_project_cfg_facts_if_unset(facts)
        facts = set_flannel_facts_if_unset(facts)
        facts = set_nuage_facts_if_unset(facts)
        facts = set_node_schedulability(facts)
        facts = set_selectors(facts)
        facts = set_metrics_facts_if_unset(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_dnsmasq_facts_if_unset(facts)
        facts = set_manageiq_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        facts = set_proxy_facts(facts)
        # RPM queries only make sense on a non-containerized install.
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        return dict(openshift=facts)

    def get_defaults(self, roles, deployment_type):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host
                deployment_type (str): deployment type (e.g. 'origin')
            Returns:
                dict: The generated default facts
        """
        defaults = {}

        ip_addr = self.system_facts['default_ipv4']['address']
        # Prefer the fully-qualified hostname from `hostname -f`, falling
        # back to the gathered nodename/fqdn facts.
        exit_code, output, _ = module.run_command(['hostname', '-f'])
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['nodename'],
                           self.system_facts['fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr)

        defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc', admin_binary='oadm',
                                  dns_domain='cluster.local',
                                  install_examples=True,
                                  debug_level=2)

        if 'master' in roles:
            scheduler_predicates = [
                {"name": "MatchNodeSelector"},
                {"name": "PodFitsResources"},
                {"name": "PodFitsPorts"},
                {"name": "NoDiskConflict"},
                {"name": "Region", "argument": {"serviceAffinity" : {"labels" : ["region"]}}}
            ]
            scheduler_priorities = [
                {"name": "LeastRequestedPriority", "weight": 1},
                {"name": "SelectorSpreadPriority", "weight": 1},
                {"name": "Zone", "weight" : 2, "argument": {"serviceAntiAffinity" : {"label": "zone"}}}
            ]

            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_etcd=True, embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      scheduler_predicates=scheduler_predicates,
                                      scheduler_priorities=scheduler_priorities,
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='5s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)

        if 'docker' in roles:
            docker = dict(disable_push_dockerhub=False,
                          hosted_registry_insecure=True,
                          options='--log-driver=json-file --log-opt max-size=50m')
            # Only set version facts when docker is actually present.
            version_info = get_docker_version_info()
            if version_info is not None:
                docker['api_version'] = version_info['api_version']
                docker['version'] = version_info['version']
                docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10')
            defaults['docker'] = docker

        if 'clock' in roles:
            # Detect whether chrony (vs ntpd) is the available time daemon.
            exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])
            if exit_code == 0:
                chrony_installed = True
            else:
                chrony_installed = False
            defaults['clock'] = dict(
                enabled=True,
                chrony_installed=chrony_installed)

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        if 'hosted' in roles or self.role == 'hosted':
            # Default storage configuration for the hosted metrics and
            # registry components (sizes/paths are installer defaults).
            defaults['hosted'] = dict(
                metrics=dict(
                    deploy=False,
                    duration=7,
                    resolution='10s',
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='metrics',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        openstack=dict(
                            filesystem='ext4',
                            volumeID='123'),
                        host=None,
                        access_modes=['ReadWriteMany'],
                        create_pv=True
                    )
                ),
                registry=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='registry',
                            size='5Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        host=None,
                        access_modes=['ReadWriteMany'],
                        create_pv=True
                    )
                ),
                router=dict()
            )

        if 'loadbalancer' in roles:
            loadbalancer = dict(frontend_port='8443',
                                default_maxconn='20000',
                                global_maxconn='20000',
                                limit_nofile='100000')
            defaults['loadbalancer'] = loadbalancer

        return defaults

    def guess_host_provider(self):
        """ Guess the host provider

            Probes DMI/BIOS data and virtualization facts to detect GCE,
            AWS or OpenStack, then fetches the provider's metadata service.

            Returns:
                dict: The generated default facts for the detected provider
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['product_name']
        product_version = self.system_facts['product_version']
        virt_type = self.system_facts['virtualization_type']
        virt_role = self.system_facts['virtualization_role']
        provider = None
        metadata = None

        # TODO: this is not exposed through module_utils/facts.py in ansible,
        # need to create PR for ansible to expose it
        bios_vendor = get_file_content(
            '/sys/devices/virtual/dmi/id/bios_vendor'
        )
        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            # (sensitive data that should not land in the facts file).
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif (virt_type == 'xen' and virt_role == 'guest'
              and re.match(r'.*\.amazon$', product_version)):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)

            if metadata:
                # Also fetch the EC2-compatible metadata endpoint that
                # OpenStack exposes; some facts are only available there.
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)

    def init_provider_facts(self):
        """ Initialize the provider facts

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts

    @staticmethod
    def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
        """ Split openshift_env facts based on openshift_env structures.

            Args:
                openshift_env_fact (string): the openshift_env fact to split
                                             ex: 'openshift_cloudprovider_openstack_auth_url'
                openshift_env_structures (list): a list of structures to determine fact keys
                                                 ex: ['openshift.cloudprovider.openstack.*']
            Returns:
                list: a list of keys that represent the fact
                      ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
        """
        # By default, we'll split an openshift_env fact by underscores.
        fact_keys = openshift_env_fact.split('_')

        # Determine if any of the provided variable structures match the fact.
        matching_structure = None
        if openshift_env_structures != None:
            for structure in openshift_env_structures:
                if re.match(structure, openshift_env_fact):
                    matching_structure = structure
        # Fact didn't match any variable structures so return the default fact keys.
        if matching_structure is None:
            return fact_keys

        final_keys = []
        structure_keys = matching_structure.split('.')
        for structure_key in structure_keys:
            # Matched current key. Add to final keys.
            # NOTE(review): .index() finds the FIRST occurrence, so structures
            # with repeated key names may not split as intended — confirm.
            if structure_key == fact_keys[structure_keys.index(structure_key)]:
                final_keys.append(structure_key)
            # Wildcard means we will be taking everything from here to the end of the fact.
            elif structure_key == '*':
                final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
            # Shouldn't have gotten here, return the fact keys.
            else:
                return fact_keys
        return final_keys

    # Disabling too-many-branches and too-many-locals.
    # This should be cleaned up as a TODO item.
    #pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None,
                         openshift_env=None,
                         openshift_env_structures=None,
                         protected_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift env facts to set
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        if openshift_env != {} and openshift_env != None:
            for fact, value in openshift_env.iteritems():
                # Expand each flat openshift_env var (e.g. openshift_x_y_z)
                # into a nested dict keyed by its split fact keys.
                oo_env_facts = dict()
                current_level = oo_env_facts
                keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
                # Only apply vars that belong to this host's role.
                if len(keys) > 0 and keys[0] != self.role:
                    continue
                for key in keys:
                    if key == keys[-1]:
                        current_level[key] = value
                    elif key not in current_level:
                        current_level[key] = dict()
                        current_level = current_level[key]
                facts_to_set = merge_facts(orig=facts_to_set,
                                           new=oo_env_facts,
                                           additive_facts_to_overwrite=[],
                                           protected_facts_to_overwrite=[])

        local_facts = get_local_facts_from_file(self.filename)

        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite,
                                      protected_facts_to_overwrite)

        if 'docker' in new_local_facts:
            # remove duplicate and empty strings from registry lists
            for cat in ['additional', 'blocked', 'insecure']:
                key = '{0}_registries'.format(cat)
                if key in new_local_facts['docker']:
                    val = new_local_facts['docker'][key]
                    if isinstance(val, basestring):
                        val = [x.strip() for x in val.split(',')]
                    new_local_facts['docker'][key] = list(set(val) - set(['']))

            # Convert legacy log_options comma sep string to a list if present:
            if 'log_options' in new_local_facts['docker'] and \
               isinstance(new_local_facts['docker']['log_options'], basestring):
                new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')

        new_local_facts = self.remove_empty_facts(new_local_facts)

        # Only validate and persist when something actually changed, and
        # honor Ansible check mode by skipping the write.
        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts

    def remove_empty_facts(self, facts=None):
        """ Remove empty facts

            Recursively drops keys whose value is '', [''] or None.

            Args:
                facts (dict): facts to clean
            Returns:
                dict: the cleaned facts
        """
        facts_to_remove = []
        for fact, value in facts.iteritems():
            if isinstance(facts[fact], dict):
                facts[fact] = self.remove_empty_facts(facts[fact])
            else:
                if value == "" or value == [""] or value is None:
                    facts_to_remove.append(fact)
        # Deletion is deferred so the dict isn't mutated while iterating.
        for fact in facts_to_remove:
            del facts[fact]
        return facts

    def validate_local_facts(self, facts=None):
        """ Validate local facts

            Fails the module (via module.fail_json) when invalid facts
            are detected.

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            msg = 'Invalid facts detected:\n'
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            module.fail_json(msg=msg,
                             changed=self.changed)

    # disabling pylint errors for line-too-long since we're dealing
    # with best effort reduction of error messages here.
    # disabling errors for too-many-branches since we require checking
    # many conditions.
    # pylint: disable=line-too-long, too-many-branches
    @staticmethod
    def validate_master_facts(facts, invalid_facts):
        """ Validate master facts

            Args:
                facts (dict): local facts to validate
                invalid_facts (dict): collected invalid_facts

            Returns:
                dict: Invalid facts
        """
        if 'master' in facts:
            # openshift.master.session_auth_secrets
            if 'session_auth_secrets' in facts['master']:
                session_auth_secrets = facts['master']['session_auth_secrets']
                if not issubclass(type(session_auth_secrets), list):
                    invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
                elif 'session_encryption_secrets' not in facts['master']:
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption secrets must be set '
                                                             'if openshift_master_session_auth_secrets is provided.')
                elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
                                                             'openshift_master_session_encryption_secrets must be '
                                                             'equal length.')
                else:
                    for secret in session_auth_secrets:
                        # HMAC authentication secrets must be at least 32 chars.
                        if len(secret) < 32:
                            invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
                                                                     'Secrets must be at least 32 characters in length.')
            # openshift.master.session_encryption_secrets
            if 'session_encryption_secrets' in facts['master']:
                session_encryption_secrets = facts['master']['session_encryption_secrets']
                if not issubclass(type(session_encryption_secrets), list):
                    invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
                elif 'session_auth_secrets' not in facts['master']:
                    invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
                                                                   'set if openshift_master_session_encryption_secrets '
                                                                   'is provided.')
                else:
                    for secret in session_encryption_secrets:
                        # AES key lengths: 16, 24 or 32 bytes.
                        if len(secret) not in [16, 24, 32]:
                            invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
                                                                           'Secrets must be 16, 24, or 32 characters in length.')
        return invalid_facts
  1805. def main():
  1806. """ main """
  1807. # disabling pylint errors for global-variable-undefined and invalid-name
  1808. # for 'global module' usage, since it is required to use ansible_facts
  1809. # pylint: disable=global-variable-undefined, invalid-name
  1810. global module
  1811. module = AnsibleModule(
  1812. argument_spec=dict(
  1813. role=dict(default='common', required=False,
  1814. choices=OpenShiftFacts.known_roles),
  1815. local_facts=dict(default=None, type='dict', required=False),
  1816. additive_facts_to_overwrite=dict(default=[], type='list', required=False),
  1817. openshift_env=dict(default={}, type='dict', required=False),
  1818. openshift_env_structures=dict(default=[], type='list', required=False),
  1819. protected_facts_to_overwrite=dict(default=[], type='list', required=False),
  1820. ),
  1821. supports_check_mode=True,
  1822. add_file_common_args=True,
  1823. )
  1824. role = module.params['role']
  1825. local_facts = module.params['local_facts']
  1826. additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']
  1827. openshift_env = module.params['openshift_env']
  1828. openshift_env_structures = module.params['openshift_env_structures']
  1829. protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']
  1830. fact_file = '/etc/ansible/facts.d/openshift.fact'
  1831. openshift_facts = OpenShiftFacts(role,
  1832. fact_file,
  1833. local_facts,
  1834. additive_facts_to_overwrite,
  1835. openshift_env,
  1836. openshift_env_structures,
  1837. protected_facts_to_overwrite)
  1838. file_params = module.params.copy()
  1839. file_params['path'] = fact_file
  1840. file_args = module.load_file_common_arguments(file_params)
  1841. changed = module.set_fs_attributes_if_different(file_args,
  1842. openshift_facts.changed)
  1843. return module.exit_json(changed=changed,
  1844. ansible_facts=openshift_facts.facts)
  1845. # ignore pylint errors related to the module_utils import
  1846. # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
  1847. # import module snippets
  1848. from ansible.module_utils.basic import *
  1849. from ansible.module_utils.facts import *
  1850. from ansible.module_utils.urls import *
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()