openshift_facts.py 61 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538
  1. #!/usr/bin/python
  2. # pylint: disable=too-many-lines
  3. # -*- coding: utf-8 -*-
  4. # Reason: Disable pylint too-many-lines because we don't want to split up this file.
  5. # Status: Permanently disabled to keep this module as self-contained as possible.
  6. """Ansible module for retrieving and setting openshift related facts"""
  7. # pylint: disable=no-name-in-module, import-error, wrong-import-order
  8. import copy
  9. import errno
  10. import json
  11. import re
  12. import os
  13. import yaml
  14. import struct
  15. import socket
  16. import ipaddress
  17. from distutils.util import strtobool
  18. from ansible.module_utils.six import text_type
  19. from ansible.module_utils.six import string_types
  20. from ansible.module_utils.six.moves import configparser
  21. # ignore pylint errors related to the module_utils import
  22. # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
  23. # import module snippets
  24. from ansible.module_utils.basic import * # noqa: F403
  25. from ansible.module_utils.facts import * # noqa: F403
  26. from ansible.module_utils.urls import * # noqa: F403
  27. from ansible.module_utils.six import iteritems, itervalues
  28. from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
  29. from ansible.module_utils._text import to_native
  30. HAVE_DBUS = False
  31. try:
  32. from dbus import SystemBus, Interface
  33. from dbus.exceptions import DBusException
  34. HAVE_DBUS = True
  35. except ImportError:
  36. pass
  37. DOCUMENTATION = '''
  38. ---
  39. module: openshift_facts
  40. short_description: Cluster Facts
  41. author: Jason DeTiberus
  42. requirements: [ ]
  43. '''
  44. EXAMPLES = '''
  45. '''
  46. # TODO: We should add a generic migration function that takes source and destination
  47. # paths and does the right thing rather than one function for common, one for node, etc.
  48. def migrate_common_facts(facts):
  49. """ Migrate facts from various roles into common """
  50. params = {
  51. 'node': ('portal_net'),
  52. 'master': ('portal_net')
  53. }
  54. if 'common' not in facts:
  55. facts['common'] = {}
  56. # pylint: disable=consider-iterating-dictionary
  57. for role in params.keys():
  58. if role in facts:
  59. for param in params[role]:
  60. if param in facts[role]:
  61. facts['common'][param] = facts[role].pop(param)
  62. return facts
  63. def migrate_admission_plugin_facts(facts):
  64. """ Apply migrations for admission plugin facts """
  65. if 'master' in facts:
  66. if 'kube_admission_plugin_config' in facts['master']:
  67. if 'admission_plugin_config' not in facts['master']:
  68. facts['master']['admission_plugin_config'] = dict()
  69. # Merge existing kube_admission_plugin_config with admission_plugin_config.
  70. facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
  71. facts['master']['kube_admission_plugin_config'],
  72. additive_facts_to_overwrite=[])
  73. # Remove kube_admission_plugin_config fact
  74. facts['master'].pop('kube_admission_plugin_config', None)
  75. return facts
  76. def migrate_local_facts(facts):
  77. """ Apply migrations of local facts """
  78. migrated_facts = copy.deepcopy(facts)
  79. migrated_facts = migrate_common_facts(migrated_facts)
  80. migrated_facts = migrate_admission_plugin_facts(migrated_facts)
  81. return migrated_facts
  82. def first_ip(network):
  83. """ Return the first IPv4 address in network
  84. Args:
  85. network (str): network in CIDR format
  86. Returns:
  87. str: first IPv4 address
  88. """
  89. atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0] # noqa: E731
  90. itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr)) # noqa: E731
  91. (address, netmask) = network.split('/')
  92. netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
  93. return itoa((atoi(address) & netmask_i) + 1)
  94. def hostname_valid(hostname):
  95. """ Test if specified hostname should be considered valid
  96. Args:
  97. hostname (str): hostname to test
  98. Returns:
  99. bool: True if valid, otherwise False
  100. """
  101. if (not hostname or
  102. hostname.startswith('localhost') or
  103. hostname.endswith('localdomain') or
  104. # OpenShift will not allow a node with more than 63 chars in name.
  105. len(hostname) > 63):
  106. return False
  107. return True
  108. def choose_hostname(hostnames=None, fallback=''):
  109. """ Choose a hostname from the provided hostnames
  110. Given a list of hostnames and a fallback value, choose a hostname to
  111. use. This function will prefer fqdns if they exist (excluding any that
  112. begin with localhost or end with localdomain) over ip addresses.
  113. Args:
  114. hostnames (list): list of hostnames
  115. fallback (str): default value to set if hostnames does not contain
  116. a valid hostname
  117. Returns:
  118. str: chosen hostname
  119. """
  120. hostname = fallback
  121. if hostnames is None:
  122. return hostname
  123. ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
  124. ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
  125. hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]
  126. for host_list in (hosts, ips):
  127. for host in host_list:
  128. if hostname_valid(host):
  129. return host
  130. return hostname
  131. def query_metadata(metadata_url, headers=None, expect_json=False):
  132. """ Return metadata from the provided metadata_url
  133. Args:
  134. metadata_url (str): metadata url
  135. headers (dict): headers to set for metadata request
  136. expect_json (bool): does the metadata_url return json
  137. Returns:
  138. dict or list: metadata request result
  139. """
  140. result, info = fetch_url(module, metadata_url, headers=headers) # noqa: F405
  141. if info['status'] != 200:
  142. raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
  143. if expect_json:
  144. return module.from_json(to_native(result.read())) # noqa: F405
  145. else:
  146. return [to_native(line.strip()) for line in result.readlines()]
  147. def walk_metadata(metadata_url, headers=None, expect_json=False):
  148. """ Walk the metadata tree and return a dictionary of the entire tree
  149. Args:
  150. metadata_url (str): metadata url
  151. headers (dict): headers to set for metadata request
  152. expect_json (bool): does the metadata_url return json
  153. Returns:
  154. dict: the result of walking the metadata tree
  155. """
  156. metadata = dict()
  157. for line in query_metadata(metadata_url, headers, expect_json):
  158. if line.endswith('/') and not line == 'public-keys/':
  159. key = line[:-1]
  160. metadata[key] = walk_metadata(metadata_url + line,
  161. headers, expect_json)
  162. else:
  163. results = query_metadata(metadata_url + line, headers,
  164. expect_json)
  165. if len(results) == 1:
  166. # disable pylint maybe-no-member because overloaded use of
  167. # the module name causes pylint to not detect that results
  168. # is an array or hash
  169. # pylint: disable=maybe-no-member
  170. metadata[line] = results.pop()
  171. else:
  172. metadata[line] = results
  173. return metadata
  174. def get_provider_metadata(metadata_url, supports_recursive=False,
  175. headers=None, expect_json=False):
  176. """ Retrieve the provider metadata
  177. Args:
  178. metadata_url (str): metadata url
  179. supports_recursive (bool): does the provider metadata api support
  180. recursion
  181. headers (dict): headers to set for metadata request
  182. expect_json (bool): does the metadata_url return json
  183. Returns:
  184. dict: the provider metadata
  185. """
  186. try:
  187. if supports_recursive:
  188. metadata = query_metadata(metadata_url, headers,
  189. expect_json)
  190. else:
  191. metadata = walk_metadata(metadata_url, headers,
  192. expect_json)
  193. except OpenShiftFactsMetadataUnavailableError:
  194. metadata = None
  195. return metadata
  196. def normalize_gce_facts(metadata, facts):
  197. """ Normalize gce facts
  198. Args:
  199. metadata (dict): provider metadata
  200. facts (dict): facts to update
  201. Returns:
  202. dict: the result of adding the normalized metadata to the provided
  203. facts dict
  204. """
  205. for interface in metadata['instance']['networkInterfaces']:
  206. int_info = dict(ips=[interface['ip']], network_type='gce')
  207. int_info['public_ips'] = [ac['externalIp'] for ac
  208. in interface['accessConfigs']]
  209. int_info['public_ips'].extend(interface['forwardedIps'])
  210. _, _, network_id = interface['network'].rpartition('/')
  211. int_info['network_id'] = network_id
  212. facts['network']['interfaces'].append(int_info)
  213. _, _, zone = metadata['instance']['zone'].rpartition('/')
  214. facts['zone'] = zone
  215. # GCE currently only supports a single interface
  216. facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
  217. pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
  218. facts['network']['public_ip'] = pub_ip
  219. # Split instance hostname from GCE metadata to use the short instance name
  220. facts['network']['hostname'] = metadata['instance']['hostname'].split('.')[0]
  221. # TODO: attempt to resolve public_hostname
  222. facts['network']['public_hostname'] = facts['network']['public_ip']
  223. return facts
  224. def normalize_aws_facts(metadata, facts):
  225. """ Normalize aws facts
  226. Args:
  227. metadata (dict): provider metadata
  228. facts (dict): facts to update
  229. Returns:
  230. dict: the result of adding the normalized metadata to the provided
  231. facts dict
  232. """
  233. for interface in sorted(
  234. metadata['network']['interfaces']['macs'].values(),
  235. key=lambda x: x['device-number']
  236. ):
  237. int_info = dict()
  238. var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
  239. for ips_var, int_var in iteritems(var_map):
  240. ips = interface.get(int_var)
  241. if isinstance(ips, string_types):
  242. int_info[ips_var] = [ips]
  243. else:
  244. int_info[ips_var] = ips
  245. if 'vpc-id' in interface:
  246. int_info['network_type'] = 'vpc'
  247. else:
  248. int_info['network_type'] = 'classic'
  249. if int_info['network_type'] == 'vpc':
  250. int_info['network_id'] = interface['subnet-id']
  251. else:
  252. int_info['network_id'] = None
  253. facts['network']['interfaces'].append(int_info)
  254. facts['zone'] = metadata['placement']['availability-zone']
  255. # TODO: actually attempt to determine default local and public ips
  256. # by using the ansible default ip fact and the ipv4-associations
  257. # from the ec2 metadata
  258. facts['network']['ip'] = metadata.get('local-ipv4')
  259. facts['network']['public_ip'] = metadata.get('public-ipv4')
  260. # TODO: verify that local hostname makes sense and is resolvable
  261. facts['network']['hostname'] = metadata.get('local-hostname')
  262. # TODO: verify that public hostname makes sense and is resolvable
  263. facts['network']['public_hostname'] = metadata.get('public-hostname')
  264. return facts
  265. def normalize_openstack_facts(metadata, facts):
  266. """ Normalize openstack facts
  267. Args:
  268. metadata (dict): provider metadata
  269. facts (dict): facts to update
  270. Returns:
  271. dict: the result of adding the normalized metadata to the provided
  272. facts dict
  273. """
  274. # openstack ec2 compat api does not support network interfaces and
  275. # the version tested on did not include the info in the openstack
  276. # metadata api, should be updated if neutron exposes this.
  277. facts['zone'] = metadata['availability_zone']
  278. local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
  279. facts['network']['ip'] = local_ipv4
  280. facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
  281. for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
  282. ('public_hostname', 'public-hostname', 'public-ipv4')]:
  283. try:
  284. if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
  285. facts['network'][f_var] = metadata['ec2_compat'][h_var]
  286. else:
  287. facts['network'][f_var] = metadata['ec2_compat'][ip_var]
  288. except socket.gaierror:
  289. facts['network'][f_var] = metadata['ec2_compat'][ip_var]
  290. return facts
  291. def normalize_provider_facts(provider, metadata):
  292. """ Normalize provider facts
  293. Args:
  294. provider (str): host provider
  295. metadata (dict): provider metadata
  296. Returns:
  297. dict: the normalized provider facts
  298. """
  299. if provider is None or metadata is None:
  300. return {}
  301. # TODO: test for ipv6_enabled where possible (gce, aws do not support)
  302. # and configure ipv6 facts if available
  303. # TODO: add support for setting user_data if available
  304. facts = dict(name=provider, metadata=metadata,
  305. network=dict(interfaces=[], ipv6_enabled=False))
  306. if provider == 'gce':
  307. facts = normalize_gce_facts(metadata, facts)
  308. elif provider == 'aws':
  309. facts = normalize_aws_facts(metadata, facts)
  310. elif provider == 'openstack':
  311. facts = normalize_openstack_facts(metadata, facts)
  312. return facts
  313. def set_identity_providers_if_unset(facts):
  314. """ Set identity_providers fact if not already present in facts dict
  315. Args:
  316. facts (dict): existing facts
  317. Returns:
  318. dict: the facts dict updated with the generated identity providers
  319. facts if they were not already present
  320. """
  321. if 'master' in facts:
  322. deployment_type = facts['common']['deployment_type']
  323. if 'identity_providers' not in facts['master']:
  324. identity_provider = dict(
  325. name='allow_all', challenge=True, login=True,
  326. kind='AllowAllPasswordIdentityProvider'
  327. )
  328. if deployment_type == 'openshift-enterprise':
  329. identity_provider = dict(
  330. name='deny_all', challenge=True, login=True,
  331. kind='DenyAllPasswordIdentityProvider'
  332. )
  333. facts['master']['identity_providers'] = [identity_provider]
  334. return facts
def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict

        Derives api/console/etcd URL facts (and the loopback kubeconfig
        naming facts) from hostname, port, and use_ssl facts. Existing
        values are never overwritten (setdefault throughout).

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
                were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        # A configured cluster (public) hostname takes precedence over the
        # host's own (public) hostname for the API endpoints.
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']

        # Per-endpoint SSL flags; api/console flags are shared across their
        # public/loopback variants.
        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )

        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )

        etcd_urls = []
        if etcd_hosts != '':
            # External etcd: build one URL per configured host.
            facts['master']['etcd_port'] = ports['etcd']
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            # No external etcd hosts: etcd is colocated with this master.
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]

        facts['master'].setdefault('etcd_urls', etcd_urls)

        # API endpoint URLs (loopback always uses the host's own hostname).
        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]

        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))

        # kubeconfig identifiers may not contain '.', hence the replace().
        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)

        # Console endpoint URLs include the console path component.
        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]

        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))

    return facts
  397. def set_aggregate_facts(facts):
  398. """ Set aggregate facts
  399. Args:
  400. facts (dict): existing facts
  401. Returns:
  402. dict: the facts dict updated with aggregated facts
  403. """
  404. all_hostnames = set()
  405. internal_hostnames = set()
  406. kube_svc_ip = first_ip(facts['common']['portal_net'])
  407. if 'common' in facts:
  408. all_hostnames.add(facts['common']['hostname'])
  409. all_hostnames.add(facts['common']['public_hostname'])
  410. all_hostnames.add(facts['common']['ip'])
  411. all_hostnames.add(facts['common']['public_ip'])
  412. facts['common']['kube_svc_ip'] = kube_svc_ip
  413. internal_hostnames.add(facts['common']['hostname'])
  414. internal_hostnames.add(facts['common']['ip'])
  415. cluster_domain = facts['common']['dns_domain']
  416. if 'master' in facts:
  417. if 'cluster_hostname' in facts['master']:
  418. all_hostnames.add(facts['master']['cluster_hostname'])
  419. if 'cluster_public_hostname' in facts['master']:
  420. all_hostnames.add(facts['master']['cluster_public_hostname'])
  421. svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
  422. 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
  423. 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
  424. all_hostnames.update(svc_names)
  425. internal_hostnames.update(svc_names)
  426. all_hostnames.add(kube_svc_ip)
  427. internal_hostnames.add(kube_svc_ip)
  428. facts['common']['all_hostnames'] = list(all_hostnames)
  429. facts['common']['internal_hostnames'] = list(internal_hostnames)
  430. return facts
  431. def set_deployment_facts_if_unset(facts):
  432. """ Set Facts that vary based on deployment_type. This currently
  433. includes master.registry_url
  434. Args:
  435. facts (dict): existing facts
  436. Returns:
  437. dict: the facts dict updated with the generated deployment_type
  438. facts
  439. """
  440. if 'master' in facts:
  441. deployment_type = facts['common']['deployment_type']
  442. openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
  443. if 'disabled_features' not in facts['master']:
  444. if facts['common']['deployment_subtype'] == 'registry':
  445. facts['master']['disabled_features'] = openshift_features
  446. if 'registry_url' not in facts['master']:
  447. registry_url = 'openshift/origin-${component}:${version}'
  448. if deployment_type == 'openshift-enterprise':
  449. registry_url = 'openshift3/ose-${component}:${version}'
  450. facts['master']['registry_url'] = registry_url
  451. return facts
  452. def set_sdn_facts_if_unset(facts, system_facts):
  453. """ Set sdn facts if not already present in facts dict
  454. Args:
  455. facts (dict): existing facts
  456. system_facts (dict): ansible_facts
  457. Returns:
  458. dict: the facts dict updated with the generated sdn facts if they
  459. were not already present
  460. """
  461. if 'master' in facts:
  462. # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
  463. # these might be overridden if they exist in the master config file
  464. sdn_cluster_network_cidr = '10.128.0.0/14'
  465. sdn_host_subnet_length = '9'
  466. master_cfg_path = os.path.join(facts['common']['config_base'],
  467. 'master/master-config.yaml')
  468. if os.path.isfile(master_cfg_path):
  469. with open(master_cfg_path, 'r') as master_cfg_f:
  470. config = yaml.safe_load(master_cfg_f.read())
  471. if 'networkConfig' in config:
  472. if 'clusterNetworkCIDR' in config['networkConfig']:
  473. sdn_cluster_network_cidr = \
  474. config['networkConfig']['clusterNetworkCIDR']
  475. if 'hostSubnetLength' in config['networkConfig']:
  476. sdn_host_subnet_length = \
  477. config['networkConfig']['hostSubnetLength']
  478. if 'sdn_cluster_network_cidr' not in facts['master']:
  479. facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
  480. if 'sdn_host_subnet_length' not in facts['master']:
  481. facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length
  482. if 'node' in facts and 'sdn_mtu' not in facts['node']:
  483. node_ip = facts['common']['ip']
  484. # default MTU if interface MTU cannot be detected
  485. facts['node']['sdn_mtu'] = '1450'
  486. for val in itervalues(system_facts):
  487. if isinstance(val, dict) and 'mtu' in val:
  488. mtu = val['mtu']
  489. if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
  490. facts['node']['sdn_mtu'] = str(mtu - 50)
  491. return facts
  492. def set_nodename(facts):
  493. """ set nodename """
  494. if 'node' in facts and 'common' in facts:
  495. if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
  496. facts['node']['nodename'] = facts['provider']['metadata']['instance']['hostname'].split('.')[0]
  497. # TODO: The openstack cloudprovider nodename setting was too opinionaed.
  498. # It needs to be generalized before it can be enabled again.
  499. # elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
  500. # facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
  501. else:
  502. facts['node']['nodename'] = facts['common']['hostname'].lower()
  503. return facts
  504. def format_url(use_ssl, hostname, port, path=''):
  505. """ Format url based on ssl flag, hostname, port and path
  506. Args:
  507. use_ssl (bool): is ssl enabled
  508. hostname (str): hostname
  509. port (str): port
  510. path (str): url path
  511. Returns:
  512. str: The generated url string
  513. """
  514. scheme = 'https' if use_ssl else 'http'
  515. netloc = hostname
  516. if (use_ssl and port != '443') or (not use_ssl and port != '80'):
  517. netloc += ":%s" % port
  518. try:
  519. url = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
  520. except AttributeError:
  521. # pylint: disable=undefined-variable
  522. url = urlunparse((scheme, netloc, path, '', '', ''))
  523. return url
def get_current_config(facts):
    """ Get current openshift config

        Collects the list of configured roles and, when the openshift binary
        and a kubeconfig are present, a copy of the kubeconfig with secrets
        masked.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]

        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            # Node kubeconfigs live in a per-host subdirectory.
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )

        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
            try:
                _, output, _ = module.run_command(  # noqa: F405
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)

                # Mask certificate/key material before storing the config
                # in facts so secrets never appear in fact output.
                cad = 'certificate-authority-data'
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass

                current_config['kubeconfig'] = config

            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass

    return current_config
  578. def build_controller_args(facts):
  579. """ Build master controller_args """
  580. cloud_cfg_path = os.path.join(facts['common']['config_base'],
  581. 'cloudprovider')
  582. if 'master' in facts:
  583. controller_args = {}
  584. if 'cloudprovider' in facts:
  585. if 'kind' in facts['cloudprovider']:
  586. if facts['cloudprovider']['kind'] == 'aws':
  587. controller_args['cloud-provider'] = ['aws']
  588. controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
  589. if facts['cloudprovider']['kind'] == 'openstack':
  590. controller_args['cloud-provider'] = ['openstack']
  591. controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
  592. if facts['cloudprovider']['kind'] == 'gce':
  593. controller_args['cloud-provider'] = ['gce']
  594. controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
  595. if controller_args != {}:
  596. facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [])
  597. return facts
  598. def build_api_server_args(facts):
  599. """ Build master api_server_args """
  600. cloud_cfg_path = os.path.join(facts['common']['config_base'],
  601. 'cloudprovider')
  602. if 'master' in facts:
  603. api_server_args = {}
  604. if 'cloudprovider' in facts:
  605. if 'kind' in facts['cloudprovider']:
  606. if facts['cloudprovider']['kind'] == 'aws':
  607. api_server_args['cloud-provider'] = ['aws']
  608. api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
  609. if facts['cloudprovider']['kind'] == 'openstack':
  610. api_server_args['cloud-provider'] = ['openstack']
  611. api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
  612. if facts['cloudprovider']['kind'] == 'gce':
  613. api_server_args['cloud-provider'] = ['gce']
  614. api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
  615. if api_server_args != {}:
  616. facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [])
  617. return facts
  618. def is_service_running(service):
  619. """ Queries systemd through dbus to see if the service is running """
  620. service_running = False
  621. try:
  622. bus = SystemBus()
  623. systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
  624. manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
  625. service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
  626. service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
  627. service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
  628. service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
  629. service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
  630. if service_load_state == 'loaded' and service_active_state == 'active':
  631. service_running = True
  632. except DBusException:
  633. # TODO: do not swallow exception, as it may be hiding useful debugging
  634. # information.
  635. pass
  636. return service_running
  637. def rpm_rebuilddb():
  638. """
  639. Runs rpm --rebuilddb to ensure the db is in good shape.
  640. """
  641. module.run_command(['/usr/bin/rpm', '--rebuilddb']) # noqa: F405
  642. def get_version_output(binary, version_cmd):
  643. """ runs and returns the version output for a command """
  644. cmd = []
  645. for item in (binary, version_cmd):
  646. if isinstance(item, list):
  647. cmd.extend(item)
  648. else:
  649. cmd.append(item)
  650. if os.path.isfile(cmd[0]):
  651. _, output, _ = module.run_command(cmd) # noqa: F405
  652. return output
  653. # We may need this in the future.
  654. def get_docker_version_info():
  655. """ Parses and returns the docker version info """
  656. result = None
  657. if is_service_running('docker') or is_service_running('container-engine'):
  658. version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
  659. if 'Server' in version_info:
  660. result = {
  661. 'api_version': version_info['Server']['API version'],
  662. 'version': version_info['Server']['Version']
  663. }
  664. return result
  665. def apply_provider_facts(facts, provider_facts):
  666. """ Apply provider facts to supplied facts dict
  667. Args:
  668. facts (dict): facts dict to update
  669. provider_facts (dict): provider facts to apply
  670. roles: host roles
  671. Returns:
  672. dict: the merged facts
  673. """
  674. if not provider_facts:
  675. return facts
  676. common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
  677. for h_var, ip_var in common_vars:
  678. ip_value = provider_facts['network'].get(ip_var)
  679. if ip_value:
  680. facts['common'][ip_var] = ip_value
  681. facts['common'][h_var] = choose_hostname(
  682. [provider_facts['network'].get(h_var)],
  683. facts['common'][h_var]
  684. )
  685. facts['provider'] = provider_facts
  686. return facts
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite):
    """ Recursively merge facts dicts

    Args:
        orig (dict): existing facts
        new (dict): facts to update
        additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                            '.' notation ex: ['master.named_certificates']
    Returns:
        dict: the merged facts (a new dict; inputs are deep-copied, not mutated)
    """
    # Facts whose list values are unioned rather than overwritten, unless
    # explicitly listed in additive_facts_to_overwrite.
    additive_facts = ['named_certificates']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config',
                            "builddefaults",
                            "buildoverrides"]

    facts = dict()
    for key, value in iteritems(orig):
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watchout for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], string_types):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                # NOTE(review): when either side is not a list, the key is
                # silently dropped from the result -- confirm this is intended.
                if isinstance(value, list) and isinstance(new[key], list):
                    # Order-preserving union of the two lists.
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)

    # Finally, carry over keys that only exist in the new facts.
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watchout for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], string_types):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts
  755. def save_local_facts(filename, facts):
  756. """ Save local facts
  757. Args:
  758. filename (str): local facts file
  759. facts (dict): facts to set
  760. """
  761. try:
  762. fact_dir = os.path.dirname(filename)
  763. try:
  764. os.makedirs(fact_dir) # try to make the directory
  765. except OSError as exception:
  766. if exception.errno != errno.EEXIST: # but it is okay if it is already there
  767. raise # pass any other exceptions up the chain
  768. with open(filename, 'w') as fact_file:
  769. fact_file.write(module.jsonify(facts)) # noqa: F405
  770. os.chmod(filename, 0o600)
  771. except (IOError, OSError) as ex:
  772. raise OpenShiftFactsFileWriteError(
  773. "Could not create fact file: %s, error: %s" % (filename, ex)
  774. )
  775. def get_local_facts_from_file(filename):
  776. """ Retrieve local facts from fact file
  777. Args:
  778. filename (str): local facts file
  779. Returns:
  780. dict: the retrieved facts
  781. """
  782. local_facts = dict()
  783. try:
  784. # Handle conversion of INI style facts file to json style
  785. ini_facts = configparser.SafeConfigParser()
  786. ini_facts.read(filename)
  787. for section in ini_facts.sections():
  788. local_facts[section] = dict()
  789. for key, value in ini_facts.items(section):
  790. local_facts[section][key] = value
  791. except (configparser.MissingSectionHeaderError,
  792. configparser.ParsingError):
  793. try:
  794. with open(filename, 'r') as facts_file:
  795. local_facts = json.load(facts_file)
  796. except (ValueError, IOError):
  797. pass
  798. return local_facts
  799. def sort_unique(alist):
  800. """ Sorts and de-dupes a list
  801. Args:
  802. list: a list
  803. Returns:
  804. list: a sorted de-duped list
  805. """
  806. return sorted(list(set(alist)))
  807. def safe_get_bool(fact):
  808. """ Get a boolean fact safely.
  809. Args:
  810. facts: fact to convert
  811. Returns:
  812. bool: given fact as a bool
  813. """
  814. return bool(strtobool(str(fact)))
def set_proxy_facts(facts):
    """ Set global proxy facts

    Normalizes common.no_proxy: starts from the user-supplied value
    (string or list), extends it with etcd host IPs, generated internal
    hostnames, the kube service IP, the cluster DNS domain, '.svc' and
    this host's own hostname, then joins the sorted/de-duped result back
    into a comma-separated string.

    Args:
        facts(dict): existing facts
    Returns:
        facts(dict): Updated facts with missing values
    """
    if 'common' in facts:
        common = facts['common']
        if 'http_proxy' in common or 'https_proxy' in common or 'no_proxy' in common:
            # Normalize no_proxy to a list so the extends/appends below work.
            if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
                common['no_proxy'] = common['no_proxy'].split(",")
            elif 'no_proxy' not in common:
                common['no_proxy'] = []

            # See https://bugzilla.redhat.com/show_bug.cgi?id=1466783
            # masters behind a proxy need to connect to etcd via IP
            if 'no_proxy_etcd_host_ips' in common:
                if isinstance(common['no_proxy_etcd_host_ips'], string_types):
                    common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(','))

            if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
                if 'no_proxy_internal_hostnames' in common:
                    common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))

            # We always add local dns domain and ourselves no matter what
            # The kube service IP is the first usable address of portal_net.
            # NOTE(review): assumes common['portal_net'] is a valid CIDR
            # string -- ip_network raises ValueError otherwise; confirm
            # upstream validation.
            kube_svc_ip = str(ipaddress.ip_network(text_type(common['portal_net']))[1])
            common['no_proxy'].append(kube_svc_ip)
            common['no_proxy'].append('.' + common['dns_domain'])
            common['no_proxy'].append('.svc')
            common['no_proxy'].append(common['hostname'])
            # Collapse back to the comma-separated string form consumers expect.
            common['no_proxy'] = ','.join(sort_unique(common['no_proxy']))
        facts['common'] = common
    return facts
def set_builddefaults_facts(facts):
    """ Set build defaults including setting proxy values from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Copy values from common to builddefaults
        if 'http_proxy' not in builddefaults and 'http_proxy' in common:
            builddefaults['http_proxy'] = common['http_proxy']
        if 'https_proxy' not in builddefaults and 'https_proxy' in common:
            builddefaults['https_proxy'] = common['https_proxy']
        if 'no_proxy' not in builddefaults and 'no_proxy' in common:
            builddefaults['no_proxy'] = common['no_proxy']

        # Create git specific facts from generic values, if git specific values are
        # not defined.
        if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
            builddefaults['git_http_proxy'] = builddefaults['http_proxy']
        if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
            builddefaults['git_https_proxy'] = builddefaults['https_proxy']
        if 'git_no_proxy' not in builddefaults and 'no_proxy' in builddefaults:
            builddefaults['git_no_proxy'] = builddefaults['no_proxy']

        # If we're actually defining a builddefaults config then create admission_plugin_config
        # then merge builddefaults[config] structure into admission_plugin_config
        # 'config' is the 'openshift_builddefaults_json' inventory variable
        if 'config' in builddefaults:
            # NOTE(review): assumes facts['master'] exists whenever a
            # builddefaults config is supplied (KeyError otherwise) --
            # confirm against callers.
            if 'admission_plugin_config' not in facts['master']:
                # Scaffold out the full expected datastructure
                facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
            if 'env' in facts['master']['admission_plugin_config']['BuildDefaults']['configuration']:
                # Drop env entries whose value is empty (see delete_empty_keys).
                delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
    return facts
  886. def delete_empty_keys(keylist):
  887. """ Delete dictionary elements from keylist where "value" is empty.
  888. Args:
  889. keylist(list): A list of builddefault configuration envs.
  890. Returns:
  891. none
  892. Example:
  893. keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
  894. {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
  895. {'name': 'NO_PROXY', 'value': ''}]
  896. After calling delete_empty_keys the provided list is modified to become:
  897. [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
  898. {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}]
  899. """
  900. count = 0
  901. for i in range(0, len(keylist)):
  902. if len(keylist[i - count]['value']) == 0:
  903. del keylist[i - count]
  904. count += 1
  905. def set_buildoverrides_facts(facts):
  906. """ Set build overrides
  907. Args:
  908. facts(dict): existing facts
  909. Returns:
  910. facts(dict): Updated facts with missing values
  911. """
  912. if 'buildoverrides' in facts:
  913. buildoverrides = facts['buildoverrides']
  914. # If we're actually defining a buildoverrides config then create admission_plugin_config
  915. # then merge buildoverrides[config] structure into admission_plugin_config
  916. if 'config' in buildoverrides:
  917. if 'admission_plugin_config' not in facts['master']:
  918. facts['master']['admission_plugin_config'] = dict()
  919. facts['master']['admission_plugin_config'].update(buildoverrides['config'])
  920. return facts
  921. # pylint: disable=too-many-statements
  922. def set_container_facts_if_unset(facts):
  923. """ Set containerized facts.
  924. Args:
  925. facts (dict): existing facts
  926. Returns:
  927. dict: the facts dict updated with the generated containerization
  928. facts
  929. """
  930. return facts
  931. class OpenShiftFactsInternalError(Exception):
  932. """Origin Facts Error"""
  933. pass
  934. class OpenShiftFactsUnsupportedRoleError(Exception):
  935. """Origin Facts Unsupported Role Error"""
  936. pass
  937. class OpenShiftFactsFileWriteError(Exception):
  938. """Origin Facts File Write Error"""
  939. pass
  940. class OpenShiftFactsMetadataUnavailableError(Exception):
  941. """Origin Facts Metadata Unavailable Error"""
  942. pass
class OpenShiftFacts(object):
    """ Origin Facts

        Attributes:
            facts (dict): facts for the host

        Args:
            module (AnsibleModule): an AnsibleModule object
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']

        Raises:
            OpenShiftFactsUnsupportedRoleError:
    """
    # Roles for which local facts may be set; validated in __init__.
    known_roles = ['builddefaults',
                   'buildoverrides',
                   'cloudprovider',
                   'common',
                   'etcd',
                   'master',
                   'node']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None):
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        # Collect system facts and preface each fact with 'ansible_'.
        try:
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])  # noqa: F405
            additional_facts = {}
            for (k, v) in self.system_facts.items():
                additional_facts["ansible_%s" % k.replace('-', '_')] = v
            self.system_facts.update(additional_facts)
        except UnboundLocalError:
            # ansible-2.2,2.3
            self.system_facts = get_all_facts(module)['ansible_facts']  # noqa: F405

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite)

    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite):
        """ Generate facts

            Merges defaults, provider facts and local facts, then runs each
            fact-derivation pass over the combined dict.

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
            Returns:
                dict: The generated facts
        """
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite)
        facts['current_config'] = get_current_config(facts)
        facts = set_url_facts_if_unset(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_aggregate_facts(facts)
        facts = set_proxy_facts(facts)
        facts = set_builddefaults_facts(facts)
        facts = set_buildoverrides_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)

    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host
                deployment_type (str): deployment type (e.g. 'origin')
                deployment_subtype (str): deployment subtype (e.g. 'basic')
            Returns:
                dict: The generated default facts
        """
        defaults = {}
        ip_addr = self.system_facts['ansible_default_ipv4']['address']
        exit_code, output, _ = module.run_command(['hostname', '-f'])  # noqa: F405
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr).lower()

        defaults['common'] = dict(ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  dns_domain='cluster.local',
                                  config_base='/etc/origin')

        if 'master' in roles:
            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='2379',
                                      portal_net='172.30.0.0/16',
                                      embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        return defaults

    def guess_host_provider(self):
        """ Guess the host provider

            Detects gce / aws / openstack from DMI vendor strings and
            virtualization facts, then fetches that provider's metadata.

            Returns:
                dict: The generated default facts for the detected provider
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['ansible_product_name']
        product_version = self.system_facts['ansible_product_version']
        virt_type = self.system_facts['ansible_virtualization_type']
        virt_role = self.system_facts['ansible_virtualization_role']
        bios_vendor = self.system_facts['ansible_system_vendor']
        provider = None
        metadata = None

        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif bios_vendor == 'Amazon EC2':
            # Adds support for Amazon EC2 C5 instance types
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)

    def init_provider_facts(self):
        """ Initialize the provider facts

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts

    # Disabling too-many-branches and too-many-locals.
    # This should be cleaned up as a TODO item.
    # pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None):
        """ Initialize the local facts

            Merges the supplied facts into the on-disk local facts file,
            validating and persisting the result when it changed.

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        local_facts = get_local_facts_from_file(self.filename)
        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite)

        new_local_facts = self.remove_empty_facts(new_local_facts)

        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            # Respect Ansible check mode: do not write the facts file.
            if not module.check_mode:  # noqa: F405
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts

    def remove_empty_facts(self, facts=None):
        """ Remove empty facts

            Recursively strips keys whose value is '', [''] or None.

            Args:
                facts (dict): facts to clean
            Returns:
                dict: the cleaned facts (same dict, mutated in place)
        """
        facts_to_remove = []
        for fact, value in iteritems(facts):
            if isinstance(facts[fact], dict):
                facts[fact] = self.remove_empty_facts(facts[fact])
            else:
                if value == "" or value == [""] or value is None:
                    facts_to_remove.append(fact)
        for fact in facts_to_remove:
            del facts[fact]
        return facts

    def validate_local_facts(self, facts=None):
        """ Validate local facts

            Fails the Ansible module with a combined message when any
            invalid facts are found.

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            msg = 'Invalid facts detected:\n'
            # pylint: disable=consider-iterating-dictionary
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            module.fail_json(msg=msg, changed=self.changed)  # noqa: F405

    # disabling pylint errors for line-too-long since we're dealing
    # with best effort reduction of error messages here.
    # disabling errors for too-many-branches since we require checking
    # many conditions.
    # pylint: disable=line-too-long, too-many-branches
    @staticmethod
    def validate_master_facts(facts, invalid_facts):
        """ Validate master facts

            Args:
                facts (dict): local facts to validate
                invalid_facts (dict): collected invalid_facts
            Returns:
                dict: Invalid facts
        """
        if 'master' in facts:
            # openshift.master.session_auth_secrets
            if 'session_auth_secrets' in facts['master']:
                session_auth_secrets = facts['master']['session_auth_secrets']
                if not issubclass(type(session_auth_secrets), list):
                    invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
                elif 'session_encryption_secrets' not in facts['master']:
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption secrets must be set '
                                                             'if openshift_master_session_auth_secrets is provided.')
                elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
                                                             'openshift_master_session_encryption_secrets must be '
                                                             'equal length.')
                else:
                    for secret in session_auth_secrets:
                        if len(secret) < 32:
                            invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
                                                                     'Secrets must be at least 32 characters in length.')
            # openshift.master.session_encryption_secrets
            if 'session_encryption_secrets' in facts['master']:
                session_encryption_secrets = facts['master']['session_encryption_secrets']
                if not issubclass(type(session_encryption_secrets), list):
                    invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
                elif 'session_auth_secrets' not in facts['master']:
                    invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
                                                                   'set if openshift_master_session_encryption_secrets '
                                                                   'is provided.')
                else:
                    for secret in session_encryption_secrets:
                        if len(secret) not in [16, 24, 32]:
                            invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
                                                                           'Secrets must be 16, 24, or 32 characters in length.')
        return invalid_facts
  1246. def main():
  1247. """ main """
  1248. # disabling pylint errors for global-variable-undefined and invalid-name
  1249. # for 'global module' usage, since it is required to use ansible_facts
  1250. # pylint: disable=global-variable-undefined, invalid-name
  1251. global module
  1252. module = AnsibleModule( # noqa: F405
  1253. argument_spec=dict(
  1254. role=dict(default='common', required=False,
  1255. choices=OpenShiftFacts.known_roles),
  1256. local_facts=dict(default=None, type='dict', required=False),
  1257. additive_facts_to_overwrite=dict(default=[], type='list', required=False),
  1258. ),
  1259. supports_check_mode=True,
  1260. add_file_common_args=True,
  1261. )
  1262. if not HAVE_DBUS:
  1263. module.fail_json(msg="This module requires dbus python bindings") # noqa: F405
  1264. module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter'] # noqa: F405
  1265. module.params['gather_timeout'] = 10 # noqa: F405
  1266. module.params['filter'] = '*' # noqa: F405
  1267. role = module.params['role'] # noqa: F405
  1268. local_facts = module.params['local_facts'] # noqa: F405
  1269. additive_facts_to_overwrite = module.params['additive_facts_to_overwrite'] # noqa: F405
  1270. fact_file = '/etc/ansible/facts.d/openshift.fact'
  1271. openshift_facts = OpenShiftFacts(role,
  1272. fact_file,
  1273. local_facts,
  1274. additive_facts_to_overwrite)
  1275. file_params = module.params.copy() # noqa: F405
  1276. file_params['path'] = fact_file
  1277. file_args = module.load_file_common_arguments(file_params) # noqa: F405
  1278. changed = module.set_fs_attributes_if_different(file_args, # noqa: F405
  1279. openshift_facts.changed)
  1280. return module.exit_json(changed=changed, # noqa: F405
  1281. ansible_facts=openshift_facts.facts)
# Module entry point when executed directly (as Ansible does).
if __name__ == '__main__':
    main()