openshift_facts.py

  1. #!/usr/bin/python
  2. # pylint: disable=too-many-lines
  3. # -*- coding: utf-8 -*-
  4. # vim: expandtab:tabstop=4:shiftwidth=4
  5. # Reason: Disable pylint too-many-lines because we don't want to split up this file.
  6. # Status: Permanently disabled to keep this module as self-contained as possible.
  7. """Ansible module for retrieving and setting openshift related facts"""
  8. try:
  9. # python2
  10. import ConfigParser
  11. except ImportError:
  12. # python3
  13. import configparser as ConfigParser
  14. import copy
  15. import io
  16. import os
  17. import yaml
  18. from distutils.util import strtobool
  19. from distutils.version import LooseVersion
  20. import struct
  21. import socket
  22. from dbus import SystemBus, Interface
  23. from dbus.exceptions import DBusException
  24. DOCUMENTATION = '''
  25. ---
  26. module: openshift_facts
  27. short_description: Cluster Facts
  28. author: Jason DeTiberus
  29. requirements: [ ]
  30. '''
  31. EXAMPLES = '''
  32. '''
  33. def migrate_docker_facts(facts):
  34. """ Apply migrations for docker facts """
  35. params = {
  36. 'common': (
  37. 'additional_registries',
  38. 'insecure_registries',
  39. 'blocked_registries',
  40. 'options'
  41. ),
  42. 'node': (
  43. 'log_driver',
  44. 'log_options'
  45. )
  46. }
  47. if 'docker' not in facts:
  48. facts['docker'] = {}
  49. for role in params.keys():
  50. if role in facts:
  51. for param in params[role]:
  52. old_param = 'docker_' + param
  53. if old_param in facts[role]:
  54. facts['docker'][param] = facts[role].pop(old_param)
  55. if 'node' in facts and 'portal_net' in facts['node']:
  56. facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')
  57. # log_options was originally meant to be a comma separated string, but
  58. # we now prefer an actual list, with backward compatibility:
  59. if 'log_options' in facts['docker'] and \
  60. isinstance(facts['docker']['log_options'], basestring):
  61. facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
  62. return facts
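# Illustrative sketch (not part of the upstream module): assuming it sits in this
# module, it shows how role-scoped docker_* facts and the node portal_net are
# relocated under the top-level 'docker' key. All sample values are hypothetical.
def _example_migrate_docker_facts():
    facts = {'node': {'docker_log_driver': 'json-file',
                      'portal_net': '172.30.0.0/16'}}
    facts = migrate_docker_facts(facts)
    assert facts['docker'] == {'log_driver': 'json-file',
                               'hosted_registry_network': '172.30.0.0/16'}
    assert facts['node'] == {}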
  63. # TODO: We should add a generic migration function that takes source and destination
  64. # paths and does the right thing rather than one function for common, one for node, etc.
  65. def migrate_common_facts(facts):
  66. """ Migrate facts from various roles into common """
  67. params = {
68. 'node': ('portal_net',),  # trailing comma keeps this a one-element tuple
69. 'master': ('portal_net',)
  70. }
  71. if 'common' not in facts:
  72. facts['common'] = {}
  73. for role in params.keys():
  74. if role in facts:
  75. for param in params[role]:
  76. if param in facts[role]:
  77. facts['common'][param] = facts[role].pop(param)
  78. return facts
  79. def migrate_node_facts(facts):
  80. """ Migrate facts from various roles into node """
  81. params = {
82. 'common': ('dns_ip',),  # trailing comma keeps this a one-element tuple
  83. }
  84. if 'node' not in facts:
  85. facts['node'] = {}
  86. for role in params.keys():
  87. if role in facts:
  88. for param in params[role]:
  89. if param in facts[role]:
  90. facts['node'][param] = facts[role].pop(param)
  91. return facts
  92. def migrate_hosted_facts(facts):
  93. """ Apply migrations for master facts """
  94. if 'master' in facts:
  95. if 'router_selector' in facts['master']:
  96. if 'hosted' not in facts:
  97. facts['hosted'] = {}
  98. if 'router' not in facts['hosted']:
  99. facts['hosted']['router'] = {}
  100. facts['hosted']['router']['selector'] = facts['master'].pop('router_selector')
  101. if 'registry_selector' in facts['master']:
  102. if 'hosted' not in facts:
  103. facts['hosted'] = {}
  104. if 'registry' not in facts['hosted']:
  105. facts['hosted']['registry'] = {}
  106. facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
  107. return facts
  108. def migrate_admission_plugin_facts(facts):
  109. if 'master' in facts:
  110. if 'kube_admission_plugin_config' in facts['master']:
  111. if 'admission_plugin_config' not in facts['master']:
  112. facts['master']['admission_plugin_config'] = dict()
  113. # Merge existing kube_admission_plugin_config with admission_plugin_config.
  114. facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
  115. facts['master']['kube_admission_plugin_config'],
  116. additive_facts_to_overwrite=[],
  117. protected_facts_to_overwrite=[])
  118. # Remove kube_admission_plugin_config fact
  119. facts['master'].pop('kube_admission_plugin_config', None)
  120. return facts
  121. def migrate_local_facts(facts):
  122. """ Apply migrations of local facts """
  123. migrated_facts = copy.deepcopy(facts)
  124. migrated_facts = migrate_docker_facts(migrated_facts)
  125. migrated_facts = migrate_common_facts(migrated_facts)
  126. migrated_facts = migrate_node_facts(migrated_facts)
  127. migrated_facts = migrate_hosted_facts(migrated_facts)
  128. migrated_facts = migrate_admission_plugin_facts(migrated_facts)
  129. return migrated_facts
  130. def first_ip(network):
  131. """ Return the first IPv4 address in network
  132. Args:
  133. network (str): network in CIDR format
  134. Returns:
  135. str: first IPv4 address
  136. """
  137. atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
  138. itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
  139. (address, netmask) = network.split('/')
  140. netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
  141. return itoa((atoi(address) & netmask_i) + 1)
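# Illustrative sketch (not part of the upstream module): first_ip() masks the
# address with the prefix length and adds one, which is how the cluster service
# IP is later derived from portal_net. The CIDRs below are hypothetical.
def _example_first_ip():
    assert first_ip('172.30.0.0/16') == '172.30.0.1'
    assert first_ip('10.3.5.7/24') == '10.3.5.1'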
  142. def hostname_valid(hostname):
  143. """ Test if specified hostname should be considered valid
  144. Args:
  145. hostname (str): hostname to test
  146. Returns:
  147. bool: True if valid, otherwise False
  148. """
  149. if (not hostname or
  150. hostname.startswith('localhost') or
  151. hostname.endswith('localdomain') or
  152. hostname.endswith('novalocal') or
  153. len(hostname.split('.')) < 2):
  154. return False
  155. return True
  156. def choose_hostname(hostnames=None, fallback=''):
  157. """ Choose a hostname from the provided hostnames
  158. Given a list of hostnames and a fallback value, choose a hostname to
  159. use. This function will prefer fqdns if they exist (excluding any that
  160. begin with localhost or end with localdomain) over ip addresses.
  161. Args:
  162. hostnames (list): list of hostnames
  163. fallback (str): default value to set if hostnames does not contain
  164. a valid hostname
  165. Returns:
  166. str: chosen hostname
  167. """
  168. hostname = fallback
  169. if hostnames is None:
  170. return hostname
  171. ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
  172. ips = [i for i in hostnames
  173. if (i is not None and isinstance(i, basestring)
  174. and re.match(ip_regex, i))]
  175. hosts = [i for i in hostnames
  176. if i is not None and i != '' and i not in ips]
  177. for host_list in (hosts, ips):
  178. for host in host_list:
  179. if hostname_valid(host):
  180. return host
  181. return hostname
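# Illustrative sketch (not part of the upstream module, and relying on the
# module's Python 2 runtime names such as basestring): choose_hostname() prefers
# a name that passes hostname_valid() over bare IP addresses, and only falls back
# to an IP when no valid name exists. Sample values are hypothetical.
def _example_choose_hostname():
    names = ['localhost.localdomain', '10.0.0.5', 'master1.example.com']
    assert choose_hostname(names, fallback='10.0.0.5') == 'master1.example.com'
    assert choose_hostname(['localhost', '10.0.0.5'], fallback='') == '10.0.0.5'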
  182. def query_metadata(metadata_url, headers=None, expect_json=False):
  183. """ Return metadata from the provided metadata_url
  184. Args:
  185. metadata_url (str): metadata url
  186. headers (dict): headers to set for metadata request
  187. expect_json (bool): does the metadata_url return json
  188. Returns:
  189. dict or list: metadata request result
  190. """
  191. result, info = fetch_url(module, metadata_url, headers=headers)
  192. if info['status'] != 200:
  193. raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
  194. if expect_json:
  195. return module.from_json(to_native(result.read()))
  196. else:
  197. return [to_native(line.strip()) for line in result.readlines()]
  198. def walk_metadata(metadata_url, headers=None, expect_json=False):
  199. """ Walk the metadata tree and return a dictionary of the entire tree
  200. Args:
  201. metadata_url (str): metadata url
  202. headers (dict): headers to set for metadata request
  203. expect_json (bool): does the metadata_url return json
  204. Returns:
  205. dict: the result of walking the metadata tree
  206. """
  207. metadata = dict()
  208. for line in query_metadata(metadata_url, headers, expect_json):
  209. if line.endswith('/') and not line == 'public-keys/':
  210. key = line[:-1]
  211. metadata[key] = walk_metadata(metadata_url + line,
  212. headers, expect_json)
  213. else:
  214. results = query_metadata(metadata_url + line, headers,
  215. expect_json)
  216. if len(results) == 1:
  217. # disable pylint maybe-no-member because overloaded use of
  218. # the module name causes pylint to not detect that results
  219. # is an array or hash
  220. # pylint: disable=maybe-no-member
  221. metadata[line] = results.pop()
  222. else:
  223. metadata[line] = results
  224. return metadata
  225. def get_provider_metadata(metadata_url, supports_recursive=False,
  226. headers=None, expect_json=False):
  227. """ Retrieve the provider metadata
  228. Args:
  229. metadata_url (str): metadata url
  230. supports_recursive (bool): does the provider metadata api support
  231. recursion
  232. headers (dict): headers to set for metadata request
  233. expect_json (bool): does the metadata_url return json
  234. Returns:
  235. dict: the provider metadata
  236. """
  237. try:
  238. if supports_recursive:
  239. metadata = query_metadata(metadata_url, headers,
  240. expect_json)
  241. else:
  242. metadata = walk_metadata(metadata_url, headers,
  243. expect_json)
  244. except OpenShiftFactsMetadataUnavailableError:
  245. metadata = None
  246. return metadata
  247. def normalize_gce_facts(metadata, facts):
  248. """ Normalize gce facts
  249. Args:
  250. metadata (dict): provider metadata
  251. facts (dict): facts to update
  252. Returns:
  253. dict: the result of adding the normalized metadata to the provided
  254. facts dict
  255. """
  256. for interface in metadata['instance']['networkInterfaces']:
  257. int_info = dict(ips=[interface['ip']], network_type='gce')
  258. int_info['public_ips'] = [ac['externalIp'] for ac
  259. in interface['accessConfigs']]
  260. int_info['public_ips'].extend(interface['forwardedIps'])
  261. _, _, network_id = interface['network'].rpartition('/')
  262. int_info['network_id'] = network_id
  263. facts['network']['interfaces'].append(int_info)
  264. _, _, zone = metadata['instance']['zone'].rpartition('/')
  265. facts['zone'] = zone
  266. # GCE currently only supports a single interface
  267. facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
  268. pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
  269. facts['network']['public_ip'] = pub_ip
  270. facts['network']['hostname'] = metadata['instance']['hostname']
  271. # TODO: attempt to resolve public_hostname
  272. facts['network']['public_hostname'] = facts['network']['public_ip']
  273. return facts
  274. def normalize_aws_facts(metadata, facts):
  275. """ Normalize aws facts
  276. Args:
  277. metadata (dict): provider metadata
  278. facts (dict): facts to update
  279. Returns:
  280. dict: the result of adding the normalized metadata to the provided
  281. facts dict
  282. """
  283. for interface in sorted(
  284. metadata['network']['interfaces']['macs'].values(),
  285. key=lambda x: x['device-number']
  286. ):
  287. int_info = dict()
  288. var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
  289. for ips_var, int_var in iteritems(var_map):
  290. ips = interface.get(int_var)
  291. if isinstance(ips, basestring):
  292. int_info[ips_var] = [ips]
  293. else:
  294. int_info[ips_var] = ips
  295. if 'vpc-id' in interface:
  296. int_info['network_type'] = 'vpc'
  297. else:
  298. int_info['network_type'] = 'classic'
  299. if int_info['network_type'] == 'vpc':
  300. int_info['network_id'] = interface['subnet-id']
  301. else:
  302. int_info['network_id'] = None
  303. facts['network']['interfaces'].append(int_info)
  304. facts['zone'] = metadata['placement']['availability-zone']
  305. # TODO: actually attempt to determine default local and public ips
  306. # by using the ansible default ip fact and the ipv4-associations
  307. # from the ec2 metadata
  308. facts['network']['ip'] = metadata.get('local-ipv4')
  309. facts['network']['public_ip'] = metadata.get('public-ipv4')
  310. # TODO: verify that local hostname makes sense and is resolvable
  311. facts['network']['hostname'] = metadata.get('local-hostname')
  312. # TODO: verify that public hostname makes sense and is resolvable
  313. facts['network']['public_hostname'] = metadata.get('public-hostname')
  314. return facts
  315. def normalize_openstack_facts(metadata, facts):
  316. """ Normalize openstack facts
  317. Args:
  318. metadata (dict): provider metadata
  319. facts (dict): facts to update
  320. Returns:
  321. dict: the result of adding the normalized metadata to the provided
  322. facts dict
  323. """
  324. # openstack ec2 compat api does not support network interfaces and
  325. # the version tested on did not include the info in the openstack
  326. # metadata api, should be updated if neutron exposes this.
  327. facts['zone'] = metadata['availability_zone']
  328. local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
  329. facts['network']['ip'] = local_ipv4
  330. facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']
  331. for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
  332. ('public_hostname', 'public-hostname', 'public-ipv4')]:
  333. try:
  334. if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
  335. facts['network'][f_var] = metadata['ec2_compat'][h_var]
  336. else:
  337. facts['network'][f_var] = metadata['ec2_compat'][ip_var]
  338. except socket.gaierror:
  339. facts['network'][f_var] = metadata['ec2_compat'][ip_var]
  340. return facts
  341. def normalize_provider_facts(provider, metadata):
  342. """ Normalize provider facts
  343. Args:
  344. provider (str): host provider
  345. metadata (dict): provider metadata
  346. Returns:
  347. dict: the normalized provider facts
  348. """
  349. if provider is None or metadata is None:
  350. return {}
  351. # TODO: test for ipv6_enabled where possible (gce, aws do not support)
  352. # and configure ipv6 facts if available
  353. # TODO: add support for setting user_data if available
  354. facts = dict(name=provider, metadata=metadata,
  355. network=dict(interfaces=[], ipv6_enabled=False))
  356. if provider == 'gce':
  357. facts = normalize_gce_facts(metadata, facts)
  358. elif provider == 'aws':
  359. facts = normalize_aws_facts(metadata, facts)
  360. elif provider == 'openstack':
  361. facts = normalize_openstack_facts(metadata, facts)
  362. return facts
  363. def set_flannel_facts_if_unset(facts):
  364. """ Set flannel facts if not already present in facts dict
365. Sets common.use_flannel to False when the inventory or a
366. previous run has not already provided a value.
  367. Args:
  368. facts (dict): existing facts
  369. Returns:
  370. dict: the facts dict updated with the flannel
  371. facts if they were not already present
  372. """
  373. if 'common' in facts:
  374. if 'use_flannel' not in facts['common']:
  375. use_flannel = False
  376. facts['common']['use_flannel'] = use_flannel
  377. return facts
  378. def set_nuage_facts_if_unset(facts):
  379. """ Set nuage facts if not already present in facts dict
380. Sets common.use_nuage to False when the inventory or a
381. previous run has not already provided a value.
  382. Args:
  383. facts (dict): existing facts
  384. Returns:
  385. dict: the facts dict updated with the nuage
  386. facts if they were not already present
  387. """
  388. if 'common' in facts:
  389. if 'use_nuage' not in facts['common']:
  390. use_nuage = False
  391. facts['common']['use_nuage'] = use_nuage
  392. return facts
  393. def set_node_schedulability(facts):
  394. """ Set schedulable facts if not already present in facts dict
  395. Args:
  396. facts (dict): existing facts
  397. Returns:
  398. dict: the facts dict updated with the generated schedulable
  399. facts if they were not already present
  400. """
  401. if 'node' in facts:
  402. if 'schedulable' not in facts['node']:
  403. if 'master' in facts:
  404. facts['node']['schedulable'] = False
  405. else:
  406. facts['node']['schedulable'] = True
  407. return facts
  408. def set_selectors(facts):
  409. """ Set selectors facts if not already present in facts dict
  410. Args:
  411. facts (dict): existing facts
  412. Returns:
  413. dict: the facts dict updated with the generated selectors
  414. facts if they were not already present
  415. """
  416. deployment_type = facts['common']['deployment_type']
  417. if deployment_type == 'online':
  418. selector = "type=infra"
  419. else:
  420. selector = "region=infra"
  421. if 'hosted' not in facts:
  422. facts['hosted'] = {}
  423. if 'router' not in facts['hosted']:
  424. facts['hosted']['router'] = {}
  425. if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']:
  426. facts['hosted']['router']['selector'] = selector
  427. if 'registry' not in facts['hosted']:
  428. facts['hosted']['registry'] = {}
  429. if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']:
  430. facts['hosted']['registry']['selector'] = selector
  431. if 'metrics' not in facts['hosted']:
  432. facts['hosted']['metrics'] = {}
  433. if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
  434. facts['hosted']['metrics']['selector'] = None
  435. if 'logging' not in facts['hosted']:
  436. facts['hosted']['logging'] = {}
  437. if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
  438. facts['hosted']['logging']['selector'] = None
  439. return facts
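# Illustrative sketch (not part of the upstream module): for any deployment_type
# other than 'online', the hosted router and registry default to the region=infra
# selector, while metrics and logging selectors default to None. Input is hypothetical.
def _example_set_selectors():
    facts = set_selectors({'common': {'deployment_type': 'origin'}})
    assert facts['hosted']['router']['selector'] == 'region=infra'
    assert facts['hosted']['registry']['selector'] == 'region=infra'
    assert facts['hosted']['metrics']['selector'] is None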
  440. def set_dnsmasq_facts_if_unset(facts):
  441. """ Set dnsmasq facts if not already present in facts
  442. Args:
  443. facts (dict) existing facts
  444. Returns:
  445. facts (dict) updated facts with values set if not previously set
  446. """
  447. if 'common' in facts:
  448. if 'use_dnsmasq' not in facts['common']:
  449. facts['common']['use_dnsmasq'] = bool(safe_get_bool(facts['common']['version_gte_3_2_or_1_2']))
  450. if 'master' in facts and 'dns_port' not in facts['master']:
  451. if safe_get_bool(facts['common']['use_dnsmasq']):
  452. facts['master']['dns_port'] = 8053
  453. else:
  454. facts['master']['dns_port'] = 53
  455. return facts
  456. def set_project_cfg_facts_if_unset(facts):
  457. """ Set Project Configuration facts if not already present in facts dict
458. (default node selector, project request template/message, MCS and UID allocator ranges).
  459. Args:
  460. facts (dict): existing facts
  461. Returns:
  462. dict: the facts dict updated with the generated Project Configuration
  463. facts if they were not already present
  464. """
  465. config = {
  466. 'default_node_selector': '',
  467. 'project_request_message': '',
  468. 'project_request_template': '',
  469. 'mcs_allocator_range': 's0:/2',
  470. 'mcs_labels_per_project': 5,
  471. 'uid_allocator_range': '1000000000-1999999999/10000'
  472. }
  473. if 'master' in facts:
  474. for key, value in config.items():
  475. if key not in facts['master']:
  476. facts['master'][key] = value
  477. return facts
  478. def set_identity_providers_if_unset(facts):
  479. """ Set identity_providers fact if not already present in facts dict
  480. Args:
  481. facts (dict): existing facts
  482. Returns:
  483. dict: the facts dict updated with the generated identity providers
  484. facts if they were not already present
  485. """
  486. if 'master' in facts:
  487. deployment_type = facts['common']['deployment_type']
  488. if 'identity_providers' not in facts['master']:
  489. identity_provider = dict(
  490. name='allow_all', challenge=True, login=True,
  491. kind='AllowAllPasswordIdentityProvider'
  492. )
  493. if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
  494. identity_provider = dict(
  495. name='deny_all', challenge=True, login=True,
  496. kind='DenyAllPasswordIdentityProvider'
  497. )
  498. facts['master']['identity_providers'] = [identity_provider]
  499. return facts
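# Illustrative sketch (not part of the upstream module): enterprise deployment
# types default to a deny_all identity provider, everything else to allow_all.
# The facts below are hypothetical.
def _example_set_identity_providers_if_unset():
    facts = {'common': {'deployment_type': 'openshift-enterprise'}, 'master': {}}
    facts = set_identity_providers_if_unset(facts)
    provider = facts['master']['identity_providers'][0]
    assert provider['kind'] == 'DenyAllPasswordIdentityProvider'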
  500. def set_url_facts_if_unset(facts):
  501. """ Set url facts if not already present in facts dict
  502. Args:
  503. facts (dict): existing facts
  504. Returns:
  505. dict: the facts dict updated with the generated url facts if they
  506. were not already present
  507. """
  508. if 'master' in facts:
  509. hostname = facts['common']['hostname']
  510. cluster_hostname = facts['master'].get('cluster_hostname')
  511. cluster_public_hostname = facts['master'].get('cluster_public_hostname')
  512. public_hostname = facts['common']['public_hostname']
  513. api_hostname = cluster_hostname if cluster_hostname else hostname
  514. api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
  515. console_path = facts['master']['console_path']
  516. etcd_hosts = facts['master']['etcd_hosts']
  517. use_ssl = dict(
  518. api=facts['master']['api_use_ssl'],
  519. public_api=facts['master']['api_use_ssl'],
  520. loopback_api=facts['master']['api_use_ssl'],
  521. console=facts['master']['console_use_ssl'],
  522. public_console=facts['master']['console_use_ssl'],
  523. etcd=facts['master']['etcd_use_ssl']
  524. )
  525. ports = dict(
  526. api=facts['master']['api_port'],
  527. public_api=facts['master']['api_port'],
  528. loopback_api=facts['master']['api_port'],
  529. console=facts['master']['console_port'],
  530. public_console=facts['master']['console_port'],
  531. etcd=facts['master']['etcd_port'],
  532. )
  533. etcd_urls = []
  534. if etcd_hosts != '':
  535. facts['master']['etcd_port'] = ports['etcd']
  536. facts['master']['embedded_etcd'] = False
  537. for host in etcd_hosts:
  538. etcd_urls.append(format_url(use_ssl['etcd'], host,
  539. ports['etcd']))
  540. else:
  541. etcd_urls = [format_url(use_ssl['etcd'], hostname,
  542. ports['etcd'])]
  543. facts['master'].setdefault('etcd_urls', etcd_urls)
  544. prefix_hosts = [('api', api_hostname),
  545. ('public_api', api_public_hostname),
  546. ('loopback_api', hostname)]
  547. for prefix, host in prefix_hosts:
  548. facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
  549. host,
  550. ports[prefix]))
  551. r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
  552. r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
  553. facts['master'].setdefault('loopback_cluster_name', r_lhn)
  554. facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
  555. facts['master'].setdefault('loopback_user', r_lhu)
  556. prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
  557. for prefix, host in prefix_hosts:
  558. facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
  559. host,
  560. ports[prefix],
  561. console_path))
  562. return facts
  563. def set_aggregate_facts(facts):
  564. """ Set aggregate facts
  565. Args:
  566. facts (dict): existing facts
  567. Returns:
  568. dict: the facts dict updated with aggregated facts
  569. """
  570. all_hostnames = set()
  571. internal_hostnames = set()
  572. kube_svc_ip = first_ip(facts['common']['portal_net'])
  573. if 'common' in facts:
  574. all_hostnames.add(facts['common']['hostname'])
  575. all_hostnames.add(facts['common']['public_hostname'])
  576. all_hostnames.add(facts['common']['ip'])
  577. all_hostnames.add(facts['common']['public_ip'])
  578. facts['common']['kube_svc_ip'] = kube_svc_ip
  579. internal_hostnames.add(facts['common']['hostname'])
  580. internal_hostnames.add(facts['common']['ip'])
  581. cluster_domain = facts['common']['dns_domain']
  582. if 'master' in facts:
  583. if 'cluster_hostname' in facts['master']:
  584. all_hostnames.add(facts['master']['cluster_hostname'])
  585. if 'cluster_public_hostname' in facts['master']:
  586. all_hostnames.add(facts['master']['cluster_public_hostname'])
  587. svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
  588. 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
  589. 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
  590. all_hostnames.update(svc_names)
  591. internal_hostnames.update(svc_names)
  592. all_hostnames.add(kube_svc_ip)
  593. internal_hostnames.add(kube_svc_ip)
  594. facts['common']['all_hostnames'] = list(all_hostnames)
  595. facts['common']['internal_hostnames'] = list(internal_hostnames)
  596. return facts
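# Illustrative sketch (not part of the upstream module): the aggregate hostname
# facts combine the host's names and IPs with the kubernetes/openshift service
# names and the first IP of portal_net. All values below are hypothetical.
def _example_set_aggregate_facts():
    facts = {'common': {'hostname': 'master1.example.com',
                        'public_hostname': 'master.example.com',
                        'ip': '10.0.0.5', 'public_ip': '203.0.113.5',
                        'portal_net': '172.30.0.0/16',
                        'dns_domain': 'cluster.local'}}
    facts = set_aggregate_facts(facts)
    assert facts['common']['kube_svc_ip'] == '172.30.0.1'
    assert 'kubernetes.default.svc.cluster.local' in facts['common']['all_hostnames']
    assert '203.0.113.5' not in facts['common']['internal_hostnames']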
  597. def set_etcd_facts_if_unset(facts):
  598. """
  599. If using embedded etcd, loads the data directory from master-config.yaml.
  600. If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
  601. If anything goes wrong parsing these, the fact will not be set.
  602. """
  603. if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
  604. etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
  605. if 'etcd_data_dir' not in etcd_facts:
  606. try:
  607. # Parse master config to find actual etcd data dir:
  608. master_cfg_path = os.path.join(facts['common']['config_base'],
  609. 'master/master-config.yaml')
  610. master_cfg_f = open(master_cfg_path, 'r')
  611. config = yaml.safe_load(master_cfg_f.read())
  612. master_cfg_f.close()
  613. etcd_facts['etcd_data_dir'] = \
  614. config['etcdConfig']['storageDirectory']
  615. facts['etcd'] = etcd_facts
  616. # We don't want exceptions bubbling up here:
  617. # pylint: disable=broad-except
  618. except Exception:
  619. pass
  620. else:
  621. etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
  622. # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
  623. try:
  624. # Add a fake section for parsing:
  625. ini_str = unicode('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
  626. ini_fp = io.StringIO(ini_str)
  627. config = ConfigParser.RawConfigParser()
  628. config.readfp(ini_fp)
  629. etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
  630. if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
  631. etcd_data_dir = etcd_data_dir[1:-1]
  632. etcd_facts['etcd_data_dir'] = etcd_data_dir
  633. facts['etcd'] = etcd_facts
  634. # We don't want exceptions bubbling up here:
  635. # pylint: disable=broad-except
  636. except Exception:
  637. pass
  638. return facts
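# Illustrative sketch (not part of the upstream module): the same fake-[root]
# trick used above lets ConfigParser read a shell-style KEY=value file such as
# /etc/etcd/etcd.conf. The default path and key below are hypothetical.
def _example_read_sysconfig_value(path='/etc/sysconfig/example', key='OPTIONS'):
    ini_str = u'[root]\n' + open(path, 'r').read()
    config = ConfigParser.RawConfigParser()
    config.readfp(io.StringIO(ini_str))
    # Strip the surrounding quotes that sysconfig files commonly use.
    return config.get('root', key).strip('"')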
  639. def set_deployment_facts_if_unset(facts):
  640. """ Set Facts that vary based on deployment_type. This currently
  641. includes common.service_type, common.config_base, master.registry_url,
  642. node.registry_url, node.storage_plugin_deps
  643. Args:
  644. facts (dict): existing facts
  645. Returns:
  646. dict: the facts dict updated with the generated deployment_type
  647. facts
  648. """
  649. # disabled to avoid breaking up facts related to deployment type into
  650. # multiple methods for now.
  651. # pylint: disable=too-many-statements, too-many-branches
  652. if 'common' in facts:
  653. deployment_type = facts['common']['deployment_type']
  654. if 'service_type' not in facts['common']:
  655. service_type = 'atomic-openshift'
  656. if deployment_type == 'origin':
  657. service_type = 'origin'
  658. elif deployment_type in ['enterprise']:
  659. service_type = 'openshift'
  660. facts['common']['service_type'] = service_type
  661. if 'config_base' not in facts['common']:
  662. config_base = '/etc/origin'
  663. if deployment_type in ['enterprise']:
  664. config_base = '/etc/openshift'
  665. # Handle upgrade scenarios when symlinks don't yet exist:
  666. if not os.path.exists(config_base) and os.path.exists('/etc/openshift'):
  667. config_base = '/etc/openshift'
  668. facts['common']['config_base'] = config_base
  669. if 'data_dir' not in facts['common']:
  670. data_dir = '/var/lib/origin'
  671. if deployment_type in ['enterprise']:
  672. data_dir = '/var/lib/openshift'
  673. # Handle upgrade scenarios when symlinks don't yet exist:
  674. if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'):
  675. data_dir = '/var/lib/openshift'
  676. facts['common']['data_dir'] = data_dir
  677. if 'docker' in facts:
  678. deployment_type = facts['common']['deployment_type']
  679. if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
  680. addtl_regs = facts['docker'].get('additional_registries', [])
  681. ent_reg = 'registry.access.redhat.com'
  682. if ent_reg not in addtl_regs:
  683. facts['docker']['additional_registries'] = addtl_regs + [ent_reg]
  684. for role in ('master', 'node'):
  685. if role in facts:
  686. deployment_type = facts['common']['deployment_type']
  687. if 'registry_url' not in facts[role]:
  688. registry_url = 'openshift/origin-${component}:${version}'
  689. if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
  690. registry_url = 'openshift3/ose-${component}:${version}'
  691. elif deployment_type == 'atomic-enterprise':
  692. registry_url = 'aep3_beta/aep-${component}:${version}'
  693. facts[role]['registry_url'] = registry_url
  694. if 'master' in facts:
  695. deployment_type = facts['common']['deployment_type']
  696. openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
  697. if 'disabled_features' in facts['master']:
  698. if deployment_type == 'atomic-enterprise':
  699. curr_disabled_features = set(facts['master']['disabled_features'])
  700. facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
  701. else:
  702. if facts['common']['deployment_subtype'] == 'registry':
  703. facts['master']['disabled_features'] = openshift_features
  704. if 'node' in facts:
  705. deployment_type = facts['common']['deployment_type']
  706. if 'storage_plugin_deps' not in facts['node']:
  707. if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']:
  708. facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
  709. else:
  710. facts['node']['storage_plugin_deps'] = []
  711. return facts
  712. def set_version_facts_if_unset(facts):
  713. """ Set version facts. This currently includes common.version and
  714. common.version_gte_3_1_or_1_1.
  715. Args:
  716. facts (dict): existing facts
  717. Returns:
  718. dict: the facts dict updated with version facts.
  719. """
  720. if 'common' in facts:
  721. deployment_type = facts['common']['deployment_type']
  722. version = get_openshift_version(facts)
  723. if version:
  724. facts['common']['version'] = version
  725. if deployment_type == 'origin':
  726. version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0')
  727. version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1')
  728. version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0')
  729. version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('1.3.0')
  730. version_gte_3_4_or_1_4 = LooseVersion(version) >= LooseVersion('1.4.0')
  731. else:
  732. version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905')
  733. version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1')
  734. version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901')
  735. version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('3.3.0')
  736. version_gte_3_4_or_1_4 = LooseVersion(version) >= LooseVersion('3.4.0')
  737. else:
  738. version_gte_3_1_or_1_1 = True
  739. version_gte_3_1_1_or_1_1_1 = True
  740. version_gte_3_2_or_1_2 = True
  741. version_gte_3_3_or_1_3 = True
  742. version_gte_3_4_or_1_4 = False
  743. facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
  744. facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
  745. facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
  746. facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
  747. facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
  748. if version_gte_3_4_or_1_4:
  749. examples_content_version = 'v1.4'
  750. elif version_gte_3_3_or_1_3:
  751. examples_content_version = 'v1.3'
  752. elif version_gte_3_2_or_1_2:
  753. examples_content_version = 'v1.2'
  754. elif version_gte_3_1_or_1_1:
  755. examples_content_version = 'v1.1'
  756. else:
  757. examples_content_version = 'v1.0'
  758. facts['common']['examples_content_version'] = examples_content_version
  759. return facts
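# Illustrative sketch (not part of the upstream module): with an origin version
# already recorded in the facts, the version_gte_* booleans and
# examples_content_version are derived from it without shelling out. The version
# string below is hypothetical.
def _example_set_version_facts_if_unset():
    facts = {'common': {'deployment_type': 'origin', 'version': '1.3.0+52492b4'}}
    facts = set_version_facts_if_unset(facts)
    assert facts['common']['version'] == '1.3.0'
    assert facts['common']['version_gte_3_3_or_1_3']
    assert not facts['common']['version_gte_3_4_or_1_4']
    assert facts['common']['examples_content_version'] == 'v1.3'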
  760. def set_manageiq_facts_if_unset(facts):
  761. """ Set manageiq facts. This currently includes common.use_manageiq.
  762. Args:
  763. facts (dict): existing facts
  764. Returns:
  765. dict: the facts dict updated with version facts.
  766. Raises:
  767. OpenShiftFactsInternalError:
  768. """
769. if 'common' not in facts or \
770. 'version_gte_3_1_or_1_1' not in facts['common']:
  771. raise OpenShiftFactsInternalError(
  772. "Invalid invocation: The required facts are not set"
  773. )
  774. if 'use_manageiq' not in facts['common']:
  775. facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1']
  776. return facts
  777. def set_sdn_facts_if_unset(facts, system_facts):
  778. """ Set sdn facts if not already present in facts dict
  779. Args:
  780. facts (dict): existing facts
  781. system_facts (dict): ansible_facts
  782. Returns:
  783. dict: the facts dict updated with the generated sdn facts if they
  784. were not already present
  785. """
  786. if 'common' in facts:
  787. use_sdn = facts['common']['use_openshift_sdn']
  788. if not (use_sdn == '' or isinstance(use_sdn, bool)):
  789. use_sdn = safe_get_bool(use_sdn)
  790. facts['common']['use_openshift_sdn'] = use_sdn
  791. if 'sdn_network_plugin_name' not in facts['common']:
  792. plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
  793. facts['common']['sdn_network_plugin_name'] = plugin
  794. if 'master' in facts:
  795. # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
  796. # these might be overridden if they exist in the master config file
  797. sdn_cluster_network_cidr = '10.128.0.0/14'
  798. sdn_host_subnet_length = '9'
  799. master_cfg_path = os.path.join(facts['common']['config_base'],
  800. 'master/master-config.yaml')
  801. if os.path.isfile(master_cfg_path):
  802. with open(master_cfg_path, 'r') as master_cfg_f:
  803. config = yaml.safe_load(master_cfg_f.read())
  804. if 'networkConfig' in config:
  805. if 'clusterNetworkCIDR' in config['networkConfig']:
  806. sdn_cluster_network_cidr = \
  807. config['networkConfig']['clusterNetworkCIDR']
  808. if 'hostSubnetLength' in config['networkConfig']:
  809. sdn_host_subnet_length = \
  810. config['networkConfig']['hostSubnetLength']
  811. if 'sdn_cluster_network_cidr' not in facts['master']:
  812. facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
  813. if 'sdn_host_subnet_length' not in facts['master']:
  814. facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length
  815. if 'node' in facts and 'sdn_mtu' not in facts['node']:
  816. node_ip = facts['common']['ip']
  817. # default MTU if interface MTU cannot be detected
  818. facts['node']['sdn_mtu'] = '1450'
  819. for val in itervalues(system_facts):
  820. if isinstance(val, dict) and 'mtu' in val:
  821. mtu = val['mtu']
  822. if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
  823. facts['node']['sdn_mtu'] = str(mtu - 50)
  824. return facts
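# Illustrative sketch (not part of the upstream module): when the interface that
# carries the node IP is found in the Ansible system facts, sdn_mtu is set to its
# MTU minus 50 to leave room for the SDN encapsulation. Values are hypothetical.
def _example_set_sdn_facts_if_unset():
    facts = {'common': {'use_openshift_sdn': True, 'ip': '10.0.0.5'}, 'node': {}}
    system_facts = {'ansible_eth0': {'mtu': 9000,
                                     'ipv4': {'address': '10.0.0.5'}}}
    facts = set_sdn_facts_if_unset(facts, system_facts)
    assert facts['common']['sdn_network_plugin_name'] == 'redhat/openshift-ovs-subnet'
    assert facts['node']['sdn_mtu'] == '8950'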
  825. def set_nodename(facts):
  826. if 'node' in facts and 'common' in facts:
  827. if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
  828. facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
  829. else:
  830. facts['node']['nodename'] = facts['common']['hostname'].lower()
  831. return facts
  832. def migrate_oauth_template_facts(facts):
  833. """
  834. Migrate an old oauth template fact to a newer format if it's present.
  835. The legacy 'oauth_template' fact was just a filename, and assumed you were
  836. setting the 'login' template.
  837. The new pluralized 'oauth_templates' fact is a dict mapping the template
  838. name to a filename.
  839. Simplify the code after this by merging the old fact into the new.
  840. """
  841. if 'master' in facts and 'oauth_template' in facts['master']:
  842. if 'oauth_templates' not in facts['master']:
  843. facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']}
  844. elif 'login' not in facts['master']['oauth_templates']:
  845. facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
  846. return facts
  847. def format_url(use_ssl, hostname, port, path=''):
  848. """ Format url based on ssl flag, hostname, port and path
  849. Args:
  850. use_ssl (bool): is ssl enabled
  851. hostname (str): hostname
  852. port (str): port
  853. path (str): url path
  854. Returns:
  855. str: The generated url string
  856. """
  857. scheme = 'https' if use_ssl else 'http'
  858. netloc = hostname
  859. if (use_ssl and port != '443') or (not use_ssl and port != '80'):
  860. netloc += ":%s" % port
  861. try:
  862. url = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
  863. except AttributeError:
  864. # pylint: disable=undefined-variable
  865. url = urlunparse((scheme, netloc, path, '', '', ''))
  866. return url
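# Illustrative sketch (not part of the upstream module): the default port for the
# chosen scheme is omitted from the URL, any other port is appended to the netloc.
# Hostnames and ports below are hypothetical (ports are passed as strings).
def _example_format_url():
    assert format_url(True, 'master.example.com', '8443', 'console') == \
        'https://master.example.com:8443/console'
    assert format_url(True, 'master.example.com', '443') == 'https://master.example.com'
    assert format_url(False, 'master.example.com', '80') == 'http://master.example.com'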
  867. def get_current_config(facts):
  868. """ Get current openshift config
  869. Args:
  870. facts (dict): existing facts
  871. Returns:
  872. dict: the facts dict updated with the current openshift config
  873. """
  874. current_config = dict()
  875. roles = [role for role in facts if role not in ['common', 'provider']]
  876. for role in roles:
  877. if 'roles' in current_config:
  878. current_config['roles'].append(role)
  879. else:
  880. current_config['roles'] = [role]
  881. # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
  882. # determine the location of files.
  883. # TODO: I suspect this isn't working right now, but it doesn't prevent
  884. # anything from working properly as far as I can tell, perhaps because
  885. # we override the kubeconfig path everywhere we use it?
  886. # Query kubeconfig settings
  887. kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
  888. if role == 'node':
  889. kubeconfig_dir = os.path.join(
  890. kubeconfig_dir, "node-%s" % facts['common']['hostname']
  891. )
  892. kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
  893. if (os.path.isfile('/usr/bin/openshift')
  894. and os.path.isfile(kubeconfig_path)):
  895. try:
  896. _, output, _ = module.run_command(
  897. ["/usr/bin/openshift", "ex", "config", "view", "-o",
  898. "json", "--kubeconfig=%s" % kubeconfig_path],
  899. check_rc=False
  900. )
  901. config = json.loads(output)
  902. cad = 'certificate-authority-data'
  903. try:
  904. for cluster in config['clusters']:
  905. config['clusters'][cluster][cad] = 'masked'
  906. except KeyError:
  907. pass
  908. try:
  909. for user in config['users']:
  910. config['users'][user][cad] = 'masked'
  911. config['users'][user]['client-key-data'] = 'masked'
  912. except KeyError:
  913. pass
  914. current_config['kubeconfig'] = config
  915. # override pylint broad-except warning, since we do not want
  916. # to bubble up any exceptions if oc config view
  917. # fails
  918. # pylint: disable=broad-except
  919. except Exception:
  920. pass
  921. return current_config
  922. def build_kubelet_args(facts):
  923. """Build node kubelet_args
  924. In the node-config.yaml file, kubeletArgument sub-keys have their
  925. values provided as a list. Hence the gratuitous use of ['foo'] below.
  926. """
  927. cloud_cfg_path = os.path.join(
  928. facts['common']['config_base'],
  929. 'cloudprovider')
  930. # We only have to do this stuff on hosts that are nodes
  931. if 'node' in facts:
  932. # Any changes to the kubeletArguments parameter are stored
  933. # here first.
  934. kubelet_args = {}
  935. if 'cloudprovider' in facts:
  936. # EVERY cloud is special <3
  937. if 'kind' in facts['cloudprovider']:
  938. if facts['cloudprovider']['kind'] == 'aws':
  939. kubelet_args['cloud-provider'] = ['aws']
  940. kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
  941. if facts['cloudprovider']['kind'] == 'openstack':
  942. kubelet_args['cloud-provider'] = ['openstack']
  943. kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
  944. if facts['cloudprovider']['kind'] == 'gce':
  945. kubelet_args['cloud-provider'] = ['gce']
  946. kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
  947. # Automatically add node-labels to the kubeletArguments
  948. # parameter. See BZ1359848 for additional details.
  949. #
  950. # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
  951. if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict):
  952. # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns
  953. # into ['foo=bar', 'a=b']
  954. #
  955. # On the openshift_node_labels inventory variable we loop
  956. # over each key-value tuple (from .items()) and join the
  957. # key to the value with an '=' character, this produces a
  958. # list.
  959. #
  960. # map() seems to be returning an itertools.imap object
  961. # instead of a list. We cast it to a list ourselves.
  962. labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
963. if labels_str:
  964. kubelet_args['node-labels'] = labels_str
  965. # If we've added items to the kubelet_args dict then we need
  966. # to merge the new items back into the main facts object.
  967. if kubelet_args != {}:
  968. facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
  969. return facts
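# Illustrative sketch (not part of the upstream module): a node labels dict is
# flattened into the ['key=value', ...] list form that kubeletArguments expects
# for node-labels. The config_base and labels below are hypothetical.
def _example_build_kubelet_args_labels():
    facts = {'common': {'config_base': '/etc/origin'},
             'node': {'labels': {'region': 'infra', 'zone': 'default'}}}
    facts = build_kubelet_args(facts)
    assert sorted(facts['node']['kubelet_args']['node-labels']) == \
        ['region=infra', 'zone=default']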
  970. def build_controller_args(facts):
  971. """ Build master controller_args """
  972. cloud_cfg_path = os.path.join(facts['common']['config_base'],
  973. 'cloudprovider')
  974. if 'master' in facts:
  975. controller_args = {}
  976. if 'cloudprovider' in facts:
  977. if 'kind' in facts['cloudprovider']:
  978. if facts['cloudprovider']['kind'] == 'aws':
  979. controller_args['cloud-provider'] = ['aws']
  980. controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
  981. if facts['cloudprovider']['kind'] == 'openstack':
  982. controller_args['cloud-provider'] = ['openstack']
  983. controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
  984. if facts['cloudprovider']['kind'] == 'gce':
  985. controller_args['cloud-provider'] = ['gce']
  986. controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
  987. if controller_args != {}:
  988. facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
  989. return facts
  990. def build_api_server_args(facts):
  991. """ Build master api_server_args """
  992. cloud_cfg_path = os.path.join(facts['common']['config_base'],
  993. 'cloudprovider')
  994. if 'master' in facts:
  995. api_server_args = {}
  996. if 'cloudprovider' in facts:
  997. if 'kind' in facts['cloudprovider']:
  998. if facts['cloudprovider']['kind'] == 'aws':
  999. api_server_args['cloud-provider'] = ['aws']
  1000. api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
  1001. if facts['cloudprovider']['kind'] == 'openstack':
  1002. api_server_args['cloud-provider'] = ['openstack']
  1003. api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
  1004. if facts['cloudprovider']['kind'] == 'gce':
  1005. api_server_args['cloud-provider'] = ['gce']
  1006. api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
  1007. if api_server_args != {}:
  1008. facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
  1009. return facts
  1010. def is_service_running(service):
  1011. """ Queries systemd through dbus to see if the service is running """
  1012. service_running = False
  1013. bus = SystemBus()
  1014. systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
  1015. manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
  1016. try:
  1017. service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
  1018. service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
  1019. service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
  1020. service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
  1021. service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
  1022. if service_load_state == 'loaded' and service_active_state == 'active':
  1023. service_running = True
  1024. except DBusException:
  1025. pass
  1026. return service_running
  1027. def get_version_output(binary, version_cmd):
  1028. """ runs and returns the version output for a command """
  1029. cmd = []
  1030. for item in (binary, version_cmd):
  1031. if isinstance(item, list):
  1032. cmd.extend(item)
  1033. else:
  1034. cmd.append(item)
  1035. if os.path.isfile(cmd[0]):
  1036. _, output, _ = module.run_command(cmd)
  1037. return output
  1038. def get_docker_version_info():
  1039. """ Parses and returns the docker version info """
  1040. result = None
  1041. if is_service_running('docker'):
  1042. version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
  1043. if 'Server' in version_info:
  1044. result = {
  1045. 'api_version': version_info['Server']['API version'],
  1046. 'version': version_info['Server']['Version']
  1047. }
  1048. return result
  1049. def get_hosted_registry_insecure():
  1050. """ Parses OPTIONS from /etc/sysconfig/docker to determine if the
  1051. registry is currently insecure.
  1052. """
  1053. hosted_registry_insecure = None
  1054. if os.path.exists('/etc/sysconfig/docker'):
  1055. try:
  1056. ini_str = unicode('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
  1057. ini_fp = io.StringIO(ini_str)
  1058. config = ConfigParser.RawConfigParser()
  1059. config.readfp(ini_fp)
  1060. options = config.get('root', 'OPTIONS')
  1061. if 'insecure-registry' in options:
  1062. hosted_registry_insecure = True
1063. except Exception:  # pylint: disable=broad-except
  1064. pass
  1065. return hosted_registry_insecure
  1066. def get_openshift_version(facts):
  1067. """ Get current version of openshift on the host.
  1068. Checks a variety of ways ranging from fastest to slowest.
  1069. Args:
  1070. facts (dict): existing facts
1071. (an existing facts['common']['version'] value is reused when present)
  1072. Returns:
  1073. version: the current openshift version
  1074. """
  1075. version = None
  1076. # No need to run this method repeatedly on a system if we already know the
  1077. # version
  1078. if 'common' in facts:
  1079. if 'version' in facts['common'] and facts['common']['version'] is not None:
  1080. return chomp_commit_offset(facts['common']['version'])
  1081. if os.path.isfile('/usr/bin/openshift'):
  1082. _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
  1083. version = parse_openshift_version(output)
  1084. elif 'common' in facts and 'is_containerized' in facts['common']:
  1085. version = get_container_openshift_version(facts)
  1086. # Handle containerized masters that have not yet been configured as a node.
  1087. # This can be very slow and may get re-run multiple times, so we only use this
  1088. # if other methods failed to find a version.
  1089. if not version and os.path.isfile('/usr/local/bin/openshift'):
  1090. _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])
  1091. version = parse_openshift_version(output)
  1092. return chomp_commit_offset(version)
def chomp_commit_offset(version):
    """Chomp any "+git.foo" commit offset string from the given `version`
    and return the modified version string.

    Ex:
    - chomp_commit_offset(None) => None
    - chomp_commit_offset(1337) => "1337"
    - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
    - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
    - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
    """
    if version is None:
        return version
    else:
        # Stringify, just in case it's a Number type. Split by '+' and
        # return the first split. No concerns about strings without a
        # '+', .split() returns an array of the original string.
        return str(version).split('+')[0]

def get_container_openshift_version(facts):
    """
    If containerized, see if we can determine the installed version via the
    systemd environment files.
    """
    for filename in ['/etc/sysconfig/%s-master', '/etc/sysconfig/%s-node']:
        env_path = filename % facts['common']['service_type']
        if not os.path.exists(env_path):
            continue

        with open(env_path) as env_file:
            for line in env_file:
                if line.startswith("IMAGE_VERSION="):
                    tag = line[len("IMAGE_VERSION="):].strip()
                    # Remove leading "v" and any trailing release info, we just want
                    # a version number here:
                    version = tag[1:].split("-")[0]
                    return version
    return None

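
# Illustrative sketch (hypothetical file content): given a line such as
#
#   IMAGE_VERSION=v3.3.0.32-1
#
# in /etc/sysconfig/<service_type>-master, the leading 'v' and the trailing
# '-1' release suffix are stripped and '3.3.0.32' is returned.
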
def parse_openshift_version(output):
    """ Parse the version number from the output of 'openshift version'

        Args:
            output (string): output of 'openshift version'

        Returns:
            string: the version number
    """
    versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
    ver = versions.get('openshift', '')
    # Remove trailing build number and commit hash from older versions, we need to return a straight
    # w.x.y.z version here for use as openshift_version throughout the playbooks/roles. (i.e. 3.1.1.6-64-g80b61da)
    ver = ver.split('-')[0]
    return ver

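
# Illustrative sketch (hypothetical output): 'openshift version' prints lines like
#
#   openshift v3.1.1.6-64-g80b61da
#   kubernetes v1.1.0-origin-1107-g4c8e6f4
#   etcd 2.1.2
#
# parse_openshift_version() keys each line on the text before ' v', so the
# sample above would yield '3.1.1.6' after the trailing '-64-g80b61da' is
# chopped; lines without ' v' (like the etcd one) are ignored.
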
def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to supplied facts dict

        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply

        Returns:
            dict: the merged facts
    """
    if not provider_facts:
        return facts

    common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
    for h_var, ip_var in common_vars:
        ip_value = provider_facts['network'].get(ip_var)
        if ip_value:
            facts['common'][ip_var] = ip_value

        facts['common'][h_var] = choose_hostname(
            [provider_facts['network'].get(h_var)],
            facts['common'][h_var]
        )

    facts['provider'] = provider_facts
    return facts

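
# Illustrative sketch (hypothetical provider data): given
#   provider_facts = {'network': {'ip': '10.0.0.5', 'public_ip': '1.2.3.4',
#                                 'hostname': 'node1.internal',
#                                 'public_hostname': 'node1.example.com'}}
# the common ip/public_ip facts are overwritten with the provider values and
# hostname/public_hostname are re-chosen via choose_hostname(), preferring the
# provider-supplied names when they are usable.
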
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
    """ Recursively merge facts dicts

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    protected_facts = ['ha', 'master_count']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config']

    facts = dict()
    for key, value in iteritems(orig):
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watchout for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], basestring):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)

                # Collect the subset of protected facts to overwrite
                # if key matches. These will be passed to the
                # subsequent merge_facts call.
                relevant_protected_facts = []
                for item in protected_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_protected_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # Key matches a protected fact and we are not overwriting
            # it so we will determine if it is okay to change this
            # fact.
            elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                # The master count (int) can only increase unless it
                # has been passed as a protected fact to overwrite.
                if key == 'master_count':
                    if int(value) <= int(new[key]):
                        facts[key] = copy.deepcopy(new[key])
                    else:
                        module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')
                # ha (bool) can not change unless it has been passed
                # as a protected fact to overwrite.
                if key == 'ha':
                    if safe_get_bool(value) != safe_get_bool(new[key]):
                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')
                    else:
                        facts[key] = value
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watchout for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], basestring):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts

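
# Illustrative sketch (hypothetical facts): merging
#   orig = {'master': {'named_certificates': [{'certfile': 'a.crt'}], 'master_count': 1}}
#   new  = {'master': {'named_certificates': [{'certfile': 'b.crt'}], 'master_count': 3}}
# with empty overwrite lists appends to the additive 'named_certificates' list
# (keeping both entries, de-duplicated) and accepts the higher 'master_count'.
# Passing ['master.named_certificates'] as additive_facts_to_overwrite would
# instead replace the list outright with the new value.
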
def save_local_facts(filename, facts):
    """ Save local facts

        Args:
            filename (str): local facts file
            facts (dict): facts to set
    """
    try:
        fact_dir = os.path.dirname(filename)
        try:
            os.makedirs(fact_dir)  # try to make the directory
        except OSError as exception:
            if exception.errno != errno.EEXIST:  # but it is okay if it is already there
                raise  # pass any other exceptions up the chain
        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )

def get_local_facts_from_file(filename):
    """ Retrieve local facts from fact file

        Args:
            filename (str): local facts file

        Returns:
            dict: the retrieved facts
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style
        ini_facts = ConfigParser.SafeConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict()
            for key, value in ini_facts.items(section):
                local_facts[section][key] = value
    except (ConfigParser.MissingSectionHeaderError,
            ConfigParser.ParsingError):
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass
    return local_facts

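
# Illustrative sketch (hypothetical file content): an older INI-style
# /etc/ansible/facts.d/openshift.fact such as
#
#   [common]
#   deployment_type = origin
#
# loads as {'common': {'deployment_type': 'origin'}}; a newer JSON-style file
# fails INI parsing and falls through to the json.load() branch, producing the
# same dict shape. Unreadable files simply yield an empty dict.
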
def sort_unique(alist):
    """ Sorts and de-dupes a list

        Args:
            list: a list
        Returns:
            list: a sorted de-duped list
    """
    alist.sort()
    out = list()
    for i in alist:
        if i not in out:
            out.append(i)
    return out

def safe_get_bool(fact):
    """ Get a boolean fact safely.

        Args:
            fact: fact to convert
        Returns:
            bool: given fact as a bool
    """
    return bool(strtobool(str(fact)))

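
# Illustrative note: distutils' strtobool treats 'y', 'yes', 't', 'true', 'on'
# and '1' as truthy and 'n', 'no', 'f', 'false', 'off' and '0' as falsey, so
# safe_get_bool(True), safe_get_bool('True') and safe_get_bool('1') all return
# True, while anything else raises ValueError.
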
def set_proxy_facts(facts):
    """ Set global proxy facts and promote defaults from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'common' in facts:
        common = facts['common']
        if 'http_proxy' in common or 'https_proxy' in common:
            if 'no_proxy' in common and \
                    isinstance(common['no_proxy'], basestring):
                common['no_proxy'] = common['no_proxy'].split(",")
            elif 'no_proxy' not in common:
                common['no_proxy'] = []
            if 'generate_no_proxy_hosts' in common and \
                    safe_get_bool(common['generate_no_proxy_hosts']):
                if 'no_proxy_internal_hostnames' in common:
                    common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
            # We always add local dns domain and ourselves no matter what
            common['no_proxy'].append('.' + common['dns_domain'])
            common['no_proxy'].append(common['hostname'])
            common['no_proxy'] = sort_unique(common['no_proxy'])
        facts['common'] = common

    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Copy values from common to builddefaults
        if 'http_proxy' not in builddefaults and 'http_proxy' in common:
            builddefaults['http_proxy'] = common['http_proxy']
        if 'https_proxy' not in builddefaults and 'https_proxy' in common:
            builddefaults['https_proxy'] = common['https_proxy']
        # make no_proxy into a list if it's not
        if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], basestring):
            builddefaults['no_proxy'] = builddefaults['no_proxy'].split(",")
        if 'no_proxy' not in builddefaults and 'no_proxy' in common:
            builddefaults['no_proxy'] = common['no_proxy']
        if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
            builddefaults['git_http_proxy'] = builddefaults['http_proxy']
        if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
            builddefaults['git_https_proxy'] = builddefaults['https_proxy']
        # If we're actually defining a proxy config then create admission_plugin_config
        # if it doesn't exist, then merge builddefaults[config] structure
        # into admission_plugin_config
        if 'admission_plugin_config' not in facts['master']:
            facts['master']['admission_plugin_config'] = dict()
        if 'config' in builddefaults and ('http_proxy' in builddefaults or
                                          'https_proxy' in builddefaults):
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
        facts['builddefaults'] = builddefaults

    return facts

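
# Illustrative sketch (hypothetical facts): with
#   facts['common'] = {'http_proxy': 'http://proxy:3128', 'dns_domain': 'cluster.local',
#                      'hostname': 'master1.example.com', 'generate_no_proxy_hosts': True,
#                      'no_proxy_internal_hostnames': 'node1.example.com,node2.example.com'}
# no_proxy becomes a sorted, de-duped list containing the internal hostnames,
# '.cluster.local' and 'master1.example.com'. If a 'builddefaults' fact dict is
# present, http_proxy/https_proxy/no_proxy are then copied down into it (and
# into the git_* variants) wherever they are not already set.
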
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated containerization
            facts
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type in ['enterprise', 'openshift-enterprise']:
        master_image = 'openshift3/ose'
        cli_image = master_image
        node_image = 'openshift3/node'
        ovs_image = 'openshift3/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd3'
        pod_image = 'openshift3/ose-pod'
        router_image = 'openshift3/ose-haproxy-router'
        registry_image = 'openshift3/ose-docker-registry'
        deployer_image = 'openshift3/ose-deployer'
    elif deployment_type == 'atomic-enterprise':
        master_image = 'aep3_beta/aep'
        cli_image = master_image
        node_image = 'aep3_beta/node'
        ovs_image = 'aep3_beta/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd3'
        pod_image = 'aep3_beta/aep-pod'
        router_image = 'aep3_beta/aep-haproxy-router'
        registry_image = 'aep3_beta/aep-docker-registry'
        deployer_image = 'aep3_beta/aep-deployer'
    else:
        master_image = 'openshift/origin'
        cli_image = master_image
        node_image = 'openshift/node'
        ovs_image = 'openshift/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd3'
        pod_image = 'openshift/origin-pod'
        router_image = 'openshift/origin-haproxy-router'
        registry_image = 'openshift/origin-docker-registry'
        deployer_image = 'openshift/origin-deployer'

    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
    if 'is_containerized' not in facts['common']:
        facts['common']['is_containerized'] = facts['common']['is_atomic']
    if 'cli_image' not in facts['common']:
        facts['common']['cli_image'] = cli_image
    if 'pod_image' not in facts['common']:
        facts['common']['pod_image'] = pod_image
    if 'router_image' not in facts['common']:
        facts['common']['router_image'] = router_image
    if 'registry_image' not in facts['common']:
        facts['common']['registry_image'] = registry_image
    if 'deployer_image' not in facts['common']:
        facts['common']['deployer_image'] = deployer_image
    if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
        facts['etcd']['etcd_image'] = etcd_image
    if 'master' in facts and 'master_image' not in facts['master']:
        facts['master']['master_image'] = master_image
    if 'node' in facts:
        if 'node_image' not in facts['node']:
            facts['node']['node_image'] = node_image
        if 'ovs_image' not in facts['node']:
            facts['node']['ovs_image'] = ovs_image

    if safe_get_bool(facts['common']['is_containerized']):
        facts['common']['admin_binary'] = '/usr/local/bin/oadm'
        facts['common']['client_binary'] = '/usr/local/bin/oc'

    return facts

def set_installed_variant_rpm_facts(facts):
    """ Set RPM facts of installed variant

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with installed_variant_rpms
    """
    installed_rpms = []
    for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
        optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
        variant_rpms = [base_rpm] + \
                       ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
                       ['tuned-profiles-%s-node' % base_rpm]
        for rpm in variant_rpms:
            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])
            if exit_code == 0:
                installed_rpms.append(rpm)

    facts['common']['installed_variant_rpms'] = installed_rpms
    return facts

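
# Illustrative note: for the 'origin' variant the candidate packages probed with
# 'rpm -q' are origin, origin-master, origin-node, origin-clients,
# origin-sdn-ovs and tuned-profiles-origin-node; only those actually installed
# end up in openshift.common.installed_variant_rpms.
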
class OpenShiftFactsInternalError(Exception):
    """Origin Facts Error"""
    pass


class OpenShiftFactsUnsupportedRoleError(Exception):
    """Origin Facts Unsupported Role Error"""
    pass


class OpenShiftFactsFileWriteError(Exception):
    """Origin Facts File Write Error"""
    pass


class OpenShiftFactsMetadataUnavailableError(Exception):
    """Origin Facts Metadata Unavailable Error"""
    pass

class OpenShiftFacts(object):
    """ Origin Facts

        Attributes:
            facts (dict): facts for the host

        Args:
            module (AnsibleModule): an AnsibleModule object
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Raises:
            OpenShiftFactsUnsupportedRoleError:
    """
    known_roles = ['builddefaults',
                   'clock',
                   'cloudprovider',
                   'common',
                   'docker',
                   'etcd',
                   'hosted',
                   'master',
                   'node']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None,
                 openshift_env=None,
                 openshift_env_structures=None,
                 protected_facts_to_overwrite=None):
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        try:
            # ansible-2.1
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])
            for (k, v) in self.system_facts.items():
                self.system_facts["ansible_%s" % k.replace('-', '_')] = v
        except UnboundLocalError:
            # ansible-2.2
            self.system_facts = get_all_facts(module)['ansible_facts']

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite,
                                         openshift_env,
                                         openshift_env_structures,
                                         protected_facts_to_overwrite)

    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite,
                       openshift_env,
                       openshift_env_structures,
                       protected_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift_env facts for overriding generated defaults
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The generated facts
        """
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite,
                                            openshift_env,
                                            openshift_env_structures,
                                            protected_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite,
                            protected_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        facts = set_url_facts_if_unset(facts)
        facts = set_project_cfg_facts_if_unset(facts)
        facts = set_flannel_facts_if_unset(facts)
        facts = set_nuage_facts_if_unset(facts)
        facts = set_node_schedulability(facts)
        facts = set_selectors(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_dnsmasq_facts_if_unset(facts)
        facts = set_manageiq_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        facts = set_proxy_facts(facts)
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)

    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host

            Returns:
                dict: The generated default facts
        """
        defaults = {}
        ip_addr = self.system_facts['ansible_default_ipv4']['address']
        exit_code, output, _ = module.run_command(['hostname', '-f'])
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr)

        defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc', admin_binary='oadm',
                                  dns_domain='cluster.local',
                                  install_examples=True,
                                  debug_level=2)

        if 'master' in roles:
            scheduler_predicates = [
                {"name": "MatchNodeSelector"},
                {"name": "PodFitsResources"},
                {"name": "PodFitsPorts"},
                {"name": "NoDiskConflict"},
                {"name": "NoVolumeZoneConflict"},
                {"name": "MaxEBSVolumeCount"},
                {"name": "MaxGCEPDVolumeCount"},
                {"name": "Region", "argument": {"serviceAffinity": {"labels": ["region"]}}}
            ]
            scheduler_priorities = [
                {"name": "LeastRequestedPriority", "weight": 1},
                {"name": "SelectorSpreadPriority", "weight": 1},
                {"name": "Zone", "weight": 2, "argument": {"serviceAntiAffinity": {"label": "zone"}}}
            ]

            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_etcd=True, embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      scheduler_predicates=scheduler_predicates,
                                      scheduler_priorities=scheduler_priorities,
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='30s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)

        if 'docker' in roles:
            docker = dict(disable_push_dockerhub=False,
                          options='--log-driver=json-file --log-opt max-size=50m')
            version_info = get_docker_version_info()
            if version_info is not None:
                docker['api_version'] = version_info['api_version']
                docker['version'] = version_info['version']
                docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10')
            hosted_registry_insecure = get_hosted_registry_insecure()
            if hosted_registry_insecure is not None:
                docker['hosted_registry_insecure'] = hosted_registry_insecure
            defaults['docker'] = docker

        if 'clock' in roles:
            exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])
            chrony_installed = bool(exit_code == 0)
            defaults['clock'] = dict(
                enabled=True,
                chrony_installed=chrony_installed)

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        if 'hosted' in roles or self.role == 'hosted':
            defaults['hosted'] = dict(
                metrics=dict(
                    deploy=False,
                    duration=7,
                    resolution='10s',
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='metrics',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access_modes=['ReadWriteOnce'],
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                logging=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='logging-es',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access_modes=['ReadWriteOnce'],
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                registry=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='registry',
                            size='5Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        host=None,
                        access_modes=['ReadWriteMany'],
                        create_pv=True,
                        create_pvc=True
                    )
                ),
                router=dict()
            )

        return defaults

    def guess_host_provider(self):
        """ Guess the host provider

            Returns:
                dict: The generated default facts for the detected provider
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['ansible_product_name']
        product_version = self.system_facts['ansible_product_version']
        virt_type = self.system_facts['ansible_virtualization_type']
        virt_role = self.system_facts['ansible_virtualization_role']
        provider = None
        metadata = None

        # TODO: this is not exposed through module_utils/facts.py in ansible,
        # need to create PR for ansible to expose it
        bios_vendor = get_file_content(
            '/sys/devices/virtual/dmi/id/bios_vendor'
        )
        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif (virt_type == 'xen' and virt_role == 'guest'
              and re.match(r'.*\.amazon$', product_version)):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)

    def init_provider_facts(self):
        """ Initialize the provider facts

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts

    @staticmethod
    def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
        """ Split openshift_env facts based on openshift_env structures.

            Args:
                openshift_env_fact (string): the openshift_env fact to split
                                             ex: 'openshift_cloudprovider_openstack_auth_url'
                openshift_env_structures (list): a list of structures to determine fact keys
                                                 ex: ['openshift.cloudprovider.openstack.*']
            Returns:
                list: a list of keys that represent the fact
                      ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
        """
        # By default, we'll split an openshift_env fact by underscores.
        fact_keys = openshift_env_fact.split('_')

        # Determine if any of the provided variable structures match the fact.
        matching_structure = None
        if openshift_env_structures is not None:
            for structure in openshift_env_structures:
                if re.match(structure, openshift_env_fact):
                    matching_structure = structure
        # Fact didn't match any variable structures so return the default fact keys.
        if matching_structure is None:
            return fact_keys

        final_keys = []
        structure_keys = matching_structure.split('.')
        for structure_key in structure_keys:
            # Matched current key. Add to final keys.
            if structure_key == fact_keys[structure_keys.index(structure_key)]:
                final_keys.append(structure_key)
            # Wildcard means we will be taking everything from here to the end of the fact.
            elif structure_key == '*':
                final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
            # Shouldn't have gotten here, return the fact keys.
            else:
                return fact_keys
        return final_keys

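
    # Illustrative sketch (hypothetical inputs): with
    #   openshift_env_fact = 'openshift_cloudprovider_openstack_auth_url'
    #   openshift_env_structures = ['openshift.cloudprovider.openstack.*']
    # the structure matches, so the first three underscore-separated keys are
    # kept as-is and the wildcard swallows the remainder, giving
    #   ['openshift', 'cloudprovider', 'openstack', 'auth_url'];
    # without a matching structure the fact is simply split on every underscore.
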
    # Disabling too-many-branches and too-many-locals.
    # This should be cleaned up as a TODO item.
    # pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None,
                         openshift_env=None,
                         openshift_env_structures=None,
                         protected_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift env facts to set
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']

            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        if openshift_env != {} and openshift_env is not None:
            for fact, value in iteritems(openshift_env):
                oo_env_facts = dict()
                current_level = oo_env_facts
                keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
                if len(keys) > 0 and keys[0] != self.role:
                    continue
                for key in keys:
                    if key == keys[-1]:
                        current_level[key] = value
                    elif key not in current_level:
                        current_level[key] = dict()
                        current_level = current_level[key]
                facts_to_set = merge_facts(orig=facts_to_set,
                                           new=oo_env_facts,
                                           additive_facts_to_overwrite=[],
                                           protected_facts_to_overwrite=[])

        local_facts = get_local_facts_from_file(self.filename)

        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite,
                                      protected_facts_to_overwrite)

        if 'docker' in new_local_facts:
            # remove duplicate and empty strings from registry lists
            for cat in ['additional', 'blocked', 'insecure']:
                key = '{0}_registries'.format(cat)
                if key in new_local_facts['docker']:
                    val = new_local_facts['docker'][key]
                    if isinstance(val, basestring):
                        val = [x.strip() for x in val.split(',')]
                    new_local_facts['docker'][key] = list(set(val) - set(['']))
            # Convert legacy log_options comma sep string to a list if present:
            if 'log_options' in new_local_facts['docker'] and \
                    isinstance(new_local_facts['docker']['log_options'], basestring):
                new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')

        new_local_facts = self.remove_empty_facts(new_local_facts)

        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts

    def remove_empty_facts(self, facts=None):
        """ Remove empty facts

            Args:
                facts (dict): facts to clean
        """
        facts_to_remove = []
        for fact, value in iteritems(facts):
            if isinstance(facts[fact], dict):
                facts[fact] = self.remove_empty_facts(facts[fact])
            else:
                if value == "" or value == [""] or value is None:
                    facts_to_remove.append(fact)
        for fact in facts_to_remove:
            del facts[fact]
        return facts

    def validate_local_facts(self, facts=None):
        """ Validate local facts

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            msg = 'Invalid facts detected:\n'
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            module.fail_json(msg=msg,
                             changed=self.changed)

    # disabling pylint errors for line-too-long since we're dealing
    # with best effort reduction of error messages here.
    # disabling errors for too-many-branches since we require checking
    # many conditions.
    # pylint: disable=line-too-long, too-many-branches
    @staticmethod
    def validate_master_facts(facts, invalid_facts):
        """ Validate master facts

            Args:
                facts (dict): local facts to validate
                invalid_facts (dict): collected invalid_facts

            Returns:
                dict: Invalid facts
        """
        if 'master' in facts:
            # openshift.master.session_auth_secrets
            if 'session_auth_secrets' in facts['master']:
                session_auth_secrets = facts['master']['session_auth_secrets']
                if not issubclass(type(session_auth_secrets), list):
                    invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
                elif 'session_encryption_secrets' not in facts['master']:
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption_secrets must be set '
                                                             'if openshift_master_session_auth_secrets is provided.')
                elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
                                                             'openshift_master_session_encryption_secrets must be '
                                                             'equal length.')
                else:
                    for secret in session_auth_secrets:
                        if len(secret) < 32:
                            invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
                                                                     'Secrets must be at least 32 characters in length.')
            # openshift.master.session_encryption_secrets
            if 'session_encryption_secrets' in facts['master']:
                session_encryption_secrets = facts['master']['session_encryption_secrets']
                if not issubclass(type(session_encryption_secrets), list):
                    invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
                elif 'session_auth_secrets' not in facts['master']:
                    invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
                                                                   'set if openshift_master_session_encryption_secrets '
                                                                   'is provided.')
                else:
                    for secret in session_encryption_secrets:
                        if len(secret) not in [16, 24, 32]:
                            invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
                                                                           'Secrets must be 16, 24, or 32 characters in length.')
        return invalid_facts

def main():
    """ main """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
            openshift_env=dict(default={}, type='dict', required=False),
            openshift_env_structures=dict(default=[], type='list', required=False),
            protected_facts_to_overwrite=dict(default=[], type='list', required=False)
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']
    module.params['gather_timeout'] = 10
    module.params['filter'] = '*'

    role = module.params['role']
    local_facts = module.params['local_facts']
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']
    openshift_env = module.params['openshift_env']
    openshift_env_structures = module.params['openshift_env_structures']
    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']

    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite,
                                     openshift_env,
                                     openshift_env_structures,
                                     protected_facts_to_overwrite)

    file_params = module.params.copy()
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)
    changed = module.set_fs_attributes_if_different(file_args,
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,
                            ansible_facts=openshift_facts.facts)

# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils.urls import *
from ansible.module_utils.six import iteritems, itervalues
from ansible.module_utils._text import to_native
from ansible.module_utils.six import b

if __name__ == '__main__':
    main()