#!/usr/bin/python
# pylint: disable=too-many-lines
# -*- coding: utf-8 -*-
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.

"""Ansible module for retrieving and setting openshift related facts"""

# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
import errno
import json
import re
import io
import os

import yaml
import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
from ansible.module_utils.six import string_types, text_type
from ansible.module_utils.six.moves import configparser

# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *  # noqa: F403
from ansible.module_utils.facts import *  # noqa: F403
from ansible.module_utils.urls import *  # noqa: F403
from ansible.module_utils.six import iteritems, itervalues
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
from ansible.module_utils._text import to_native

HAVE_DBUS = False

try:
    from dbus import SystemBus, Interface
    from dbus.exceptions import DBusException
    HAVE_DBUS = True
except ImportError:
    pass

DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''

EXAMPLES = '''
'''

def migrate_docker_facts(facts):
    """ Apply migrations for docker facts """
    params = {
        'common': (
            'options',  # trailing comma keeps this a tuple of fact names, not a bare string
        ),
        'node': (
            'log_driver',
            'log_options'
        )
    }
    if 'docker' not in facts:
        facts['docker'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                old_param = 'docker_' + param
                if old_param in facts[role]:
                    facts['docker'][param] = facts[role].pop(old_param)

    if 'node' in facts and 'portal_net' in facts['node']:
        facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')

    # log_options was originally meant to be a comma separated string, but
    # we now prefer an actual list, with backward compatibility:
    if 'log_options' in facts['docker'] and \
            isinstance(facts['docker']['log_options'], string_types):
        facts['docker']['log_options'] = facts['docker']['log_options'].split(",")

    return facts

# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
    """ Migrate facts from various roles into common """
    # Trailing commas make these single-element tuples rather than plain strings,
    # so the loop below iterates over fact names instead of characters.
    params = {
        'node': ('portal_net',),
        'master': ('portal_net',)
    }
    if 'common' not in facts:
        facts['common'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['common'][param] = facts[role].pop(param)
    return facts

def migrate_node_facts(facts):
    """ Migrate facts from various roles into node """
    params = {
        'common': ('dns_ip',),  # tuple of fact names, not a bare string
    }
    if 'node' not in facts:
        facts['node'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['node'][param] = facts[role].pop(param)
    return facts

def migrate_hosted_facts(facts):
    """ Apply migrations for master facts """
    if 'master' in facts:
        if 'router_selector' in facts['master']:
            if 'hosted' not in facts:
                facts['hosted'] = {}
            if 'router' not in facts['hosted']:
                facts['hosted']['router'] = {}
            facts['hosted']['router']['selector'] = facts['master'].pop('router_selector')
        if 'registry_selector' in facts['master']:
            if 'hosted' not in facts:
                facts['hosted'] = {}
            if 'registry' not in facts['hosted']:
                facts['hosted']['registry'] = {}
            facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
    return facts

def migrate_admission_plugin_facts(facts):
    """ Apply migrations for admission plugin facts """
    if 'master' in facts:
        if 'kube_admission_plugin_config' in facts['master']:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            # Merge existing kube_admission_plugin_config with admission_plugin_config.
            facts['master']['admission_plugin_config'] = merge_facts(facts['master']['admission_plugin_config'],
                                                                     facts['master']['kube_admission_plugin_config'],
                                                                     additive_facts_to_overwrite=[],
                                                                     protected_facts_to_overwrite=[])
            # Remove kube_admission_plugin_config fact
            facts['master'].pop('kube_admission_plugin_config', None)
    return facts

def migrate_local_facts(facts):
    """ Apply migrations of local facts """
    migrated_facts = copy.deepcopy(facts)
    migrated_facts = migrate_docker_facts(migrated_facts)
    migrated_facts = migrate_common_facts(migrated_facts)
    migrated_facts = migrate_node_facts(migrated_facts)
    migrated_facts = migrate_hosted_facts(migrated_facts)
    migrated_facts = migrate_admission_plugin_facts(migrated_facts)
    return migrated_facts
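# For illustration: local facts such as {'node': {'docker_log_driver': 'json-file'}}
# come back from migrate_local_facts() with the value moved under the docker role,
# i.e. {'node': {}, 'docker': {'log_driver': 'json-file'}}.
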
def first_ip(network):
    """ Return the first IPv4 address in network
        Args:
            network (str): network in CIDR format
        Returns:
            str: first IPv4 address
    """
    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]  # noqa: E731
    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))  # noqa: E731

    (address, netmask) = network.split('/')
    netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
    return itoa((atoi(address) & netmask_i) + 1)
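# For illustration: first_ip('172.30.0.0/16') evaluates to '172.30.0.1', the address
# that set_aggregate_facts() below records as common.kube_svc_ip for the portal_net.
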
def hostname_valid(hostname):
    """ Test if specified hostname should be considered valid
        Args:
            hostname (str): hostname to test
        Returns:
            bool: True if valid, otherwise False
    """
    if (not hostname or
            hostname.startswith('localhost') or
            hostname.endswith('localdomain') or
            # OpenShift will not allow a node with more than 63 chars in name.
            len(hostname) > 63):
        return False

    return True

def choose_hostname(hostnames=None, fallback=''):
    """ Choose a hostname from the provided hostnames

        Given a list of hostnames and a fallback value, choose a hostname to
        use. This function will prefer fqdns if they exist (excluding any that
        begin with localhost or end with localdomain) over ip addresses.

        Args:
            hostnames (list): list of hostnames
            fallback (str): default value to set if hostnames does not contain
                            a valid hostname
        Returns:
            str: chosen hostname
    """
    hostname = fallback
    if hostnames is None:
        return hostname

    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
    ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
    hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]

    for host_list in (hosts, ips):
        for host in host_list:
            if hostname_valid(host):
                return host

    return hostname
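# For illustration: choose_hostname(['10.0.0.1', 'node1.example.com', 'localhost.localdomain'])
# returns 'node1.example.com' -- non-IP names are preferred over IPs, and
# localhost/localdomain entries are rejected by hostname_valid().
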
def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Return metadata from the provided metadata_url
        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict or list: metadata request result
    """
    result, info = fetch_url(module, metadata_url, headers=headers)  # noqa: F405
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        return module.from_json(to_native(result.read()))  # noqa: F405
    else:
        return [to_native(line.strip()) for line in result.readlines()]

def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Walk the metadata tree and return a dictionary of the entire tree
        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the result of walking the metadata tree
    """
    metadata = dict()

    for line in query_metadata(metadata_url, headers, expect_json):
        if line.endswith('/') and not line == 'public-keys/':
            key = line[:-1]
            metadata[key] = walk_metadata(metadata_url + line,
                                          headers, expect_json)
        else:
            results = query_metadata(metadata_url + line, headers,
                                     expect_json)
            if len(results) == 1:
                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                metadata[line] = results.pop()
            else:
                metadata[line] = results

    return metadata
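# For illustration (EC2-style layout, values hypothetical): if the root listing returns
# 'instance-id' and 'placement/', entries ending in '/' are walked recursively and leaf
# values are stored, yielding something like
# {'instance-id': 'i-0123456789abcdef0', 'placement': {'availability-zone': 'us-east-1a'}}.
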
def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Retrieve the provider metadata
        Args:
            metadata_url (str): metadata url
            supports_recursive (bool): does the provider metadata api support
                                       recursion
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the provider metadata
    """
    try:
        if supports_recursive:
            metadata = query_metadata(metadata_url, headers,
                                      expect_json)
        else:
            metadata = walk_metadata(metadata_url, headers,
                                     expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        metadata = None
    return metadata

def normalize_gce_facts(metadata, facts):
    """ Normalize gce facts
        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                  facts dict
    """
    for interface in metadata['instance']['networkInterfaces']:
        int_info = dict(ips=[interface['ip']], network_type='gce')
        int_info['public_ips'] = [ac['externalIp'] for ac
                                  in interface['accessConfigs']]
        int_info['public_ips'].extend(interface['forwardedIps'])
        _, _, network_id = interface['network'].rpartition('/')
        int_info['network_id'] = network_id
        facts['network']['interfaces'].append(int_info)

    _, _, zone = metadata['instance']['zone'].rpartition('/')
    facts['zone'] = zone

    # GCE currently only supports a single interface
    facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
    pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
    facts['network']['public_ip'] = pub_ip
    # Split instance hostname from GCE metadata to use the short instance name
    facts['network']['hostname'] = metadata['instance']['hostname'].split('.')[0]

    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']

    return facts

def normalize_aws_facts(metadata, facts):
    """ Normalize aws facts
        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                  facts dict
    """
    for interface in sorted(
            metadata['network']['interfaces']['macs'].values(),
            key=lambda x: x['device-number']
    ):
        int_info = dict()
        var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
        for ips_var, int_var in iteritems(var_map):
            ips = interface.get(int_var)
            if isinstance(ips, string_types):
                int_info[ips_var] = [ips]
            else:
                int_info[ips_var] = ips
        if 'vpc-id' in interface:
            int_info['network_type'] = 'vpc'
        else:
            int_info['network_type'] = 'classic'
        if int_info['network_type'] == 'vpc':
            int_info['network_id'] = interface['subnet-id']
        else:
            int_info['network_id'] = None
        facts['network']['interfaces'].append(int_info)

    facts['zone'] = metadata['placement']['availability-zone']

    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')

    # TODO: verify that local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')

    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = metadata.get('public-hostname')

    return facts

def normalize_openstack_facts(metadata, facts):
    """ Normalize openstack facts
        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                  facts dict
    """
    # openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api, should be updated if neutron exposes this.

    facts['zone'] = metadata['availability_zone']
    local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
    facts['network']['ip'] = local_ipv4
    facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']

    for f_var, h_var, ip_var in [('hostname', 'hostname', 'local-ipv4'),
                                 ('public_hostname', 'public-hostname', 'public-ipv4')]:
        try:
            if socket.gethostbyname(metadata['ec2_compat'][h_var]) == metadata['ec2_compat'][ip_var]:
                facts['network'][f_var] = metadata['ec2_compat'][h_var]
            else:
                facts['network'][f_var] = metadata['ec2_compat'][ip_var]
        except socket.gaierror:
            facts['network'][f_var] = metadata['ec2_compat'][ip_var]

    return facts

def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts
        Args:
            provider (str): host provider
            metadata (dict): provider metadata
        Returns:
            dict: the normalized provider facts
    """
    if provider is None or metadata is None:
        return {}

    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available

    # TODO: add support for setting user_data if available

    facts = dict(name=provider, metadata=metadata,
                 network=dict(interfaces=[], ipv6_enabled=False))

    if provider == 'gce':
        facts = normalize_gce_facts(metadata, facts)
    elif provider == 'aws':
        facts = normalize_aws_facts(metadata, facts)
    elif provider == 'openstack':
        facts = normalize_openstack_facts(metadata, facts)
    return facts

def set_node_schedulability(facts):
    """ Set schedulable facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated schedulable
            facts if they were not already present
    """
    if 'node' in facts:
        if 'schedulable' not in facts['node']:
            if 'master' in facts:
                facts['node']['schedulable'] = False
            else:
                facts['node']['schedulable'] = True
    return facts

# pylint: disable=too-many-branches
def set_selectors(facts):
    """ Set selectors facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated selectors
            facts if they were not already present
    """
    selector = "region=infra"

    if 'hosted' not in facts:
        facts['hosted'] = {}
    if 'router' not in facts['hosted']:
        facts['hosted']['router'] = {}
    if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']:
        facts['hosted']['router']['selector'] = selector
    if 'registry' not in facts['hosted']:
        facts['hosted']['registry'] = {}
    if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']:
        facts['hosted']['registry']['selector'] = selector
    if 'metrics' not in facts['hosted']:
        facts['hosted']['metrics'] = {}
    if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
        facts['hosted']['metrics']['selector'] = None
    if 'logging' not in facts:
        facts['logging'] = {}
    if 'selector' not in facts['logging'] or facts['logging']['selector'] in [None, 'None']:
        facts['logging']['selector'] = None
    if 'etcd' not in facts['hosted']:
        facts['hosted']['etcd'] = {}
    if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
        facts['hosted']['etcd']['selector'] = None
    if 'prometheus' not in facts:
        facts['prometheus'] = {}
    if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']:
        facts['prometheus']['selector'] = None
    if 'alertmanager' not in facts['prometheus']:
        facts['prometheus']['alertmanager'] = {}
    # pylint: disable=line-too-long
    if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']:
        facts['prometheus']['alertmanager']['selector'] = None
    if 'alertbuffer' not in facts['prometheus']:
        facts['prometheus']['alertbuffer'] = {}
    # pylint: disable=line-too-long
    if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']:
        facts['prometheus']['alertbuffer']['selector'] = None

    return facts

def set_dnsmasq_facts_if_unset(facts):
    """ Set dnsmasq facts if not already present in facts
    Args:
        facts (dict) existing facts
    Returns:
        facts (dict) updated facts with values set if not previously set
    """

    if 'common' in facts:
        if 'master' in facts and 'dns_port' not in facts['master']:
            facts['master']['dns_port'] = 8053

    return facts

def set_project_cfg_facts_if_unset(facts):
    """ Set Project Configuration facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated Project Configuration
            facts if they were not already present
    """

    config = {
        'default_node_selector': '',
        'project_request_message': '',
        'project_request_template': '',
        'mcs_allocator_range': 's0:/2',
        'mcs_labels_per_project': 5,
        'uid_allocator_range': '1000000000-1999999999/10000'
    }

    if 'master' in facts:
        for key, value in config.items():
            if key not in facts['master']:
                facts['master'][key] = value

    return facts

def set_identity_providers_if_unset(facts):
    """ Set identity_providers fact if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated identity providers
            facts if they were not already present
    """
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'identity_providers' not in facts['master']:
            identity_provider = dict(
                name='allow_all', challenge=True, login=True,
                kind='AllowAllPasswordIdentityProvider'
            )

            if deployment_type == 'openshift-enterprise':
                identity_provider = dict(
                    name='deny_all', challenge=True, login=True,
                    kind='DenyAllPasswordIdentityProvider'
                )

            facts['master']['identity_providers'] = [identity_provider]

    return facts

def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
                  were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']

        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )

        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )

        etcd_urls = []
        if etcd_hosts != '':
            facts['master']['etcd_port'] = ports['etcd']
            facts['master']['embedded_etcd'] = False
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]

        facts['master'].setdefault('etcd_urls', etcd_urls)

        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]

        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))

        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)

        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))

    return facts
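# For illustration: with common.hostname 'master.example.com', api_port '8443',
# api_use_ssl True and no cluster_hostname override, api_url defaults to
# 'https://master.example.com:8443' and console_url to that host plus console_path.
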
def set_aggregate_facts(facts):
    """ Set aggregate facts
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with aggregated facts
    """
    all_hostnames = set()
    internal_hostnames = set()
    kube_svc_ip = first_ip(facts['common']['portal_net'])
    if 'common' in facts:
        all_hostnames.add(facts['common']['hostname'])
        all_hostnames.add(facts['common']['public_hostname'])
        all_hostnames.add(facts['common']['ip'])
        all_hostnames.add(facts['common']['public_ip'])
        facts['common']['kube_svc_ip'] = kube_svc_ip

        internal_hostnames.add(facts['common']['hostname'])
        internal_hostnames.add(facts['common']['ip'])

        cluster_domain = facts['common']['dns_domain']

        if 'master' in facts:
            if 'cluster_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_hostname'])
            if 'cluster_public_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_public_hostname'])
            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
            all_hostnames.update(svc_names)
            internal_hostnames.update(svc_names)
            all_hostnames.add(kube_svc_ip)
            internal_hostnames.add(kube_svc_ip)

        facts['common']['all_hostnames'] = list(all_hostnames)
        facts['common']['internal_hostnames'] = list(internal_hostnames)

    return facts

def set_etcd_facts_if_unset(facts):
    """
    If using embedded etcd, loads the data directory from master-config.yaml.
    If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
    If anything goes wrong parsing these, the fact will not be set.
    """
    if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        if 'etcd_data_dir' not in etcd_facts:
            try:
                # Parse master config to find actual etcd data dir:
                master_cfg_path = os.path.join(facts['common']['config_base'],
                                               'master/master-config.yaml')
                master_cfg_f = open(master_cfg_path, 'r')
                config = yaml.safe_load(master_cfg_f.read())
                master_cfg_f.close()

                etcd_facts['etcd_data_dir'] = \
                    config['etcdConfig']['storageDirectory']

                facts['etcd'] = etcd_facts

            # We don't want exceptions bubbling up here:
            # pylint: disable=broad-except
            except Exception:
                pass
    else:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
        try:
            # Add a fake section for parsing:
            ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = configparser.RawConfigParser()
            config.readfp(ini_fp)
            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
                etcd_data_dir = etcd_data_dir[1:-1]

            etcd_facts['etcd_data_dir'] = etcd_data_dir
            facts['etcd'] = etcd_facts

        # We don't want exceptions bubbling up here:
        # pylint: disable=broad-except
        except Exception:
            pass

    return facts
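# For illustration: a typical /etc/etcd/etcd.conf carries a line such as
# ETCD_DATA_DIR="/var/lib/etcd/default.etcd"; with the fake [root] section prepended,
# configparser returns the quoted value, the quotes are stripped, and
# etcd.etcd_data_dir becomes /var/lib/etcd/default.etcd.
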
def set_deployment_facts_if_unset(facts):
    """ Set Facts that vary based on deployment_type. This currently
        includes common.service_type, master.registry_url, node.registry_url,
        node.storage_plugin_deps
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated deployment_type
            facts
    """
    # disabled to avoid breaking up facts related to deployment type into
    # multiple methods for now.
    # pylint: disable=too-many-statements, too-many-branches
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'service_type' not in facts['common']:
            service_type = 'atomic-openshift'
            if deployment_type == 'origin':
                service_type = 'origin'
            facts['common']['service_type'] = service_type

    for role in ('master', 'node'):
        if role in facts:
            deployment_type = facts['common']['deployment_type']
            if 'registry_url' not in facts[role]:
                registry_url = 'openshift/origin-${component}:${version}'
                if deployment_type == 'openshift-enterprise':
                    registry_url = 'openshift3/ose-${component}:${version}'
                facts[role]['registry_url'] = registry_url

    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
        if 'disabled_features' not in facts['master']:
            if facts['common']['deployment_subtype'] == 'registry':
                facts['master']['disabled_features'] = openshift_features

    if 'node' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'storage_plugin_deps' not in facts['node']:
            facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']

    return facts

# pylint: disable=too-many-statements
def set_version_facts_if_unset(facts):
    """ Set version facts. This currently includes common.version and
        common.version_gte_3_x
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
    """
    if 'common' in facts:
        openshift_version = get_openshift_version(facts)
        if openshift_version and openshift_version != "latest":
            version = LooseVersion(openshift_version)
            facts['common']['version'] = openshift_version
            facts['common']['short_version'] = '.'.join([str(x) for x in version.version[0:2]])
            version_gte_3_6 = version >= LooseVersion('3.6')
            version_gte_3_7 = version >= LooseVersion('3.7')
            version_gte_3_8 = version >= LooseVersion('3.8')
        else:
            # 'Latest' version is set to True, 'Next' versions set to False
            version_gte_3_6 = True
            version_gte_3_7 = True
            version_gte_3_8 = False
        facts['common']['version_gte_3_6'] = version_gte_3_6
        facts['common']['version_gte_3_7'] = version_gte_3_7
        facts['common']['version_gte_3_8'] = version_gte_3_8

        if version_gte_3_8:
            examples_content_version = 'v3.8'
        elif version_gte_3_7:
            examples_content_version = 'v3.7'
        elif version_gte_3_6:
            examples_content_version = 'v3.6'
        else:
            examples_content_version = 'v1.5'

        facts['common']['examples_content_version'] = examples_content_version

    return facts
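# For illustration: an installed version of '3.7.42' yields common.short_version '3.7',
# version_gte_3_6 and version_gte_3_7 True, version_gte_3_8 False, and
# examples_content_version 'v3.7'.
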
def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict
        Args:
            facts (dict): existing facts
            system_facts (dict): ansible_facts
        Returns:
            dict: the facts dict updated with the generated sdn facts if they
                  were not already present
    """
    if 'master' in facts:
        # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
        # these might be overridden if they exist in the master config file
        sdn_cluster_network_cidr = '10.128.0.0/14'
        sdn_host_subnet_length = '9'

        master_cfg_path = os.path.join(facts['common']['config_base'],
                                       'master/master-config.yaml')
        if os.path.isfile(master_cfg_path):
            with open(master_cfg_path, 'r') as master_cfg_f:
                config = yaml.safe_load(master_cfg_f.read())

            if 'networkConfig' in config:
                if 'clusterNetworkCIDR' in config['networkConfig']:
                    sdn_cluster_network_cidr = \
                        config['networkConfig']['clusterNetworkCIDR']
                if 'hostSubnetLength' in config['networkConfig']:
                    sdn_host_subnet_length = \
                        config['networkConfig']['hostSubnetLength']

        if 'sdn_cluster_network_cidr' not in facts['master']:
            facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
        if 'sdn_host_subnet_length' not in facts['master']:
            facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length

    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']

        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'

        for val in itervalues(system_facts):
            if isinstance(val, dict) and 'mtu' in val:
                mtu = val['mtu']

                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    facts['node']['sdn_mtu'] = str(mtu - 50)

    return facts

def set_nodename(facts):
    """ set nodename """
    if 'node' in facts and 'common' in facts:
        if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
            facts['node']['nodename'] = facts['provider']['metadata']['instance']['hostname'].split('.')[0]

        # TODO: The openstack cloudprovider nodename setting was too opinionated.
        # It needs to be generalized before it can be enabled again.
        # elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
        #     facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
        else:
            facts['node']['nodename'] = facts['common']['hostname'].lower()
    return facts

def migrate_oauth_template_facts(facts):
    """
    Migrate an old oauth template fact to a newer format if it's present.

    The legacy 'oauth_template' fact was just a filename, and assumed you were
    setting the 'login' template.

    The new pluralized 'oauth_templates' fact is a dict mapping the template
    name to a filename.

    Simplify the code after this by merging the old fact into the new.
    """
    if 'master' in facts and 'oauth_template' in facts['master']:
        if 'oauth_templates' not in facts['master']:
            facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']}
        elif 'login' not in facts['master']['oauth_templates']:
            facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
    return facts

def format_url(use_ssl, hostname, port, path=''):
    """ Format url based on ssl flag, hostname, port and path
        Args:
            use_ssl (bool): is ssl enabled
            hostname (str): hostname
            port (str): port
            path (str): url path
        Returns:
            str: The generated url string
    """
    scheme = 'https' if use_ssl else 'http'
    netloc = hostname
    if (use_ssl and port != '443') or (not use_ssl and port != '80'):
        netloc += ":%s" % port
    try:
        url = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
    except AttributeError:
        # pylint: disable=undefined-variable
        url = urlunparse((scheme, netloc, path, '', '', ''))
    return url
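# For illustration: format_url(True, 'master.example.com', '8443', 'console') returns
# 'https://master.example.com:8443/console', while format_url(False, 'example.com', '80')
# returns 'http://example.com' (default ports are omitted from the netloc).
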
def get_current_config(facts):
    """ Get current openshift config
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    roles = [role for role in facts if role not in ['common', 'provider']]

    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]

        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )

        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
            try:
                _, output, _ = module.run_command(  # noqa: F405
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)

                cad = 'certificate-authority-data'
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass

                current_config['kubeconfig'] = config

            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass

    return current_config

def build_kubelet_args(facts):
    """Build node kubelet_args

    In the node-config.yaml file, kubeletArgument sub-keys have their
    values provided as a list. Hence the gratuitous use of ['foo'] below.
    """
    cloud_cfg_path = os.path.join(
        facts['common']['config_base'],
        'cloudprovider')

    # We only have to do this stuff on hosts that are nodes
    if 'node' in facts:
        # Any changes to the kubeletArguments parameter are stored
        # here first.
        kubelet_args = {}

        if 'cloudprovider' in facts:
            # EVERY cloud is special <3
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    kubelet_args['cloud-provider'] = ['aws']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    kubelet_args['cloud-provider'] = ['openstack']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    kubelet_args['cloud-provider'] = ['gce']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']

        # Automatically add node-labels to the kubeletArguments
        # parameter. See BZ1359848 for additional details.
        #
        # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
        if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict):
            # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns
            # into ['foo=bar', 'a=b']
            #
            # On the openshift_node_labels inventory variable we loop
            # over each key-value tuple (from .items()) and join the
            # key to the value with an '=' character, this produces a
            # list.
            #
            # map() seems to be returning an itertools.imap object
            # instead of a list. We cast it to a list ourselves.
            # pylint: disable=unnecessary-lambda
            labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
            if labels_str:
                kubelet_args['node-labels'] = labels_str

        # If we've added items to the kubelet_args dict then we need
        # to merge the new items back into the main facts object.
        if kubelet_args != {}:
            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
    return facts
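# For illustration: openshift_node_labels such as {'region': 'infra'} end up in
# kubeletArguments as node-labels ['region=infra'] via the '='.join() mapping above.
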
def build_controller_args(facts):
    """ Build master controller_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        controller_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    controller_args['cloud-provider'] = ['aws']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    controller_args['cloud-provider'] = ['openstack']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    controller_args['cloud-provider'] = ['gce']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
        if controller_args != {}:
            facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
    return facts

def build_api_server_args(facts):
    """ Build master api_server_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        api_server_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    api_server_args['cloud-provider'] = ['aws']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    api_server_args['cloud-provider'] = ['openstack']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    api_server_args['cloud-provider'] = ['gce']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
        if api_server_args != {}:
            facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
    return facts

def is_service_running(service):
    """ Queries systemd through dbus to see if the service is running """
    service_running = False
    try:
        bus = SystemBus()
        systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
        manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
        service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
        service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
        service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
        service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
        service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        if service_load_state == 'loaded' and service_active_state == 'active':
            service_running = True
    except DBusException:
        # TODO: do not swallow exception, as it may be hiding useful debugging
        # information.
        pass

    return service_running

def rpm_rebuilddb():
    """
    Runs rpm --rebuilddb to ensure the db is in good shape.
    """
    module.run_command(['/usr/bin/rpm', '--rebuilddb'])  # noqa: F405

def get_version_output(binary, version_cmd):
    """ runs and returns the version output for a command """
    cmd = []
    for item in (binary, version_cmd):
        if isinstance(item, list):
            cmd.extend(item)
        else:
            cmd.append(item)

    output = ''  # default when the binary does not exist
    if os.path.isfile(cmd[0]):
        _, output, _ = module.run_command(cmd)  # noqa: F405
    return output

def get_docker_version_info():
    """ Parses and returns the docker version info """
    result = None
    if is_service_running('docker') or is_service_running('container-engine'):
        version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
        if 'Server' in version_info:
            result = {
                'api_version': version_info['Server']['API version'],
                'version': version_info['Server']['Version']
            }
    return result

def get_hosted_registry_insecure():
    """ Parses OPTIONS from /etc/sysconfig/docker to determine if the
        registry is currently insecure.
    """
    hosted_registry_insecure = None
    if os.path.exists('/etc/sysconfig/docker'):
        try:
            ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = configparser.RawConfigParser()
            config.readfp(ini_fp)
            options = config.get('root', 'OPTIONS')
            if 'insecure-registry' in options:
                hosted_registry_insecure = True
        except Exception:  # pylint: disable=broad-except
            pass
    return hosted_registry_insecure

def get_openshift_version(facts):
    """ Get current version of openshift on the host.

        Checks a variety of ways ranging from fastest to slowest.

        Args:
            facts (dict): existing facts
            optional cli_image for pulling the version number

        Returns:
            version: the current openshift version
    """
    version = None

    # No need to run this method repeatedly on a system if we already know the
    # version
    # TODO: We need a way to force reload this after upgrading bits.
    if 'common' in facts:
        if 'version' in facts['common'] and facts['common']['version'] is not None:
            return chomp_commit_offset(facts['common']['version'])

    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)
    elif 'common' in facts and 'is_containerized' in facts['common']:
        version = get_container_openshift_version(facts)

    # Handle containerized masters that have not yet been configured as a node.
    # This can be very slow and may get re-run multiple times, so we only use this
    # if other methods failed to find a version.
    if not version and os.path.isfile('/usr/local/bin/openshift'):
        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)

    return chomp_commit_offset(version)

def chomp_commit_offset(version):
    """Chomp any "+git.foo" commit offset string from the given `version`
    and return the modified version string.

    Ex:
    - chomp_commit_offset(None) => None
    - chomp_commit_offset(1337) => "1337"
    - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
    - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
    - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
    """
    if version is None:
        return version
    else:
        # Stringify, just in case it's a Number type. Split by '+' and
        # return the first split. No concerns about strings without a
        # '+', .split() returns an array of the original string.
        return str(version).split('+')[0]

def get_container_openshift_version(facts):
    """
    If containerized, see if we can determine the installed version via the
    systemd environment files.
    """
    for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']:
        env_path = filename % facts['common']['service_type']
        if not os.path.exists(env_path):
            continue

        with open(env_path) as env_file:
            for line in env_file:
                if line.startswith("IMAGE_VERSION="):
                    tag = line[len("IMAGE_VERSION="):].strip()
                    # Remove leading "v" and any trailing release info, we just want
                    # a version number here:
                    no_v_version = tag[1:] if tag[0] == 'v' else tag
                    version = no_v_version.split("-")[0]
                    return version
    return None

def parse_openshift_version(output):
    """ Parse the version number from 'openshift version' output
        Args:
            string: output of 'openshift version'
        Returns:
            string: the version number
    """
    versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
    ver = versions.get('openshift', '')
    # Remove trailing build number and commit hash from older versions, we need to return a straight
    # w.x.y.z version here for use as openshift_version throughout the playbooks/roles. (i.e. 3.1.1.6-64-g80b61da)
    ver = ver.split('-')[0]
    return ver
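# For illustration: sample 'openshift version' output such as
#   openshift v3.6.173.0.5
#   kubernetes v1.6.1+5115d708d7
# produces {'openshift': '3.6.173.0.5', ...}; the openshift entry is returned with any
# trailing '-<build>' suffix stripped.
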
def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to supplied facts dict
        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts
    """
    if not provider_facts:
        return facts

    common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
    for h_var, ip_var in common_vars:
        ip_value = provider_facts['network'].get(ip_var)
        if ip_value:
            facts['common'][ip_var] = ip_value

        facts['common'][h_var] = choose_hostname(
            [provider_facts['network'].get(h_var)],
            facts['common'][h_var]
        )

    facts['provider'] = provider_facts
    return facts



# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
    """ Recursively merge facts dicts

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']
        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    protected_facts = ['ha']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config',
                            "builddefaults",
                            "buildoverrides"]

    facts = dict()
    for key, value in iteritems(orig):
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watchout for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], string_types):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)

                # Collect the subset of protected facts to overwrite
                # if key matches. These will be passed to the
                # subsequent merge_facts call.
                relevant_protected_facts = []
                for item in protected_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_protected_facts.append(item)

                facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # Key matches a protected fact and we are not overwriting
            # it so we will determine if it is okay to change this
            # fact.
            elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                # ha (bool) can not change unless it has been passed
                # as a protected fact to overwrite.
                if key == 'ha':
                    if safe_get_bool(value) != safe_get_bool(new[key]):
                        # pylint: disable=line-too-long
                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')  # noqa: F405
                    else:
                        facts[key] = value
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)

    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watchout for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], string_types):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts
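
# Illustrative example (not executed; the certificate entries are hypothetical):
# merging {'master': {'named_certificates': [{'certfile': 'a.crt'}]}} with
# {'master': {'named_certificates': [{'certfile': 'b.crt'}]}} and empty
# overwrite lists keeps both entries, because 'named_certificates' is an
# additive fact. Passing additive_facts_to_overwrite=['master.named_certificates']
# instead falls through to the default branch and keeps only the new value.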


def save_local_facts(filename, facts):
    """ Save local facts

        Args:
            filename (str): local facts file
            facts (dict): facts to set
    """
    try:
        fact_dir = os.path.dirname(filename)
        try:
            os.makedirs(fact_dir)  # try to make the directory
        except OSError as exception:
            if exception.errno != errno.EEXIST:  # but it is okay if it is already there
                raise  # pass any other exceptions up the chain

        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))  # noqa: F405
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )


def get_local_facts_from_file(filename):
    """ Retrieve local facts from fact file

        Args:
            filename (str): local facts file
        Returns:
            dict: the retrieved facts
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style
        ini_facts = configparser.SafeConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict()
            for key, value in ini_facts.items(section):
                local_facts[section][key] = value

    except (configparser.MissingSectionHeaderError,
            configparser.ParsingError):
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass

    return local_facts
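
# Illustrative example (not executed; the fact values are hypothetical): an
# older INI style fact file such as
#   [common]
#   deployment_type = origin
# is converted to {'common': {'deployment_type': 'origin'}}, while a fact file
# already written as JSON is loaded directly by the json.load() fallback.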


def sort_unique(alist):
    """ Sorts and de-dupes a list

        Args:
            list: a list
        Returns:
            list: a sorted de-duped list
    """
    return sorted(list(set(alist)))


def safe_get_bool(fact):
    """ Get a boolean fact safely.

        Args:
            fact: fact to convert
        Returns:
            bool: given fact as a bool
    """
    return bool(strtobool(str(fact)))
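
# Illustrative example (not executed): safe_get_bool('yes'), safe_get_bool('True')
# and safe_get_bool(1) all return True, while safe_get_bool('no') returns False;
# strtobool raises ValueError for strings it does not recognize.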


def set_proxy_facts(facts):
    """ Set global proxy facts

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'common' in facts:
        common = facts['common']
        if 'http_proxy' in common or 'https_proxy' in common or 'no_proxy' in common:
            if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
                common['no_proxy'] = common['no_proxy'].split(",")
            elif 'no_proxy' not in common:
                common['no_proxy'] = []

            # See https://bugzilla.redhat.com/show_bug.cgi?id=1466783
            # masters behind a proxy need to connect to etcd via IP
            if 'no_proxy_etcd_host_ips' in common:
                if isinstance(common['no_proxy_etcd_host_ips'], string_types):
                    common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(','))

            if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
                if 'no_proxy_internal_hostnames' in common:
                    common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
            # We always add local dns domain and ourselves no matter what
            common['no_proxy'].append('.' + common['dns_domain'])
            common['no_proxy'].append('.svc')
            common['no_proxy'].append(common['hostname'])
            common['no_proxy'] = ','.join(sort_unique(common['no_proxy']))
        facts['common'] = common
    return facts
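
# Illustrative example (not executed; hostnames are hypothetical): with an
# http_proxy set, no_proxy='registry.example.com', dns_domain='cluster.local'
# and hostname='master1.example.com', the resulting fact is the sorted,
# de-duplicated string
#   '.cluster.local,.svc,master1.example.com,registry.example.com'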


def set_builddefaults_facts(facts):
    """ Set build defaults including setting proxy values from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Copy values from common to builddefaults
        if 'http_proxy' not in builddefaults and 'http_proxy' in common:
            builddefaults['http_proxy'] = common['http_proxy']
        if 'https_proxy' not in builddefaults and 'https_proxy' in common:
            builddefaults['https_proxy'] = common['https_proxy']
        if 'no_proxy' not in builddefaults and 'no_proxy' in common:
            builddefaults['no_proxy'] = common['no_proxy']

        # Create git specific facts from generic values, if git specific values are
        # not defined.
        if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
            builddefaults['git_http_proxy'] = builddefaults['http_proxy']
        if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
            builddefaults['git_https_proxy'] = builddefaults['https_proxy']
        if 'git_no_proxy' not in builddefaults and 'no_proxy' in builddefaults:
            builddefaults['git_no_proxy'] = builddefaults['no_proxy']

        # If we're actually defining a builddefaults config then create admission_plugin_config
        # then merge builddefaults[config] structure into admission_plugin_config
        # 'config' is the 'openshift_builddefaults_json' inventory variable
        if 'config' in builddefaults:
            if 'admission_plugin_config' not in facts['master']:
                # Scaffold out the full expected datastructure
                facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
            delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])

    return facts
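
# Illustrative example (not executed): if only the common http_proxy fact is
# defined, it is copied into builddefaults['http_proxy'] and then into
# builddefaults['git_http_proxy'], so builds and their git clones inherit the
# cluster-wide proxy unless more specific builddefaults values are provided.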


def delete_empty_keys(keylist):
    """ Delete dictionary elements from keylist where "value" is empty.

        Args:
            keylist(list): A list of builddefault configuration envs.
        Returns:
            none

        Example:
            keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                       {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                       {'name': 'NO_PROXY', 'value': ''}]

            After calling delete_empty_keys the provided list is modified to become:

            [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
             {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}]
    """
    count = 0
    for i in range(0, len(keylist)):
        if len(keylist[i - count]['value']) == 0:
            del keylist[i - count]
            count += 1


def set_buildoverrides_facts(facts):
    """ Set build overrides

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'buildoverrides' in facts:
        buildoverrides = facts['buildoverrides']
        # If we're actually defining a buildoverrides config then create admission_plugin_config
        # then merge buildoverrides[config] structure into admission_plugin_config
        if 'config' in buildoverrides:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            facts['master']['admission_plugin_config'].update(buildoverrides['config'])

    return facts


# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated containerization
            facts
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type == 'openshift-enterprise':
        master_image = 'openshift3/ose'
        cli_image = master_image
        node_image = 'openshift3/node'
        ovs_image = 'openshift3/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift3/ose-pod'
        router_image = 'openshift3/ose-haproxy-router'
        registry_image = 'openshift3/ose-docker-registry'
        deployer_image = 'openshift3/ose-deployer'
    else:
        master_image = 'openshift/origin'
        cli_image = master_image
        node_image = 'openshift/node'
        ovs_image = 'openshift/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift/origin-pod'
        router_image = 'openshift/origin-haproxy-router'
        registry_image = 'openshift/origin-docker-registry'
        deployer_image = 'openshift/origin-deployer'

    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')

    # If openshift_docker_use_system_container is set and is True ....
    if 'use_system_container' in list(facts['docker'].keys()):
        # use safe_get_bool as the inventory variable may not be a
        # valid boolean on its own.
        if safe_get_bool(facts['docker']['use_system_container']):
            # ... set the service name to container-engine
            facts['docker']['service_name'] = 'container-engine'

    if 'is_containerized' not in facts['common']:
        facts['common']['is_containerized'] = facts['common']['is_atomic']
    if 'cli_image' not in facts['common']:
        facts['common']['cli_image'] = cli_image
    if 'pod_image' not in facts['common']:
        facts['common']['pod_image'] = pod_image
    if 'router_image' not in facts['common']:
        facts['common']['router_image'] = router_image
    if 'registry_image' not in facts['common']:
        facts['common']['registry_image'] = registry_image
    if 'deployer_image' not in facts['common']:
        facts['common']['deployer_image'] = deployer_image
    if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
        facts['etcd']['etcd_image'] = etcd_image
    if 'master' in facts and 'master_image' not in facts['master']:
        facts['master']['master_image'] = master_image
        facts['master']['master_system_image'] = master_image
    if 'node' in facts:
        if 'node_image' not in facts['node']:
            facts['node']['node_image'] = node_image
            facts['node']['node_system_image'] = node_image
        if 'ovs_image' not in facts['node']:
            facts['node']['ovs_image'] = ovs_image
            facts['node']['ovs_system_image'] = ovs_image

    if safe_get_bool(facts['common']['is_containerized']):
        facts['common']['admin_binary'] = '/usr/local/bin/oadm'
        facts['common']['client_binary'] = '/usr/local/bin/oc'

    return facts


def set_installed_variant_rpm_facts(facts):
    """ Set RPM facts of installed variant

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with installed_variant_rpms
    """
    installed_rpms = []
    for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
        optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
        variant_rpms = [base_rpm] + \
                       ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
                       ['tuned-profiles-%s-node' % base_rpm]
        for rpm in variant_rpms:
            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])  # noqa: F405
            if exit_code == 0:
                installed_rpms.append(rpm)

    facts['common']['installed_variant_rpms'] = installed_rpms
    return facts
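
# Illustrative example (not executed): for the base_rpm 'origin' the packages
# probed with 'rpm -q' are origin, origin-master, origin-node, origin-clients,
# origin-sdn-ovs and tuned-profiles-origin-node; any found installed are listed
# in facts['common']['installed_variant_rpms'].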


class OpenShiftFactsInternalError(Exception):
    """Origin Facts Error"""
    pass


class OpenShiftFactsUnsupportedRoleError(Exception):
    """Origin Facts Unsupported Role Error"""
    pass


class OpenShiftFactsFileWriteError(Exception):
    """Origin Facts File Write Error"""
    pass


class OpenShiftFactsMetadataUnavailableError(Exception):
    """Origin Facts Metadata Unavailable Error"""
    pass


class OpenShiftFacts(object):
    """ Origin Facts

        Attributes:
            facts (dict): facts for the host

        Args:
            module (AnsibleModule): an AnsibleModule object
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Raises:
            OpenShiftFactsUnsupportedRoleError:
    """
    known_roles = ['builddefaults',
                   'buildoverrides',
                   'cloudprovider',
                   'common',
                   'docker',
                   'etcd',
                   'hosted',
                   'master',
                   'node',
                   'logging',
                   'loggingops',
                   'metrics',
                   'prometheus']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None,
                 openshift_env=None,
                 openshift_env_structures=None,
                 protected_facts_to_overwrite=None):
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        # Collect system facts and preface each fact with 'ansible_'.
        try:
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])  # noqa: F405
            additional_facts = {}
            for (k, v) in self.system_facts.items():
                additional_facts["ansible_%s" % k.replace('-', '_')] = v
            self.system_facts.update(additional_facts)
        except UnboundLocalError:
            # ansible-2.2,2.3
            self.system_facts = get_all_facts(module)['ansible_facts']  # noqa: F405

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite,
                                         openshift_env,
                                         openshift_env_structures,
                                         protected_facts_to_overwrite)

    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite,
                       openshift_env,
                       openshift_env_structures,
                       protected_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift_env facts for overriding generated defaults
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The generated facts
        """
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite,
                                            openshift_env,
                                            openshift_env_structures,
                                            protected_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite,
                            protected_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        facts = set_url_facts_if_unset(facts)
        facts = set_project_cfg_facts_if_unset(facts)
        facts = set_node_schedulability(facts)
        facts = set_selectors(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_dnsmasq_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        facts = set_proxy_facts(facts)
        facts = set_builddefaults_facts(facts)
        facts = set_buildoverrides_facts(facts)
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)

    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host
            Returns:
                dict: The generated default facts
        """
        defaults = {}
        ip_addr = self.system_facts['ansible_default_ipv4']['address']
        exit_code, output, _ = module.run_command(['hostname', '-f'])  # noqa: F405
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr).lower()

        defaults['common'] = dict(ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc', admin_binary='oadm',
                                  dns_domain='cluster.local',
                                  config_base='/etc/origin')

        if 'master' in roles:
            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_etcd=True, embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='30s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)

        if 'docker' in roles:
            docker = dict(disable_push_dockerhub=False,
                          options='--log-driver=journald')
            # NOTE: This is a workaround for a dnf output racecondition that can occur in
            # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184
            if self.system_facts['ansible_pkg_mgr'] == 'dnf':
                rpm_rebuilddb()

            version_info = get_docker_version_info()
            if version_info is not None:
                docker['api_version'] = version_info['api_version']
                docker['version'] = version_info['version']
                docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10')
            hosted_registry_insecure = get_hosted_registry_insecure()
            if hosted_registry_insecure is not None:
                docker['hosted_registry_insecure'] = hosted_registry_insecure
            docker['service_name'] = 'docker'
            defaults['docker'] = docker

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        if 'hosted' in roles or self.role == 'hosted':
            defaults['hosted'] = dict(
                etcd=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='etcd',
                            size='1Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access=dict(
                            modes=['ReadWriteOnce']
                        ),
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                registry=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='registry',
                            size='5Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        glusterfs=dict(
                            endpoints='glusterfs-registry-endpoints',
                            path='glusterfs-registry-volume',
                            readOnly=False,
                            swap=False,
                            swapcopy=True),
                        host=None,
                        access=dict(
                            modes=['ReadWriteMany']
                        ),
                        create_pv=True,
                        create_pvc=True
                    )
                ),
                router=dict()
            )

            defaults['logging'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='logging-es',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['loggingops'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='logging-es-ops',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['metrics'] = dict(
                deploy=False,
                duration=7,
                resolution='10s',
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='metrics',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['prometheus'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='prometheus',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['prometheus']['alertmanager'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='prometheus-alertmanager',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['prometheus']['alertbuffer'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='prometheus-alertbuffer',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

        return defaults

    def guess_host_provider(self):
        """ Guess the host provider

            Returns:
                dict: The generated default facts for the detected provider
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['ansible_product_name']
        product_version = self.system_facts['ansible_product_version']
        virt_type = self.system_facts['ansible_virtualization_type']
        virt_role = self.system_facts['ansible_virtualization_role']
        bios_vendor = self.system_facts['ansible_system_vendor']
        provider = None
        metadata = None

        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)

    def init_provider_facts(self):
        """ Initialize the provider facts

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts

    @staticmethod
    def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
        """ Split openshift_env facts based on openshift_env structures.

            Args:
                openshift_env_fact (string): the openshift_env fact to split
                                             ex: 'openshift_cloudprovider_openstack_auth_url'
                openshift_env_structures (list): a list of structures to determine fact keys
                                                 ex: ['openshift.cloudprovider.openstack.*']
            Returns:
                list: a list of keys that represent the fact
                      ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
        """
        # By default, we'll split an openshift_env fact by underscores.
        fact_keys = openshift_env_fact.split('_')

        # Determine if any of the provided variable structures match the fact.
        matching_structure = None
        if openshift_env_structures is not None:
            for structure in openshift_env_structures:
                if re.match(structure, openshift_env_fact):
                    matching_structure = structure
        # Fact didn't match any variable structures so return the default fact keys.
        if matching_structure is None:
            return fact_keys

        final_keys = []
        structure_keys = matching_structure.split('.')
        for structure_key in structure_keys:
            # Matched current key. Add to final keys.
            if structure_key == fact_keys[structure_keys.index(structure_key)]:
                final_keys.append(structure_key)
            # Wildcard means we will be taking everything from here to the end of the fact.
            elif structure_key == '*':
                final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
            # Shouldn't have gotten here, return the fact keys.
            else:
                return fact_keys
        return final_keys

    # Disabling too-many-branches and too-many-locals.
    # This should be cleaned up as a TODO item.
    # pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None,
                         openshift_env=None,
                         openshift_env_structures=None,
                         protected_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift env facts to set
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        if openshift_env != {} and openshift_env is not None:
            for fact, value in iteritems(openshift_env):
                oo_env_facts = dict()
                current_level = oo_env_facts
                keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]

                if len(keys) > 0 and keys[0] != self.role:
                    continue

                for key in keys:
                    if key == keys[-1]:
                        current_level[key] = value
                    elif key not in current_level:
                        current_level[key] = dict()
                        current_level = current_level[key]

                facts_to_set = merge_facts(orig=facts_to_set,
                                           new=oo_env_facts,
                                           additive_facts_to_overwrite=[],
                                           protected_facts_to_overwrite=[])

        local_facts = get_local_facts_from_file(self.filename)

        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite,
                                      protected_facts_to_overwrite)

        if 'docker' in new_local_facts:
            # Convert legacy log_options comma sep string to a list if present:
            if 'log_options' in new_local_facts['docker'] and \
                    isinstance(new_local_facts['docker']['log_options'], string_types):
                new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')

        new_local_facts = self.remove_empty_facts(new_local_facts)

        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:  # noqa: F405
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts

    def remove_empty_facts(self, facts=None):
        """ Remove empty facts

            Args:
                facts (dict): facts to clean
        """
        facts_to_remove = []
        for fact, value in iteritems(facts):
            if isinstance(facts[fact], dict):
                facts[fact] = self.remove_empty_facts(facts[fact])
            else:
                if value == "" or value == [""] or value is None:
                    facts_to_remove.append(fact)
        for fact in facts_to_remove:
            del facts[fact]
        return facts

    def validate_local_facts(self, facts=None):
        """ Validate local facts

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            msg = 'Invalid facts detected:\n'
            # pylint: disable=consider-iterating-dictionary
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            module.fail_json(msg=msg, changed=self.changed)  # noqa: F405

    # disabling pylint errors for line-too-long since we're dealing
    # with best effort reduction of error messages here.
    # disabling errors for too-many-branches since we require checking
    # many conditions.
    # pylint: disable=line-too-long, too-many-branches
    @staticmethod
    def validate_master_facts(facts, invalid_facts):
        """ Validate master facts

            Args:
                facts (dict): local facts to validate
                invalid_facts (dict): collected invalid_facts
            Returns:
                dict: Invalid facts
        """
        if 'master' in facts:
            # openshift.master.session_auth_secrets
            if 'session_auth_secrets' in facts['master']:
                session_auth_secrets = facts['master']['session_auth_secrets']
                if not issubclass(type(session_auth_secrets), list):
                    invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
                elif 'session_encryption_secrets' not in facts['master']:
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption_secrets must be set '
                                                             'if openshift_master_session_auth_secrets is provided.')
                elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
                                                             'openshift_master_session_encryption_secrets must be '
                                                             'equal length.')
                else:
                    for secret in session_auth_secrets:
                        if len(secret) < 32:
                            invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
                                                                     'Secrets must be at least 32 characters in length.')
            # openshift.master.session_encryption_secrets
            if 'session_encryption_secrets' in facts['master']:
                session_encryption_secrets = facts['master']['session_encryption_secrets']
                if not issubclass(type(session_encryption_secrets), list):
                    invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
                elif 'session_auth_secrets' not in facts['master']:
                    invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
                                                                   'set if openshift_master_session_encryption_secrets '
                                                                   'is provided.')
                else:
                    for secret in session_encryption_secrets:
                        if len(secret) not in [16, 24, 32]:
                            invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
                                                                           'Secrets must be 16, 24, or 32 characters in length.')
        return invalid_facts


def main():
    """ main """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(  # noqa: F405
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
            openshift_env=dict(default={}, type='dict', required=False),
            openshift_env_structures=dict(default=[], type='list', required=False),
            protected_facts_to_overwrite=dict(default=[], type='list', required=False)
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    if not HAVE_DBUS:
        module.fail_json(msg="This module requires dbus python bindings")  # noqa: F405

    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']  # noqa: F405
    module.params['gather_timeout'] = 10  # noqa: F405
    module.params['filter'] = '*'  # noqa: F405

    role = module.params['role']  # noqa: F405
    local_facts = module.params['local_facts']  # noqa: F405
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']  # noqa: F405
    openshift_env = module.params['openshift_env']  # noqa: F405
    openshift_env_structures = module.params['openshift_env_structures']  # noqa: F405
    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']  # noqa: F405

    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite,
                                     openshift_env,
                                     openshift_env_structures,
                                     protected_facts_to_overwrite)

    file_params = module.params.copy()  # noqa: F405
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)  # noqa: F405
    changed = module.set_fs_attributes_if_different(file_args,  # noqa: F405
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,  # noqa: F405
                            ansible_facts=openshift_facts.facts)


if __name__ == '__main__':
    main()
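
# Illustrative playbook usage (not part of this module; values are examples):
#
#   - name: Gather and set OpenShift facts for the master role
#     openshift_facts:
#       role: master
#       local_facts:
#         public_hostname: "master.example.com"
#
# The module persists the merged facts to /etc/ansible/facts.d/openshift.fact
# and returns them to the play under ansible_facts['openshift'].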