
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
# pylint: disable=too-many-lines
# Reason: Disable pylint too-many-lines because we don't want to split up this file.
# Status: Permanently disabled to keep this module as self-contained as possible.
"""Ansible module for retrieving and setting openshift related facts"""

import ConfigParser
import copy
import io
import json
import os
import re
import socket
import struct
import urlparse
import yaml

from distutils.util import strtobool
from distutils.version import LooseVersion

from dbus import SystemBus, Interface
from dbus.exceptions import DBusException

# Note: json, re and urlparse are used by helpers below; `module` and
# `fetch_url` are expected to be provided by the Ansible module boilerplate
# (module_utils imports), which falls outside this excerpt.
DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
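# Illustrative playbook invocation (a sketch only -- the accepted parameters
# are defined by main(), which is not part of this excerpt; `role` and
# `local_facts` are shown here as assumed inputs):
#
#   - openshift_facts:
#       role: common
#       local_facts:
#         hostname: "{{ openshift_hostname | default(None) }}"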
def migrate_docker_facts(facts):
    """ Apply migrations for docker facts """
    params = {
        'common': (
            'additional_registries',
            'insecure_registries',
            'blocked_registries',
            'options'
        ),
        'node': (
            'log_driver',
            'log_options'
        )
    }
    if 'docker' not in facts:
        facts['docker'] = {}
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                old_param = 'docker_' + param
                if old_param in facts[role]:
                    facts['docker'][param] = facts[role].pop(old_param)

    if 'node' in facts and 'portal_net' in facts['node']:
        facts['docker']['hosted_registry_insecure'] = True
        facts['docker']['hosted_registry_network'] = facts['node'].pop('portal_net')

    # log_options was originally meant to be a comma separated string, but
    # we now prefer an actual list, with backward compatibility:
    if 'log_options' in facts['docker'] and \
            isinstance(facts['docker']['log_options'], basestring):
        facts['docker']['log_options'] = facts['docker']['log_options'].split(",")

    return facts
# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
    """ Migrate facts from various roles into common """
    # Note: the trailing commas make these single-element tuples rather than
    # bare strings, so the loop below iterates parameter names, not characters.
    params = {
        'node': ('portal_net',),
        'master': ('portal_net',)
    }
    if 'common' not in facts:
        facts['common'] = {}
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['common'][param] = facts[role].pop(param)
    return facts


def migrate_node_facts(facts):
    """ Migrate facts from various roles into node """
    params = {
        'common': ('dns_ip',),
    }
    if 'node' not in facts:
        facts['node'] = {}
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['node'][param] = facts[role].pop(param)
    return facts


def migrate_local_facts(facts):
    """ Apply migrations of local facts """
    migrated_facts = copy.deepcopy(facts)
    migrated_facts = migrate_docker_facts(migrated_facts)
    migrated_facts = migrate_common_facts(migrated_facts)
    migrated_facts = migrate_node_facts(migrated_facts)
    migrated_facts = migrate_hosted_facts(migrated_facts)
    return migrated_facts
def migrate_hosted_facts(facts):
    """ Apply migrations for hosted facts (router/registry selectors set on the master) """
    if 'master' in facts:
        if 'router_selector' in facts['master']:
            if 'hosted' not in facts:
                facts['hosted'] = {}
            if 'router' not in facts['hosted']:
                facts['hosted']['router'] = {}
            facts['hosted']['router']['selector'] = facts['master'].pop('router_selector')
        if 'registry_selector' in facts['master']:
            if 'hosted' not in facts:
                facts['hosted'] = {}
            if 'registry' not in facts['hosted']:
                facts['hosted']['registry'] = {}
            facts['hosted']['registry']['selector'] = facts['master'].pop('registry_selector')
    return facts
def first_ip(network):
    """ Return the first IPv4 address in network
        Args:
            network (str): network in CIDR format
        Returns:
            str: first IPv4 address
    """
    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))

    (address, netmask) = network.split('/')
    netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
    return itoa((atoi(address) & netmask_i) + 1)
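# Worked example (a sketch, not part of the original module): the prefix
# length is parsed via socket.inet_aton, which accepts a bare integer string,
# so first_ip('172.30.0.0/16') masks to 172.30.0.0 and returns '172.30.0.1'.
# This is how the kubernetes service IP is derived from portal_net below.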
def hostname_valid(hostname):
    """ Test if specified hostname should be considered valid
        Args:
            hostname (str): hostname to test
        Returns:
            bool: True if valid, otherwise False
    """
    if (not hostname or
            hostname.startswith('localhost') or
            hostname.endswith('localdomain') or
            hostname.endswith('novalocal') or
            len(hostname.split('.')) < 2):
        return False

    return True


def choose_hostname(hostnames=None, fallback=''):
    """ Choose a hostname from the provided hostnames
        Given a list of hostnames and a fallback value, choose a hostname to
        use. This function will prefer fqdns if they exist (excluding any that
        begin with localhost or end with localdomain) over ip addresses.
        Args:
            hostnames (list): list of hostnames
            fallback (str): default value to set if hostnames does not contain
                a valid hostname
        Returns:
            str: chosen hostname
    """
    hostname = fallback
    if hostnames is None:
        return hostname

    ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
    ips = [i for i in hostnames
           if (i is not None and isinstance(i, basestring)
               and re.match(ip_regex, i))]
    hosts = [i for i in hostnames
             if i is not None and i != '' and i not in ips]

    for host_list in (hosts, ips):
        for host in host_list:
            if hostname_valid(host):
                return host

    return hostname
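# Illustrative behavior (assumed inputs): FQDNs win over bare IPs, so
# choose_hostname(['10.0.0.1', 'node1.example.com'], '10.0.0.1') returns
# 'node1.example.com', while choose_hostname(['localhost', '10.0.0.1'], '')
# falls through to the IP because 'localhost' is rejected by hostname_valid().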
def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Return metadata from the provided metadata_url
        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict or list: metadata request result
    """
    result, info = fetch_url(module, metadata_url, headers=headers)
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        return module.from_json(result.read())
    else:
        return [line.strip() for line in result.readlines()]


def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Walk the metadata tree and return a dictionary of the entire tree
        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the result of walking the metadata tree
    """
    metadata = dict()

    for line in query_metadata(metadata_url, headers, expect_json):
        if line.endswith('/') and not line == 'public-keys/':
            key = line[:-1]
            metadata[key] = walk_metadata(metadata_url + line,
                                          headers, expect_json)
        else:
            results = query_metadata(metadata_url + line, headers,
                                     expect_json)
            if len(results) == 1:
                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                metadata[line] = results.pop()
            else:
                metadata[line] = results

    return metadata


def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Retrieve the provider metadata
        Args:
            metadata_url (str): metadata url
            supports_recursive (bool): does the provider metadata api support
                recursion
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the provider metadata
    """
    try:
        if supports_recursive:
            metadata = query_metadata(metadata_url, headers,
                                      expect_json)
        else:
            metadata = walk_metadata(metadata_url, headers,
                                     expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        metadata = None

    return metadata
def normalize_gce_facts(metadata, facts):
    """ Normalize gce facts
        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                facts dict
    """
    for interface in metadata['instance']['networkInterfaces']:
        int_info = dict(ips=[interface['ip']], network_type='gce')
        int_info['public_ips'] = [ac['externalIp'] for ac
                                  in interface['accessConfigs']]
        int_info['public_ips'].extend(interface['forwardedIps'])
        _, _, network_id = interface['network'].rpartition('/')
        int_info['network_id'] = network_id
        facts['network']['interfaces'].append(int_info)

    _, _, zone = metadata['instance']['zone'].rpartition('/')
    facts['zone'] = zone

    # GCE currently only supports a single interface
    facts['network']['ip'] = facts['network']['interfaces'][0]['ips'][0]
    pub_ip = facts['network']['interfaces'][0]['public_ips'][0]
    facts['network']['public_ip'] = pub_ip
    facts['network']['hostname'] = metadata['instance']['hostname']

    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']

    return facts


def normalize_aws_facts(metadata, facts):
    """ Normalize aws facts
        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                facts dict
    """
    for interface in sorted(
            metadata['network']['interfaces']['macs'].values(),
            key=lambda x: x['device-number']
    ):
        int_info = dict()
        var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
        for ips_var, int_var in var_map.iteritems():
            ips = interface.get(int_var)
            if isinstance(ips, basestring):
                int_info[ips_var] = [ips]
            else:
                int_info[ips_var] = ips
        if 'vpc-id' in interface:
            int_info['network_type'] = 'vpc'
        else:
            int_info['network_type'] = 'classic'
        if int_info['network_type'] == 'vpc':
            int_info['network_id'] = interface['subnet-id']
        else:
            int_info['network_id'] = None
        facts['network']['interfaces'].append(int_info)

    facts['zone'] = metadata['placement']['availability-zone']

    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')

    # TODO: verify that local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')

    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = metadata.get('public-hostname')

    return facts


def normalize_openstack_facts(metadata, facts):
    """ Normalize openstack facts
        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                facts dict
    """
    # openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api, should be updated if neutron exposes this.

    facts['zone'] = metadata['availability_zone']
    local_ipv4 = metadata['ec2_compat']['local-ipv4'].split(',')[0]
    facts['network']['ip'] = local_ipv4
    facts['network']['public_ip'] = metadata['ec2_compat']['public-ipv4']

    # TODO: verify local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata['hostname']

    # TODO: verify that public hostname makes sense and is resolvable
    pub_h = metadata['ec2_compat']['public-hostname']
    facts['network']['public_hostname'] = pub_h

    return facts


def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts
        Args:
            provider (str): host provider
            metadata (dict): provider metadata
        Returns:
            dict: the normalized provider facts
    """
    if provider is None or metadata is None:
        return {}

    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available

    # TODO: add support for setting user_data if available

    facts = dict(name=provider, metadata=metadata,
                 network=dict(interfaces=[], ipv6_enabled=False))

    if provider == 'gce':
        facts = normalize_gce_facts(metadata, facts)
    elif provider == 'aws':
        facts = normalize_aws_facts(metadata, facts)
    elif provider == 'openstack':
        facts = normalize_openstack_facts(metadata, facts)
    return facts
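# Resulting shape (illustrative values, for an AWS host; keys beyond 'name',
# 'metadata' and 'network' are filled in by the provider-specific helpers):
#
#   {'name': 'aws',
#    'metadata': {...raw provider metadata...},
#    'zone': 'us-east-1a',
#    'network': {'interfaces': [...], 'ipv6_enabled': False,
#                'ip': '10.0.0.5', 'public_ip': '54.0.0.5',
#                'hostname': '...', 'public_hostname': '...'}}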
def set_flannel_facts_if_unset(facts):
    """ Set flannel facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the flannel
                facts if they were not already present
    """
    if 'common' in facts:
        if 'use_flannel' not in facts['common']:
            use_flannel = False
            facts['common']['use_flannel'] = use_flannel
    return facts


def set_nuage_facts_if_unset(facts):
    """ Set nuage facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the nuage
                facts if they were not already present
    """
    if 'common' in facts:
        if 'use_nuage' not in facts['common']:
            use_nuage = False
            facts['common']['use_nuage'] = use_nuage
    return facts
def set_node_schedulability(facts):
    """ Set schedulable facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated schedulable
                facts if they were not already present
    """
    if 'node' in facts:
        if 'schedulable' not in facts['node']:
            if 'master' in facts:
                facts['node']['schedulable'] = False
            else:
                facts['node']['schedulable'] = True
    return facts


def set_selectors(facts):
    """ Set selectors facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated selectors
                facts if they were not already present
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type == 'online':
        selector = "type=infra"
    else:
        selector = "region=infra"

    if 'hosted' not in facts:
        facts['hosted'] = {}
    if 'router' not in facts['hosted']:
        facts['hosted']['router'] = {}
    if 'selector' not in facts['hosted']['router'] or facts['hosted']['router']['selector'] in [None, 'None']:
        facts['hosted']['router']['selector'] = selector
    if 'registry' not in facts['hosted']:
        facts['hosted']['registry'] = {}
    if 'selector' not in facts['hosted']['registry'] or facts['hosted']['registry']['selector'] in [None, 'None']:
        facts['hosted']['registry']['selector'] = selector
    if 'metrics' not in facts['hosted']:
        facts['hosted']['metrics'] = {}
    if 'selector' not in facts['hosted']['metrics'] or facts['hosted']['metrics']['selector'] in [None, 'None']:
        facts['hosted']['metrics']['selector'] = None
    if 'logging' not in facts['hosted']:
        facts['hosted']['logging'] = {}
    if 'selector' not in facts['hosted']['logging'] or facts['hosted']['logging']['selector'] in [None, 'None']:
        facts['hosted']['logging']['selector'] = None

    return facts


def set_dnsmasq_facts_if_unset(facts):
    """ Set dnsmasq facts if not already present in facts
        Args:
            facts (dict): existing facts
        Returns:
            facts (dict): updated facts with values set if not previously set
    """
    if 'common' in facts:
        facts['common']['use_dnsmasq'] = bool('use_dnsmasq' not in facts['common'] and
                                              safe_get_bool(facts['common']['version_gte_3_2_or_1_2']))
        if 'master' in facts and 'dns_port' not in facts['master']:
            if safe_get_bool(facts['common']['use_dnsmasq']):
                facts['master']['dns_port'] = 8053
            else:
                facts['master']['dns_port'] = 53

    return facts
def set_project_cfg_facts_if_unset(facts):
    """ Set Project Configuration facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated Project Configuration
                facts if they were not already present
    """
    config = {
        'default_node_selector': '',
        'project_request_message': '',
        'project_request_template': '',
        'mcs_allocator_range': 's0:/2',
        'mcs_labels_per_project': 5,
        'uid_allocator_range': '1000000000-1999999999/10000'
    }

    if 'master' in facts:
        for key, value in config.items():
            if key not in facts['master']:
                facts['master'][key] = value

    return facts
def set_identity_providers_if_unset(facts):
    """ Set identity_providers fact if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated identity providers
                facts if they were not already present
    """
    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'identity_providers' not in facts['master']:
            identity_provider = dict(
                name='allow_all', challenge=True, login=True,
                kind='AllowAllPasswordIdentityProvider'
            )

            if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
                identity_provider = dict(
                    name='deny_all', challenge=True, login=True,
                    kind='DenyAllPasswordIdentityProvider'
                )

            facts['master']['identity_providers'] = [identity_provider]

    return facts


def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
                were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']

        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )

        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )

        etcd_urls = []
        if etcd_hosts != '':
            facts['master']['etcd_port'] = ports['etcd']
            facts['master']['embedded_etcd'] = False
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]

        facts['master'].setdefault('etcd_urls', etcd_urls)

        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]

        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))

        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)

        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))

    return facts
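# Illustrative defaults (assumed master facts: hostname 'master.example.com',
# api_port '8443', console_path 'console', ssl enabled): api_url becomes
# 'https://master.example.com:8443' and console_url becomes
# 'https://master.example.com:8443/console', unless those facts already exist.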
def set_aggregate_facts(facts):
    """ Set aggregate facts
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with aggregated facts
    """
    all_hostnames = set()
    internal_hostnames = set()
    kube_svc_ip = first_ip(facts['common']['portal_net'])
    if 'common' in facts:
        all_hostnames.add(facts['common']['hostname'])
        all_hostnames.add(facts['common']['public_hostname'])
        all_hostnames.add(facts['common']['ip'])
        all_hostnames.add(facts['common']['public_ip'])
        facts['common']['kube_svc_ip'] = kube_svc_ip

        internal_hostnames.add(facts['common']['hostname'])
        internal_hostnames.add(facts['common']['ip'])

        cluster_domain = facts['common']['dns_domain']

        if 'master' in facts:
            if 'cluster_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_hostname'])
            if 'cluster_public_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_public_hostname'])
            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
            all_hostnames.update(svc_names)
            internal_hostnames.update(svc_names)

            all_hostnames.add(kube_svc_ip)
            internal_hostnames.add(kube_svc_ip)

        facts['common']['all_hostnames'] = list(all_hostnames)
        facts['common']['internal_hostnames'] = list(internal_hostnames)

    return facts


def set_etcd_facts_if_unset(facts):
    """
    If using embedded etcd, loads the data directory from master-config.yaml.
    If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
    If anything goes wrong parsing these, the fact will not be set.
    """
    if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        if 'etcd_data_dir' not in etcd_facts:
            try:
                # Parse master config to find actual etcd data dir:
                master_cfg_path = os.path.join(facts['common']['config_base'],
                                               'master/master-config.yaml')
                master_cfg_f = open(master_cfg_path, 'r')
                config = yaml.safe_load(master_cfg_f.read())
                master_cfg_f.close()

                etcd_facts['etcd_data_dir'] = \
                    config['etcdConfig']['storageDirectory']

                facts['etcd'] = etcd_facts

            # We don't want exceptions bubbling up here:
            # pylint: disable=broad-except
            except Exception:
                pass
    else:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()

        # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
        try:
            # Add a fake section for parsing:
            ini_str = unicode('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = ConfigParser.RawConfigParser()
            config.readfp(ini_fp)
            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
                etcd_data_dir = etcd_data_dir[1:-1]

            etcd_facts['etcd_data_dir'] = etcd_data_dir
            facts['etcd'] = etcd_facts

        # We don't want exceptions bubbling up here:
        # pylint: disable=broad-except
        except Exception:
            pass

    return facts
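# Illustrative etcd.conf handling (assumed file content): a line such as
#   ETCD_DATA_DIR="/var/lib/etcd/"
# is read through the fake [root] section and the surrounding quotes are
# stripped, so etcd_data_dir becomes '/var/lib/etcd/'.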
def set_deployment_facts_if_unset(facts):
    """ Set Facts that vary based on deployment_type. This currently
        includes common.service_type, common.config_base, master.registry_url,
        node.registry_url, node.storage_plugin_deps
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated deployment_type
                facts
    """
    # disabled to avoid breaking up facts related to deployment type into
    # multiple methods for now.
    # pylint: disable=too-many-statements, too-many-branches
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'service_type' not in facts['common']:
            service_type = 'atomic-openshift'
            if deployment_type == 'origin':
                service_type = 'origin'
            elif deployment_type in ['enterprise']:
                service_type = 'openshift'
            facts['common']['service_type'] = service_type
        if 'config_base' not in facts['common']:
            config_base = '/etc/origin'
            if deployment_type in ['enterprise']:
                config_base = '/etc/openshift'
            # Handle upgrade scenarios when symlinks don't yet exist:
            if not os.path.exists(config_base) and os.path.exists('/etc/openshift'):
                config_base = '/etc/openshift'
            facts['common']['config_base'] = config_base
        if 'data_dir' not in facts['common']:
            data_dir = '/var/lib/origin'
            if deployment_type in ['enterprise']:
                data_dir = '/var/lib/openshift'
            # Handle upgrade scenarios when symlinks don't yet exist:
            if not os.path.exists(data_dir) and os.path.exists('/var/lib/openshift'):
                data_dir = '/var/lib/openshift'
            facts['common']['data_dir'] = data_dir

    if 'docker' in facts:
        deployment_type = facts['common']['deployment_type']
        if deployment_type in ['enterprise', 'atomic-enterprise', 'openshift-enterprise']:
            addtl_regs = facts['docker'].get('additional_registries', [])
            ent_reg = 'registry.access.redhat.com'
            if ent_reg not in addtl_regs:
                facts['docker']['additional_registries'] = addtl_regs + [ent_reg]

    for role in ('master', 'node'):
        if role in facts:
            deployment_type = facts['common']['deployment_type']
            if 'registry_url' not in facts[role]:
                registry_url = 'openshift/origin-${component}:${version}'
                if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
                    registry_url = 'openshift3/ose-${component}:${version}'
                elif deployment_type == 'atomic-enterprise':
                    registry_url = 'aep3_beta/aep-${component}:${version}'
                facts[role]['registry_url'] = registry_url

    if 'master' in facts:
        deployment_type = facts['common']['deployment_type']
        openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
        if 'disabled_features' in facts['master']:
            if deployment_type == 'atomic-enterprise':
                curr_disabled_features = set(facts['master']['disabled_features'])
                facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
        else:
            if facts['common']['deployment_subtype'] == 'registry':
                facts['master']['disabled_features'] = openshift_features

    if 'node' in facts:
        deployment_type = facts['common']['deployment_type']
        if 'storage_plugin_deps' not in facts['node']:
            if deployment_type in ['openshift-enterprise', 'atomic-enterprise', 'origin']:
                facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
            else:
                facts['node']['storage_plugin_deps'] = []

    return facts
def set_version_facts_if_unset(facts):
    """ Set version facts. This currently includes common.version and
        common.version_gte_3_1_or_1_1.
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
    """
    if 'common' in facts:
        deployment_type = facts['common']['deployment_type']
        version = get_openshift_version(facts)
        if version:
            facts['common']['version'] = version
            if deployment_type == 'origin':
                version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0')
                version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('1.1.1')
                version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('1.2.0')
                version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('1.3.0')
            else:
                version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('3.0.2.905')
                version_gte_3_1_1_or_1_1_1 = LooseVersion(version) >= LooseVersion('3.1.1')
                version_gte_3_2_or_1_2 = LooseVersion(version) >= LooseVersion('3.1.1.901')
                version_gte_3_3_or_1_3 = LooseVersion(version) >= LooseVersion('3.3.0')
        else:
            version_gte_3_1_or_1_1 = True
            version_gte_3_1_1_or_1_1_1 = True
            version_gte_3_2_or_1_2 = True
            version_gte_3_3_or_1_3 = False
        facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
        facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
        facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
        facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3

        if version_gte_3_3_or_1_3:
            examples_content_version = 'v1.3'
        elif version_gte_3_2_or_1_2:
            examples_content_version = 'v1.2'
        elif version_gte_3_1_or_1_1:
            examples_content_version = 'v1.1'
        else:
            examples_content_version = 'v1.0'

        facts['common']['examples_content_version'] = examples_content_version

    return facts


def set_manageiq_facts_if_unset(facts):
    """ Set manageiq facts. This currently includes common.use_manageiq.
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
        Raises:
            OpenShiftFactsInternalError:
    """
    if 'common' not in facts:
        if 'version_gte_3_1_or_1_1' not in facts['common']:
            raise OpenShiftFactsInternalError(
                "Invalid invocation: The required facts are not set"
            )
    if 'use_manageiq' not in facts['common']:
        facts['common']['use_manageiq'] = facts['common']['version_gte_3_1_or_1_1']

    return facts
def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict
        Args:
            facts (dict): existing facts
            system_facts (dict): ansible_facts
        Returns:
            dict: the facts dict updated with the generated sdn facts if they
                were not already present
    """
    if 'common' in facts:
        use_sdn = facts['common']['use_openshift_sdn']
        if not (use_sdn == '' or isinstance(use_sdn, bool)):
            use_sdn = safe_get_bool(use_sdn)
            facts['common']['use_openshift_sdn'] = use_sdn
        if 'sdn_network_plugin_name' not in facts['common']:
            plugin = 'redhat/openshift-ovs-subnet' if use_sdn else ''
            facts['common']['sdn_network_plugin_name'] = plugin

    if 'master' in facts:
        if 'sdn_cluster_network_cidr' not in facts['master']:
            facts['master']['sdn_cluster_network_cidr'] = '10.1.0.0/16'
        if 'sdn_host_subnet_length' not in facts['master']:
            facts['master']['sdn_host_subnet_length'] = '8'

    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']

        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'

        for val in system_facts.itervalues():
            if isinstance(val, dict) and 'mtu' in val:
                mtu = val['mtu']

                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    facts['node']['sdn_mtu'] = str(mtu - 50)

    return facts


def set_nodename(facts):
    """ Set the node's nodename, preferring the cloud provider hostname on OpenStack hosts """
    if 'node' in facts and 'common' in facts:
        if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
            facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
        else:
            facts['node']['nodename'] = facts['common']['hostname'].lower()
    return facts
def migrate_oauth_template_facts(facts):
    """
    Migrate an old oauth template fact to a newer format if it's present.

    The legacy 'oauth_template' fact was just a filename, and assumed you were
    setting the 'login' template.

    The new pluralized 'oauth_templates' fact is a dict mapping the template
    name to a filename.

    Simplify the code after this by merging the old fact into the new.
    """
    if 'master' in facts and 'oauth_template' in facts['master']:
        if 'oauth_templates' not in facts['master']:
            facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']}
        elif 'login' not in facts['master']['oauth_templates']:
            facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
    return facts


def format_url(use_ssl, hostname, port, path=''):
    """ Format url based on ssl flag, hostname, port and path
        Args:
            use_ssl (bool): is ssl enabled
            hostname (str): hostname
            port (str): port
            path (str): url path
        Returns:
            str: The generated url string
    """
    scheme = 'https' if use_ssl else 'http'
    netloc = hostname
    if (use_ssl and port != '443') or (not use_ssl and port != '80'):
        netloc += ":%s" % port
    try:
        url = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
    except AttributeError:
        # pylint: disable=undefined-variable
        url = urlunparse((scheme, netloc, path, '', '', ''))
    return url
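# Illustrative results (assumed inputs): format_url(True, 'master.example.com',
# '8443', 'console') yields 'https://master.example.com:8443/console', while
# format_url(True, 'master.example.com', '443') omits the default port.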
def get_current_config(facts):
    """ Get current openshift config
        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]

        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )

        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if (os.path.isfile('/usr/bin/openshift')
                and os.path.isfile(kubeconfig_path)):
            try:
                _, output, _ = module.run_command(
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)

                cad = 'certificate-authority-data'
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass

                current_config['kubeconfig'] = config

            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass

    return current_config
def build_kubelet_args(facts):
    """ Build node kubelet_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'node' in facts:
        kubelet_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    kubelet_args['cloud-provider'] = ['aws']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    kubelet_args['cloud-provider'] = ['openstack']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    kubelet_args['cloud-provider'] = ['gce']
        if kubelet_args != {}:
            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
    return facts


def build_controller_args(facts):
    """ Build master controller_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        controller_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    controller_args['cloud-provider'] = ['aws']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    controller_args['cloud-provider'] = ['openstack']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    controller_args['cloud-provider'] = ['gce']
        if controller_args != {}:
            facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
    return facts


def build_api_server_args(facts):
    """ Build master api_server_args """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        api_server_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    api_server_args['cloud-provider'] = ['aws']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    api_server_args['cloud-provider'] = ['openstack']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    api_server_args['cloud-provider'] = ['gce']
        if api_server_args != {}:
            facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
    return facts
def is_service_running(service):
    """ Queries systemd through dbus to see if the service is running """
    service_running = False
    bus = SystemBus()
    systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
    manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
    try:
        service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
        service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
        service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
        service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
        service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        if service_load_state == 'loaded' and service_active_state == 'active':
            service_running = True
    except DBusException:
        pass

    return service_running


def get_version_output(binary, version_cmd):
    """ runs and returns the version output for a command """
    cmd = []
    for item in (binary, version_cmd):
        if isinstance(item, list):
            cmd.extend(item)
        else:
            cmd.append(item)

    output = ''
    if os.path.isfile(cmd[0]):
        _, output, _ = module.run_command(cmd)
    return output


def get_docker_version_info():
    """ Parses and returns the docker version info """
    result = None
    if is_service_running('docker'):
        version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
        if 'Server' in version_info:
            result = {
                'api_version': version_info['Server']['API version'],
                'version': version_info['Server']['Version']
            }
    return result
def get_openshift_version(facts):
    """ Get current version of openshift on the host.

        Checks a variety of ways ranging from fastest to slowest.

        Args:
            facts (dict): existing facts
        Returns:
            version: the current openshift version
    """
    version = None

    # No need to run this method repeatedly on a system if we already know the
    # version
    if 'common' in facts:
        if 'version' in facts['common'] and facts['common']['version'] is not None:
            return facts['common']['version']

    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
        version = parse_openshift_version(output)
    elif 'common' in facts and 'is_containerized' in facts['common']:
        version = get_container_openshift_version(facts)

    # Handle containerized masters that have not yet been configured as a node.
    # This can be very slow and may get re-run multiple times, so we only use this
    # if other methods failed to find a version.
    if not version and os.path.isfile('/usr/local/bin/openshift'):
        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])
        version = parse_openshift_version(output)

    return version


def get_container_openshift_version(facts):
    """
    If containerized, see if we can determine the installed version via the
    systemd environment files.
    """
    for filename in ['/etc/sysconfig/%s-master', '/etc/sysconfig/%s-node']:
        env_path = filename % facts['common']['service_type']
        if not os.path.exists(env_path):
            continue

        with open(env_path) as env_file:
            for line in env_file:
                if line.startswith("IMAGE_VERSION="):
                    tag = line[len("IMAGE_VERSION="):].strip()
                    # Remove leading "v" and any trailing release info, we just want
                    # a version number here:
                    version = tag[1:].split("-")[0]
                    return version
    return None
def parse_openshift_version(output):
    """ Parse the version number from the output of 'openshift version'
        Args:
            output (string): output of 'openshift version'
        Returns:
            string: the version number
    """
    versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
    ver = versions.get('openshift', '')
    # Remove trailing build number and commit hash from older versions, we need to return a straight
    # w.x.y.z version here for use as openshift_version throughout the playbooks/roles. (i.e. 3.1.1.6-64-g80b61da)
    ver = ver.split('-')[0]
    return ver
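# Illustrative parse (assumed 'openshift version' output): given the line
# "openshift v3.1.1.6-64-g80b61da" the function returns '3.1.1.6' -- the
# build number and commit hash suffix are stripped.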
def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to supplied facts dict
        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts
    """
    if not provider_facts:
        return facts

    common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
    for h_var, ip_var in common_vars:
        ip_value = provider_facts['network'].get(ip_var)
        if ip_value:
            facts['common'][ip_var] = ip_value

        facts['common'][h_var] = choose_hostname(
            [provider_facts['network'].get(h_var)],
            facts['common'][h_var]
        )

    facts['provider'] = provider_facts
    return facts


# Disabling pylint too many branches. This function needs to be refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
    """ Recursively merge facts dicts

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']
        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    protected_facts = ['ha', 'master_count']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge here; if they
    # are present in the new facts, just overwrite the old value completely.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config']

    facts = dict()
    for key, value in orig.iteritems():
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watch out for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], basestring):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if the old and new facts are both dictionaries.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)

                # Collect the subset of protected facts to overwrite
                # if key matches. These will be passed to the
                # subsequent merge_facts call.
                relevant_protected_facts = []
                for item in protected_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_protected_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
            # Key matches an additive fact and we are not overwriting
            # it, so append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # Key matches a protected fact and we are not overwriting
            # it, so determine whether it is okay to change this fact.
            elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                # The master count (int) can only increase unless it
                # has been passed as a protected fact to overwrite.
                if key == 'master_count':
                    if int(value) <= int(new[key]):
                        facts[key] = copy.deepcopy(new[key])
                    else:
                        module.fail_json(msg='openshift_facts received a lower value for openshift.master.master_count')
                # ha (bool) cannot change unless it has been passed
                # as a protected fact to overwrite.
                if key == 'ha':
                    if safe_get_bool(value) != safe_get_bool(new[key]):
                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')
                    else:
                        facts[key] = value
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new, so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)

    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watch out for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], basestring):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts
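
# Illustrative sketch, not part of the original module: how merge_facts treats
# additive and protected keys (values below are made up for the example).
#
#   orig = {'master': {'named_certificates': [{'certfile': 'a.crt'}], 'master_count': 1}}
#   new = {'master': {'named_certificates': [{'certfile': 'b.crt'}], 'master_count': 3}}
#   merge_facts(orig, new, [], [])
#   # -> named_certificates is the union of both lists (additive fact) and
#   #    master_count becomes 3 (protected ints may only increase).
#   merge_facts(orig, new, ['master.named_certificates'], [])
#   # -> named_certificates is replaced outright by the new list instead.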


def save_local_facts(filename, facts):
    """ Save local facts

        Args:
            filename (str): local facts file
            facts (dict): facts to set
    """
    try:
        fact_dir = os.path.dirname(filename)
        if not os.path.exists(fact_dir):
            os.makedirs(fact_dir)
        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )
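
# Illustrative sketch (hypothetical path and values): save_local_facts serializes
# the facts dict as JSON through the global AnsibleModule and locks the file
# down to mode 0600.
#
#   save_local_facts('/etc/ansible/facts.d/openshift.fact',
#                    {'common': {'deployment_type': 'origin'}})
#   # creates /etc/ansible/facts.d if needed and writes the JSON-serialized facts.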


def get_local_facts_from_file(filename):
    """ Retrieve local facts from fact file

        Args:
            filename (str): local facts file
        Returns:
            dict: the retrieved facts
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style
        ini_facts = ConfigParser.SafeConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict()
            for key, value in ini_facts.items(section):
                local_facts[section][key] = value
    except (ConfigParser.MissingSectionHeaderError,
            ConfigParser.ParsingError):
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass

    return local_facts
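
# Illustrative sketch (hypothetical file contents): the reader first tries the
# legacy INI format and falls back to JSON when that fails to parse.
#
#   # INI-style fact file:                JSON-style fact file:
#   #   [common]                            {"common": {"deployment_type": "origin"}}
#   #   deployment_type = origin
#   get_local_facts_from_file('/etc/ansible/facts.d/openshift.fact')
#   # -> {'common': {'deployment_type': 'origin'}} in either case; a missing or
#   #    unreadable file yields an empty dict.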


def sort_unique(alist):
    """ Sorts and de-dupes a list

        Args:
            alist (list): the list to sort and de-dupe
        Returns:
            list: a sorted, de-duped list
    """
    alist.sort()
    out = list()
    for i in alist:
        if i not in out:
            out.append(i)

    return out
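
# Illustrative sketch (hypothetical values): sort_unique sorts in place and then
# drops duplicates while building the result.
#
#   sort_unique(['b.example.com', 'a.example.com', 'b.example.com'])
#   # -> ['a.example.com', 'b.example.com']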


def safe_get_bool(fact):
    """ Get a boolean fact safely.

        Args:
            fact: the fact to convert to a bool
        Returns:
            bool: the given fact as a bool
    """
    return bool(strtobool(str(fact)))
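
# Illustrative sketch: safe_get_bool delegates to distutils' strtobool, so the
# usual truthy/falsy spellings are accepted (hypothetical inputs shown).
#
#   safe_get_bool('yes')    # -> True
#   safe_get_bool('0')      # -> False
#   safe_get_bool(True)     # -> True (str(True) is parsed back by strtobool)
#   safe_get_bool('maybe')  # raises ValueError from strtobool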


def set_proxy_facts(facts):
    """ Set global proxy facts and promote defaults from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts (dict): existing facts
        Returns:
            facts (dict): facts updated with any missing proxy values filled in
    """
    if 'common' in facts:
        common = facts['common']
        if 'http_proxy' in common or 'https_proxy' in common:
            if 'no_proxy' in common and \
                    isinstance(common['no_proxy'], basestring):
                common['no_proxy'] = common['no_proxy'].split(",")
            elif 'no_proxy' not in common:
                common['no_proxy'] = []
            if 'generate_no_proxy_hosts' in common and \
                    safe_get_bool(common['generate_no_proxy_hosts']):
                if 'no_proxy_internal_hostnames' in common:
                    common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
                common['no_proxy'].append('.' + common['dns_domain'])
            # We always add ourselves no matter what
            common['no_proxy'].append(common['hostname'])
            common['no_proxy'] = sort_unique(common['no_proxy'])
        facts['common'] = common

    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Copy values from common to builddefaults
        if 'http_proxy' not in builddefaults and 'http_proxy' in common:
            builddefaults['http_proxy'] = common['http_proxy']
        if 'https_proxy' not in builddefaults and 'https_proxy' in common:
            builddefaults['https_proxy'] = common['https_proxy']
        # make no_proxy into a list if it's not
        if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], basestring):
            builddefaults['no_proxy'] = builddefaults['no_proxy'].split(",")
        if 'no_proxy' not in builddefaults and 'no_proxy' in common:
            builddefaults['no_proxy'] = common['no_proxy']
        if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
            builddefaults['git_http_proxy'] = builddefaults['http_proxy']
        if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
            builddefaults['git_https_proxy'] = builddefaults['https_proxy']
        # If we're actually defining a proxy config then create kube_admission_plugin_config
        # if it doesn't exist, then merge the builddefaults['config'] structure
        # into kube_admission_plugin_config
        if 'kube_admission_plugin_config' not in facts['master']:
            facts['master']['kube_admission_plugin_config'] = dict()
        if 'config' in builddefaults and ('http_proxy' in builddefaults or
                                          'https_proxy' in builddefaults):
            facts['master']['kube_admission_plugin_config'].update(builddefaults['config'])
        facts['builddefaults'] = builddefaults

    return facts
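
# Illustrative sketch (hypothetical fact values): proxy settings defined under
# 'common' are promoted into 'builddefaults' and its git_* variants when unset.
#
#   facts = {'common': {'http_proxy': 'http://proxy:3128', 'no_proxy': 'a,b',
#                       'hostname': 'node1', 'dns_domain': 'cluster.local'},
#            'builddefaults': {}, 'master': {}}
#   facts = set_proxy_facts(facts)
#   # -> common.no_proxy becomes a sorted list that includes 'node1', and both
#   #    builddefaults.http_proxy and builddefaults.git_http_proxy are set to
#   #    'http://proxy:3128'.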


# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated containerization
                  facts
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type in ['enterprise', 'openshift-enterprise']:
        master_image = 'openshift3/ose'
        cli_image = master_image
        node_image = 'openshift3/node'
        ovs_image = 'openshift3/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift3/ose-pod'
        router_image = 'openshift3/ose-haproxy-router'
        registry_image = 'openshift3/ose-docker-registry'
        deployer_image = 'openshift3/ose-deployer'
    elif deployment_type == 'atomic-enterprise':
        master_image = 'aep3_beta/aep'
        cli_image = master_image
        node_image = 'aep3_beta/node'
        ovs_image = 'aep3_beta/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'aep3_beta/aep-pod'
        router_image = 'aep3_beta/aep-haproxy-router'
        registry_image = 'aep3_beta/aep-docker-registry'
        deployer_image = 'aep3_beta/aep-deployer'
    else:
        master_image = 'openshift/origin'
        cli_image = master_image
        node_image = 'openshift/node'
        ovs_image = 'openshift/openvswitch'
        etcd_image = 'registry.access.redhat.com/rhel7/etcd'
        pod_image = 'openshift/origin-pod'
        router_image = 'openshift/origin-haproxy-router'
        registry_image = 'openshift/origin-docker-registry'
        deployer_image = 'openshift/origin-deployer'

    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')
    if 'is_containerized' not in facts['common']:
        facts['common']['is_containerized'] = facts['common']['is_atomic']
    if 'cli_image' not in facts['common']:
        facts['common']['cli_image'] = cli_image
    if 'pod_image' not in facts['common']:
        facts['common']['pod_image'] = pod_image
    if 'router_image' not in facts['common']:
        facts['common']['router_image'] = router_image
    if 'registry_image' not in facts['common']:
        facts['common']['registry_image'] = registry_image
    if 'deployer_image' not in facts['common']:
        facts['common']['deployer_image'] = deployer_image
    if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
        facts['etcd']['etcd_image'] = etcd_image
    if 'master' in facts and 'master_image' not in facts['master']:
        facts['master']['master_image'] = master_image
    if 'node' in facts:
        if 'node_image' not in facts['node']:
            facts['node']['node_image'] = node_image
        if 'ovs_image' not in facts['node']:
            facts['node']['ovs_image'] = ovs_image

    if safe_get_bool(facts['common']['is_containerized']):
        facts['common']['admin_binary'] = '/usr/local/bin/oadm'
        facts['common']['client_binary'] = '/usr/local/bin/oc'

    return facts
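
# Illustrative sketch (hypothetical facts): image defaults are chosen from
# deployment_type and only filled in when a fact has not already been set.
#
#   facts = {'common': {'deployment_type': 'origin'}, 'node': {}}
#   facts = set_container_facts_if_unset(facts)
#   # -> common.cli_image == 'openshift/origin', node.node_image == 'openshift/node',
#   #    and common.is_containerized defaults to common.is_atomic (i.e. whether
#   #    /run/ostree-booted exists).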


def set_installed_variant_rpm_facts(facts):
    """ Set RPM facts of installed variant

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with installed_variant_rpms
    """
    installed_rpms = []
    for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
        optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
        variant_rpms = [base_rpm] + \
                       ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
                       ['tuned-profiles-%s-node' % base_rpm]
        for rpm in variant_rpms:
            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])
            if exit_code == 0:
                installed_rpms.append(rpm)

    facts['common']['installed_variant_rpms'] = installed_rpms
    return facts
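
# Illustrative sketch: for each variant base ('openshift', 'atomic-openshift',
# 'origin') the function probes base, base-master, base-node, base-clients,
# base-sdn-ovs and tuned-profiles-<base>-node with 'rpm -q' and records only
# the packages that are actually installed, e.g. (hypothetical host):
#
#   facts['common']['installed_variant_rpms']  # -> ['origin', 'origin-node', 'origin-sdn-ovs']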


class OpenShiftFactsInternalError(Exception):
    """Origin Facts Error"""
    pass


class OpenShiftFactsUnsupportedRoleError(Exception):
    """Origin Facts Unsupported Role Error"""
    pass


class OpenShiftFactsFileWriteError(Exception):
    """Origin Facts File Write Error"""
    pass


class OpenShiftFactsMetadataUnavailableError(Exception):
    """Origin Facts Metadata Unavailable Error"""
    pass


class OpenShiftFacts(object):
    """ Origin Facts

        Attributes:
            facts (dict): facts for the host

        Args:
            module (AnsibleModule): an AnsibleModule object
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            openshift_env (dict): openshift_env facts to set
            openshift_env_structures (list): list of structures used to determine
                                             the keys of structured openshift_env facts
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Raises:
            OpenShiftFactsUnsupportedRoleError: if the given role is not in known_roles
    """
    known_roles = ['builddefaults',
                   'clock',
                   'cloudprovider',
                   'common',
                   'docker',
                   'etcd',
                   'hosted',
                   'loadbalancer',
                   'master',
                   'node']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None,
                 openshift_env=None,
                 openshift_env_structures=None,
                 protected_facts_to_overwrite=None):
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        try:
            # ansible-2.1
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])
            for (k, v) in self.system_facts.items():
                self.system_facts["ansible_%s" % k.replace('-', '_')] = v
        except UnboundLocalError:
            # ansible-2.2
            self.system_facts = get_all_facts(module)['ansible_facts']

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite,
                                         openshift_env,
                                         openshift_env_structures,
                                         protected_facts_to_overwrite)

    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite,
                       openshift_env,
                       openshift_env_structures,
                       protected_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift_env facts for overriding generated defaults
                openshift_env_structures (list): list of structures used to determine
                                                 the keys of structured openshift_env facts
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The generated facts
        """
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite,
                                            openshift_env,
                                            openshift_env_structures,
                                            protected_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite,
                            protected_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        facts = set_url_facts_if_unset(facts)
        facts = set_project_cfg_facts_if_unset(facts)
        facts = set_flannel_facts_if_unset(facts)
        facts = set_nuage_facts_if_unset(facts)
        facts = set_node_schedulability(facts)
        facts = set_selectors(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_dnsmasq_facts_if_unset(facts)
        facts = set_manageiq_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        facts = set_proxy_facts(facts)
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)

    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host
                deployment_type (str): deployment type to set as the default
                deployment_subtype (str): deployment subtype to set as the default
            Returns:
                dict: The generated default facts
        """
        defaults = {}
        ip_addr = self.system_facts['ansible_default_ipv4']['address']
        exit_code, output, _ = module.run_command(['hostname', '-f'])
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr)

        defaults['common'] = dict(use_openshift_sdn=True, ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc', admin_binary='oadm',
                                  dns_domain='cluster.local',
                                  install_examples=True,
                                  debug_level=2)

        if 'master' in roles:
            scheduler_predicates = [
                {"name": "MatchNodeSelector"},
                {"name": "PodFitsResources"},
                {"name": "PodFitsPorts"},
                {"name": "NoDiskConflict"},
                {"name": "NoVolumeZoneConflict"},
                {"name": "MaxEBSVolumeCount"},
                {"name": "MaxGCEPDVolumeCount"},
                {"name": "Region", "argument": {"serviceAffinity": {"labels": ["region"]}}}
            ]
            scheduler_priorities = [
                {"name": "LeastRequestedPriority", "weight": 1},
                {"name": "SelectorSpreadPriority", "weight": 1},
                {"name": "Zone", "weight": 2, "argument": {"serviceAntiAffinity": {"label": "zone"}}}
            ]

            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_etcd=True, embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      scheduler_predicates=scheduler_predicates,
                                      scheduler_priorities=scheduler_priorities,
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='30s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)

        if 'docker' in roles:
            docker = dict(disable_push_dockerhub=False,
                          hosted_registry_insecure=True,
                          options='--log-driver=json-file --log-opt max-size=50m')
            version_info = get_docker_version_info()
            if version_info is not None:
                docker['api_version'] = version_info['api_version']
                docker['version'] = version_info['version']
                docker['gte_1_10'] = LooseVersion(version_info['version']) >= LooseVersion('1.10')
            defaults['docker'] = docker

        if 'clock' in roles:
            exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])
            chrony_installed = bool(exit_code == 0)
            defaults['clock'] = dict(
                enabled=True,
                chrony_installed=chrony_installed)

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        if 'hosted' in roles or self.role == 'hosted':
            defaults['hosted'] = dict(
                metrics=dict(
                    deploy=False,
                    duration=7,
                    resolution='10s',
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='metrics',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access_modes=['ReadWriteOnce'],
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                logging=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='logging-es',
                            size='10Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access_modes=['ReadWriteOnce'],
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                registry=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='registry',
                            size='5Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        host=None,
                        access_modes=['ReadWriteMany'],
                        create_pv=True,
                        create_pvc=True
                    )
                ),
                router=dict()
            )

        if 'loadbalancer' in roles:
            loadbalancer = dict(frontend_port='8443',
                                default_maxconn='20000',
                                global_maxconn='20000',
                                limit_nofile='100000')
            defaults['loadbalancer'] = loadbalancer

        return defaults

    def guess_host_provider(self):
        """ Guess the host provider

            Returns:
                dict: The generated default facts for the detected provider
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['ansible_product_name']
        product_version = self.system_facts['ansible_product_version']
        virt_type = self.system_facts['ansible_virtualization_type']
        virt_role = self.system_facts['ansible_virtualization_role']
        provider = None
        metadata = None

        # TODO: this is not exposed through module_utils/facts.py in ansible,
        # need to create PR for ansible to expose it
        bios_vendor = get_file_content(
            '/sys/devices/virtual/dmi/id/bios_vendor'
        )
        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif (virt_type == 'xen' and virt_role == 'guest'
              and re.match(r'.*\.amazon$', product_version)):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)

    def init_provider_facts(self):
        """ Initialize the provider facts

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts

    @staticmethod
    def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
        """ Split openshift_env facts based on openshift_env structures.

            Args:
                openshift_env_fact (string): the openshift_env fact to split
                                             ex: 'openshift_cloudprovider_openstack_auth_url'
                openshift_env_structures (list): a list of structures to determine fact keys
                                                 ex: ['openshift.cloudprovider.openstack.*']
            Returns:
                list: a list of keys that represent the fact
                      ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
        """
        # By default, we'll split an openshift_env fact by underscores.
        fact_keys = openshift_env_fact.split('_')

        # Determine if any of the provided variable structures match the fact.
        matching_structure = None
        if openshift_env_structures is not None:
            for structure in openshift_env_structures:
                if re.match(structure, openshift_env_fact):
                    matching_structure = structure
        # Fact didn't match any variable structures so return the default fact keys.
        if matching_structure is None:
            return fact_keys

        final_keys = []
        structure_keys = matching_structure.split('.')
        for structure_key in structure_keys:
            # Matched current key. Add to final keys.
            if structure_key == fact_keys[structure_keys.index(structure_key)]:
                final_keys.append(structure_key)
            # Wildcard means we will be taking everything from here to the end of the fact.
            elif structure_key == '*':
                final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
            # Shouldn't have gotten here, return the fact keys.
            else:
                return fact_keys
        return final_keys
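
    # Illustrative sketch (values taken from the docstring example): with a
    # matching structure, everything after the wildcard is re-joined into a
    # single key; without one, the fact is simply split on underscores.
    #
    #   OpenShiftFacts.split_openshift_env_fact_keys(
    #       'openshift_cloudprovider_openstack_auth_url',
    #       ['openshift.cloudprovider.openstack.*'])
    #   # -> ['openshift', 'cloudprovider', 'openstack', 'auth_url']
    #   OpenShiftFacts.split_openshift_env_fact_keys('openshift_cloudprovider_kind', None)
    #   # -> ['openshift', 'cloudprovider', 'kind']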

    # Disabling too-many-branches and too-many-locals.
    # This should be cleaned up as a TODO item.
    # pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None,
                         openshift_env=None,
                         openshift_env_structures=None,
                         protected_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift env facts to set
                openshift_env_structures (list): list of structures used to determine
                                                 the keys of structured openshift_env facts
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        if openshift_env != {} and openshift_env is not None:
            for fact, value in openshift_env.iteritems():
                oo_env_facts = dict()
                current_level = oo_env_facts
                keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
                if len(keys) > 0 and keys[0] != self.role:
                    continue
                for key in keys:
                    if key == keys[-1]:
                        current_level[key] = value
                    elif key not in current_level:
                        current_level[key] = dict()
                        current_level = current_level[key]
                facts_to_set = merge_facts(orig=facts_to_set,
                                           new=oo_env_facts,
                                           additive_facts_to_overwrite=[],
                                           protected_facts_to_overwrite=[])

        local_facts = get_local_facts_from_file(self.filename)

        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite,
                                      protected_facts_to_overwrite)

        if 'docker' in new_local_facts:
            # remove duplicate and empty strings from registry lists
            for cat in ['additional', 'blocked', 'insecure']:
                key = '{0}_registries'.format(cat)
                if key in new_local_facts['docker']:
                    val = new_local_facts['docker'][key]
                    if isinstance(val, basestring):
                        val = [x.strip() for x in val.split(',')]
                    new_local_facts['docker'][key] = list(set(val) - set(['']))

            # Convert legacy log_options comma sep string to a list if present:
            if 'log_options' in new_local_facts['docker'] and \
                    isinstance(new_local_facts['docker']['log_options'], basestring):
                new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')

        new_local_facts = self.remove_empty_facts(new_local_facts)

        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts

    def remove_empty_facts(self, facts=None):
        """ Remove empty facts

            Args:
                facts (dict): facts to clean
            Returns:
                dict: the facts with empty values removed
        """
        facts_to_remove = []
        for fact, value in facts.iteritems():
            if isinstance(facts[fact], dict):
                facts[fact] = self.remove_empty_facts(facts[fact])
            else:
                if value == "" or value == [""] or value is None:
                    facts_to_remove.append(fact)
        for fact in facts_to_remove:
            del facts[fact]
        return facts
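
    # Illustrative sketch (hypothetical facts): empty strings, [""] and None are
    # pruned recursively while non-empty values are preserved.
    #
    #   facts = {'node': {'labels': '', 'set_node_ip': False, 'kubelet_args': None}}
    #   self.remove_empty_facts(facts)
    #   # -> {'node': {'set_node_ip': False}}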

    def validate_local_facts(self, facts=None):
        """ Validate local facts

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            msg = 'Invalid facts detected:\n'
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            module.fail_json(msg=msg,
                             changed=self.changed)

    # disabling pylint errors for line-too-long since we're dealing
    # with best effort reduction of error messages here.
    # disabling errors for too-many-branches since we require checking
    # many conditions.
    # pylint: disable=line-too-long, too-many-branches
    @staticmethod
    def validate_master_facts(facts, invalid_facts):
        """ Validate master facts

            Args:
                facts (dict): local facts to validate
                invalid_facts (dict): collected invalid_facts
            Returns:
                dict: Invalid facts
        """
        if 'master' in facts:
            # openshift.master.session_auth_secrets
            if 'session_auth_secrets' in facts['master']:
                session_auth_secrets = facts['master']['session_auth_secrets']
                if not issubclass(type(session_auth_secrets), list):
                    invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets to be a list.'
                elif 'session_encryption_secrets' not in facts['master']:
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption_secrets must be set '
                                                             'if openshift_master_session_auth_secrets is provided.')
                elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
                    invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
                                                             'openshift_master_session_encryption_secrets must be '
                                                             'equal length.')
                else:
                    for secret in session_auth_secrets:
                        if len(secret) < 32:
                            invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
                                                                     'Secrets must be at least 32 characters in length.')
            # openshift.master.session_encryption_secrets
            if 'session_encryption_secrets' in facts['master']:
                session_encryption_secrets = facts['master']['session_encryption_secrets']
                if not issubclass(type(session_encryption_secrets), list):
                    invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets to be a list.'
                elif 'session_auth_secrets' not in facts['master']:
                    invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
                                                                   'set if openshift_master_session_encryption_secrets '
                                                                   'is provided.')
                else:
                    for secret in session_encryption_secrets:
                        if len(secret) not in [16, 24, 32]:
                            invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
                                                                           'Secrets must be 16, 24, or 32 characters in length.')
        return invalid_facts
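
    # Illustrative sketch (hypothetical facts): problems are collected into
    # invalid_facts rather than raised immediately, so several issues can be
    # reported in one failure message by validate_local_facts.
    #
    #   OpenShiftFacts.validate_master_facts(
    #       {'master': {'session_auth_secrets': ['x' * 32]}}, {})
    #   # -> flags 'session_auth_secrets' because the matching
    #   #    session_encryption_secrets fact is missing.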


def main():
    """ main """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
            openshift_env=dict(default={}, type='dict', required=False),
            openshift_env_structures=dict(default=[], type='list', required=False),
            protected_facts_to_overwrite=dict(default=[], type='list', required=False)
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']
    module.params['gather_timeout'] = 10
    module.params['filter'] = '*'

    role = module.params['role']
    local_facts = module.params['local_facts']
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']
    openshift_env = module.params['openshift_env']
    openshift_env_structures = module.params['openshift_env_structures']
    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']

    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite,
                                     openshift_env,
                                     openshift_env_structures,
                                     protected_facts_to_overwrite)

    file_params = module.params.copy()
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)
    changed = module.set_fs_attributes_if_different(file_args,
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,
                            ansible_facts=openshift_facts.facts)


# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.facts import *
from ansible.module_utils.urls import *

if __name__ == '__main__':
    main()
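
# Illustrative usage sketch (hypothetical playbook snippet, shown only as a
# comment): the module is normally invoked from the openshift-ansible roles,
# for example:
#
#   - openshift_facts:
#       role: common
#       local_facts:
#         deployment_type: origin
#
# 'role' must be one of OpenShiftFacts.known_roles; the supplied local_facts are
# merged into /etc/ansible/facts.d/openshift.fact via merge_facts and
# save_local_facts above.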