12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294 |
- #!/usr/bin/python
- # pylint: disable=too-many-lines
- # -*- coding: utf-8 -*-
- # Reason: Disable pylint too-many-lines because we don't want to split up this file.
- # Status: Permanently disabled to keep this module as self-contained as possible.
- """Ansible module for retrieving and setting openshift related facts"""
- # pylint: disable=no-name-in-module, import-error, wrong-import-order
- import copy
- import errno
- import json
- import re
- import io
- import os
- import yaml
- import struct
- import socket
- from distutils.util import strtobool
- from distutils.version import LooseVersion
- from ansible.module_utils.six import string_types, text_type
- from ansible.module_utils.six.moves import configparser
- # ignore pylint errors related to the module_utils import
- # pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
- # import module snippets
- from ansible.module_utils.basic import * # noqa: F403
- from ansible.module_utils.facts import * # noqa: F403
- from ansible.module_utils.urls import * # noqa: F403
- from ansible.module_utils.six import iteritems, itervalues
- from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
- from ansible.module_utils._text import to_native
# dbus is an optional dependency: record whether it imported successfully
# instead of failing the whole module when it is absent.
HAVE_DBUS = False
try:
    from dbus import SystemBus, Interface
    from dbus.exceptions import DBusException
    HAVE_DBUS = True
except ImportError:
    pass
# Ansible module documentation/examples stubs (rendered by `ansible-doc`).
DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''

EXAMPLES = '''
'''
# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
    """ Migrate facts from various roles into common

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict with role-specific params moved into 'common'
    """
    # Bug fix: these values must be tuples of fact names. Without the
    # trailing comma, ('portal_net') is just the string 'portal_net', so
    # iterating it yielded single characters and the migration never matched.
    params = {
        'node': ('portal_net',),
        'master': ('portal_net',),
    }
    if 'common' not in facts:
        facts['common'] = {}
    for role in params:
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['common'][param] = facts[role].pop(param)
    return facts
def migrate_node_facts(facts):
    """ Migrate facts from various roles into node

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict with role-specific params moved into 'node'
    """
    # Bug fix: the value must be a tuple of fact names. Without the
    # trailing comma, ('dns_ip') is just the string 'dns_ip', so iterating
    # it yielded single characters and the migration never matched.
    params = {
        'common': ('dns_ip',),
    }
    if 'node' not in facts:
        facts['node'] = {}
    for role in params:
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['node'][param] = facts[role].pop(param)
    return facts
def migrate_hosted_facts(facts):
    """ Apply migrations for master facts

        Moves the legacy router/registry selector facts from the 'master'
        role into the 'hosted' section, creating intermediate dicts as
        needed.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict with selectors relocated under 'hosted'
    """
    if 'master' in facts:
        master = facts['master']
        for legacy_key, component in (('router_selector', 'router'),
                                      ('registry_selector', 'registry')):
            if legacy_key in master:
                hosted = facts.setdefault('hosted', {})
                section = hosted.setdefault(component, {})
                section['selector'] = master.pop(legacy_key)
    return facts
def migrate_admission_plugin_facts(facts):
    """ Apply migrations for admission plugin facts

        Folds the legacy master fact 'kube_admission_plugin_config' into
        'admission_plugin_config' and removes the legacy key afterwards.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict with the legacy plugin config merged away
    """
    master = facts.get('master', {})
    if 'kube_admission_plugin_config' in master:
        current = master.get('admission_plugin_config', dict())
        # Merge existing kube_admission_plugin_config with admission_plugin_config.
        master['admission_plugin_config'] = merge_facts(
            current,
            master['kube_admission_plugin_config'],
            additive_facts_to_overwrite=[],
            protected_facts_to_overwrite=[])
        # Drop the legacy fact now that it has been merged.
        master.pop('kube_admission_plugin_config', None)
    return facts
def migrate_local_facts(facts):
    """ Apply migrations of local facts

        Deep-copies the input so the caller's dict is never mutated, then
        runs each migration in sequence over the copy.

        Args:
            facts (dict): existing local facts
        Returns:
            dict: a migrated copy of the facts
    """
    migrated = copy.deepcopy(facts)
    for migration in (migrate_common_facts,
                      migrate_node_facts,
                      migrate_hosted_facts,
                      migrate_admission_plugin_facts):
        migrated = migration(migrated)
    return migrated
def first_ip(network):
    """ Return the first IPv4 address in network

        Args:
            network (str): network in CIDR format (e.g. '172.30.0.0/16')
        Returns:
            str: first IPv4 address of the network
    """
    def addr_to_int(addr):
        # Dotted-quad string -> 32-bit integer (network byte order).
        return struct.unpack("!I", socket.inet_aton(addr))[0]

    def int_to_addr(value):
        # 32-bit integer -> dotted-quad string.
        return socket.inet_ntoa(struct.pack("!I", value))

    address, prefix_len = network.split('/')
    # The original parsed the prefix length by abusing inet_aton("24");
    # int() expresses the same conversion directly. Mask to 32 bits so the
    # left shift cannot overflow the netmask.
    netmask_i = (0xffffffff << (32 - int(prefix_len))) & 0xffffffff
    return int_to_addr((addr_to_int(address) & netmask_i) + 1)
def hostname_valid(hostname):
    """ Test if specified hostname should be considered valid

        Args:
            hostname (str): hostname to test
        Returns:
            bool: True if valid, otherwise False
    """
    if not hostname:
        return False
    if hostname.startswith('localhost'):
        return False
    if hostname.endswith('localdomain'):
        return False
    # OpenShift will not allow a node with more than 63 chars in name.
    return len(hostname) <= 63
def choose_hostname(hostnames=None, fallback=''):
    """ Choose a hostname from the provided hostnames

        Given a list of hostnames and a fallback value, choose a hostname to
        use. This function will prefer fqdns if they exist (excluding any that
        begin with localhost or end with localdomain) over ip addresses.

        Args:
            hostnames (list): list of hostnames
            fallback (str): default value to set if hostnames does not contain
                a valid hostname
        Returns:
            str: chosen hostname
    """
    if hostnames is None:
        return fallback
    ip_pattern = re.compile(r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z')
    ips = []
    names = []
    # Split candidates into IP-address-looking strings and everything else,
    # preserving order; None and empty entries are discarded entirely.
    for candidate in hostnames:
        if candidate is None or candidate == '':
            continue
        if isinstance(candidate, string_types) and ip_pattern.match(candidate):
            ips.append(candidate)
        else:
            names.append(candidate)
    # Prefer real names over IPs; within a group, first valid entry wins.
    for group in (names, ips):
        for candidate in group:
            if hostname_valid(candidate):
                return candidate
    return fallback
def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Return metadata from the provided metadata_url

        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict or list: metadata request result
        Raises:
            OpenShiftFactsMetadataUnavailableError: on a non-200 response
    """
    response, info = fetch_url(module, metadata_url, headers=headers)  # noqa: F405
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        # JSON endpoints return a single decoded document.
        return module.from_json(to_native(response.read()))  # noqa: F405
    # Plain-text endpoints return one value per line.
    return [to_native(entry.strip()) for entry in response.readlines()]
def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Walk the metadata tree and return a dictionary of the entire tree

        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the result of walking the metadata tree
    """
    metadata = {}
    for entry in query_metadata(metadata_url, headers, expect_json):
        # A trailing slash marks a subtree; 'public-keys/' is the one
        # slash-terminated entry that is treated as a leaf.
        if entry.endswith('/') and entry != 'public-keys/':
            metadata[entry[:-1]] = walk_metadata(metadata_url + entry,
                                                 headers, expect_json)
        else:
            values = query_metadata(metadata_url + entry, headers,
                                    expect_json)
            # Single-value leaves are unwrapped; multi-value leaves stay lists.
            metadata[entry] = values[0] if len(values) == 1 else values
    return metadata
def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Retrieve the provider metadata

        Args:
            metadata_url (str): metadata url
            supports_recursive (bool): does the provider metadata api support
                recursion (one query returns the whole tree)
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the provider metadata, or None if it was unavailable
    """
    # When the API supports recursion a single query suffices; otherwise
    # walk the tree endpoint by endpoint.
    fetch = query_metadata if supports_recursive else walk_metadata
    try:
        return fetch(metadata_url, headers, expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        return None
def normalize_gce_facts(metadata, facts):
    """ Normalize gce facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                  facts dict
    """
    for interface in metadata['instance']['networkInterfaces']:
        public_ips = [cfg['externalIp'] for cfg in interface['accessConfigs']]
        public_ips.extend(interface['forwardedIps'])
        facts['network']['interfaces'].append(dict(
            ips=[interface['ip']],
            network_type='gce',
            public_ips=public_ips,
            # Network id is the last path component of the network URL.
            network_id=interface['network'].rpartition('/')[2],
        ))
    # Zone is likewise the last path component of the zone URL.
    facts['zone'] = metadata['instance']['zone'].rpartition('/')[2]
    # GCE currently only supports a single interface
    primary = facts['network']['interfaces'][0]
    facts['network']['ip'] = primary['ips'][0]
    facts['network']['public_ip'] = primary['public_ips'][0]
    # Split instance hostname from GCE metadata to use the short instance name
    facts['network']['hostname'] = metadata['instance']['hostname'].split('.')[0]
    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']
    return facts
def normalize_aws_facts(metadata, facts):
    """ Normalize aws facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                  facts dict
    """
    macs = metadata['network']['interfaces']['macs']
    # Process interfaces in device order so the list is deterministic.
    for interface in sorted(macs.values(),
                            key=lambda entry: entry['device-number']):
        int_info = dict()
        for fact_key, meta_key in iteritems({'ips': 'local-ipv4s',
                                             'public_ips': 'public-ipv4s'}):
            value = interface.get(meta_key)
            # A single address comes back as a bare string; normalize to list.
            int_info[fact_key] = [value] if isinstance(value, string_types) else value
        if 'vpc-id' in interface:
            int_info['network_type'] = 'vpc'
            int_info['network_id'] = interface['subnet-id']
        else:
            int_info['network_type'] = 'classic'
            int_info['network_id'] = None
        facts['network']['interfaces'].append(int_info)
    facts['zone'] = metadata['placement']['availability-zone']
    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')
    # TODO: verify that local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')
    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = metadata.get('public-hostname')
    return facts
def normalize_openstack_facts(metadata, facts):
    """ Normalize openstack facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                  facts dict
    """
    # openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api, should be updated if neutron exposes this.
    ec2 = metadata['ec2_compat']
    facts['zone'] = metadata['availability_zone']
    facts['network']['ip'] = ec2['local-ipv4'].split(',')[0]
    facts['network']['public_ip'] = ec2['public-ipv4']
    for fact_key, host_key, ip_key in [
            ('hostname', 'hostname', 'local-ipv4'),
            ('public_hostname', 'public-hostname', 'public-ipv4')]:
        # Prefer the hostname only when it resolves to the expected address;
        # on a mismatch or a resolution failure fall back to the IP.
        try:
            resolves_correctly = \
                socket.gethostbyname(ec2[host_key]) == ec2[ip_key]
        except socket.gaierror:
            resolves_correctly = False
        facts['network'][fact_key] = \
            ec2[host_key] if resolves_correctly else ec2[ip_key]
    return facts
def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts

        Args:
            provider (str): host provider
            metadata (dict): provider metadata
        Returns:
            dict: the normalized provider facts ({} when either input is None)
    """
    if provider is None or metadata is None:
        return {}
    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available
    # TODO: add support for setting user_data if available
    base_facts = dict(name=provider, metadata=metadata,
                      network=dict(interfaces=[], ipv6_enabled=False))
    if provider == 'gce':
        return normalize_gce_facts(metadata, base_facts)
    if provider == 'aws':
        return normalize_aws_facts(metadata, base_facts)
    if provider == 'openstack':
        return normalize_openstack_facts(metadata, base_facts)
    # Unknown providers get the un-normalized base facts.
    return base_facts
# pylint: disable=too-many-branches
def set_selectors(facts):
    """ Set selectors facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated selectors
                  facts if they were not already present
    """
    infra_selector = "region=infra"

    def apply_default(section, default):
        # Fill in 'selector' when it is absent, None, or the string 'None'.
        if section.get('selector') in [None, 'None']:
            section['selector'] = default

    hosted = facts.setdefault('hosted', {})
    apply_default(hosted.setdefault('router', {}), infra_selector)
    apply_default(hosted.setdefault('registry', {}), infra_selector)
    apply_default(hosted.setdefault('metrics', {}), None)
    # 'logging' must be a dict; anything else is replaced outright.
    if not isinstance(facts.get('logging'), dict):
        facts['logging'] = {}
    apply_default(facts['logging'], None)
    apply_default(hosted.setdefault('etcd', {}), None)
    prometheus = facts.setdefault('prometheus', {})
    apply_default(prometheus, None)
    apply_default(prometheus.setdefault('alertmanager', {}), None)
    apply_default(prometheus.setdefault('alertbuffer', {}), None)
    return facts
def set_identity_providers_if_unset(facts):
    """ Set identity_providers fact if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated identity providers
                  facts if they were not already present
    """
    if 'master' not in facts:
        return facts
    deployment_type = facts['common']['deployment_type']
    if 'identity_providers' not in facts['master']:
        # Enterprise deployments default to deny-all; everything else
        # defaults to allow-all.
        if deployment_type == 'openshift-enterprise':
            provider = dict(name='deny_all', challenge=True, login=True,
                            kind='DenyAllPasswordIdentityProvider')
        else:
            provider = dict(name='allow_all', challenge=True, login=True,
                            kind='AllowAllPasswordIdentityProvider')
        facts['master']['identity_providers'] = [provider]
    return facts
def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict

        Derives the api/console/etcd URL facts and the loopback kubeconfig
        naming facts from the hostname/port/ssl settings already in the
        facts, using setdefault so values already present are preserved.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
                  were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        # Prefer the cluster-level hostnames when they are configured.
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        etcd_hosts = facts['master']['etcd_hosts']
        # SSL flag and port per URL kind; api/public_api/loopback_api share
        # the api settings, console/public_console share the console ones.
        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
            etcd=facts['master']['etcd_use_ssl']
        )
        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
            etcd=facts['master']['etcd_port'],
        )
        etcd_urls = []
        if etcd_hosts != '':
            # External etcd hosts are configured: one URL per host, and
            # embedded etcd is forced off.
            facts['master']['etcd_port'] = ports['etcd']
            facts['master']['embedded_etcd'] = False
            for host in etcd_hosts:
                etcd_urls.append(format_url(use_ssl['etcd'], host,
                                            ports['etcd']))
        else:
            # No external etcd hosts: point at this master's own hostname.
            etcd_urls = [format_url(use_ssl['etcd'], hostname,
                                    ports['etcd'])]
        facts['master'].setdefault('etcd_urls', etcd_urls)
        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))
        # Loopback kubeconfig identifiers replace '.' with '-'.
        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)
        # Console URLs additionally carry the console path.
        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))
    return facts
def set_aggregate_facts(facts):
    """ Set aggregate facts

        Builds common.all_hostnames / common.internal_hostnames from the
        host's names and IPs, any master cluster hostnames and the generated
        kubernetes service names, and records the cluster service IP as
        common.kube_svc_ip.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with aggregated facts
    """
    # Bug fix: the original computed first_ip(facts['common']['portal_net'])
    # before its own "if 'common' in facts" guard and used cluster_domain
    # outside it, so the guard never protected anything (KeyError/NameError
    # when 'common' was missing). Honor the guard's evident intent instead.
    if 'common' not in facts:
        return facts
    common = facts['common']
    kube_svc_ip = first_ip(common['portal_net'])
    common['kube_svc_ip'] = kube_svc_ip
    all_hostnames = {common['hostname'], common['public_hostname'],
                     common['ip'], common['public_ip']}
    internal_hostnames = {common['hostname'], common['ip']}
    cluster_domain = common['dns_domain']
    if 'master' in facts:
        for key in ('cluster_hostname', 'cluster_public_hostname'):
            if key in facts['master']:
                all_hostnames.add(facts['master'][key])
    # Kubernetes/OpenShift service names resolve both internally and
    # externally, so they go into both sets.
    svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
                 'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
                 'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
    all_hostnames.update(svc_names)
    internal_hostnames.update(svc_names)
    all_hostnames.add(kube_svc_ip)
    internal_hostnames.add(kube_svc_ip)
    common['all_hostnames'] = list(all_hostnames)
    common['internal_hostnames'] = list(internal_hostnames)
    return facts
def set_etcd_facts_if_unset(facts):
    """
    If using embedded etcd, loads the data directory from master-config.yaml.
    If using standalone etcd, loads ETCD_DATA_DIR from etcd.conf.
    If anything goes wrong parsing these, the fact will not be set.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict, with facts['etcd']['etcd_data_dir'] set
                  when it could be determined
    """
    if 'master' in facts and safe_get_bool(facts['master']['embedded_etcd']):
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
        if 'etcd_data_dir' not in etcd_facts:
            try:
                # Parse master config to find actual etcd data dir:
                master_cfg_path = os.path.join(facts['common']['config_base'],
                                               'master/master-config.yaml')
                # Fix: use a context manager so the handle is closed even
                # when parsing raises (the original leaked it on errors).
                with open(master_cfg_path, 'r') as master_cfg_f:
                    config = yaml.safe_load(master_cfg_f.read())
                etcd_facts['etcd_data_dir'] = \
                    config['etcdConfig']['storageDirectory']
                facts['etcd'] = etcd_facts
            # We don't want exceptions bubbling up here:
            # pylint: disable=broad-except
            except Exception:
                pass
    else:
        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
        # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
        try:
            # Fix: the original opened the file without ever closing it.
            # Add a fake section header so configparser accepts the file:
            with open('/etc/etcd/etcd.conf', 'r') as etcd_conf_f:
                ini_str = text_type('[root]\n' + etcd_conf_f.read(), 'utf-8')
            ini_fp = io.StringIO(ini_str)
            config = configparser.RawConfigParser()
            # readfp (not read_file) is kept for py2 six.moves compatibility.
            config.readfp(ini_fp)
            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
            # Strip surrounding double quotes if present:
            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
                etcd_data_dir = etcd_data_dir[1:-1]
            etcd_facts['etcd_data_dir'] = etcd_data_dir
            facts['etcd'] = etcd_facts
        # We don't want exceptions bubbling up here:
        # pylint: disable=broad-except
        except Exception:
            pass
    return facts
def set_deployment_facts_if_unset(facts):
    """ Set Facts that vary based on deployment_type. This currently
        includes common.service_type, master.registry_url, node.registry_url,
        node.storage_plugin_deps

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated deployment_type
                  facts
    """
    if 'common' not in facts:
        return facts
    deployment_type = facts['common']['deployment_type']
    if 'service_type' not in facts['common']:
        facts['common']['service_type'] = (
            'origin' if deployment_type == 'origin' else 'atomic-openshift')
    for role in ('master', 'node'):
        if role in facts and 'registry_url' not in facts[role]:
            if deployment_type == 'openshift-enterprise':
                facts[role]['registry_url'] = 'openshift3/ose-${component}:${version}'
            else:
                facts[role]['registry_url'] = 'openshift/origin-${component}:${version}'
    if 'master' in facts and 'disabled_features' not in facts['master']:
        # Registry-only deployments run without the build/console features.
        if facts['common']['deployment_subtype'] == 'registry':
            facts['master']['disabled_features'] = ['Builder', 'S2IBuilder', 'WebConsole']
    if 'node' in facts and 'storage_plugin_deps' not in facts['node']:
        facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi']
    return facts
# pylint: disable=too-many-statements
def set_version_facts_if_unset(facts):
    """ Set version facts. This currently includes common.version and
        common.version_gte_3_x

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
    """
    if 'common' not in facts:
        return facts
    openshift_version = get_openshift_version(facts)
    if openshift_version and openshift_version != "latest":
        version = LooseVersion(openshift_version)
        facts['common']['version'] = openshift_version
        facts['common']['short_version'] = '.'.join(
            str(part) for part in version.version[0:2])
        version_gte_3_6 = version >= LooseVersion('3.6')
        version_gte_3_7 = version >= LooseVersion('3.7')
        version_gte_3_8 = version >= LooseVersion('3.8')
    else:
        # 'Latest' version is set to True, 'Next' versions set to False
        version_gte_3_6 = True
        version_gte_3_7 = True
        version_gte_3_8 = False
    facts['common']['version_gte_3_6'] = version_gte_3_6
    facts['common']['version_gte_3_7'] = version_gte_3_7
    facts['common']['version_gte_3_8'] = version_gte_3_8
    # Pick the newest examples content version the detected release supports.
    if version_gte_3_8:
        examples_content_version = 'v3.8'
    elif version_gte_3_7:
        examples_content_version = 'v3.7'
    elif version_gte_3_6:
        examples_content_version = 'v3.6'
    else:
        examples_content_version = 'v1.5'
    facts['common']['examples_content_version'] = examples_content_version
    return facts
def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict

        Args:
            facts (dict): existing facts
            system_facts (dict): ansible_facts
        Returns:
            dict: the facts dict updated with the generated sdn facts if they
                  were not already present
    """
    if 'master' in facts:
        # set defaults for sdn_cluster_network_cidr and sdn_host_subnet_length
        # these might be overridden if they exist in the master config file
        sdn_cluster_network_cidr = '10.128.0.0/14'
        sdn_host_subnet_length = '9'

        master_cfg_path = os.path.join(facts['common']['config_base'],
                                       'master/master-config.yaml')
        if os.path.isfile(master_cfg_path):
            with open(master_cfg_path, 'r') as master_cfg_f:
                config = yaml.safe_load(master_cfg_f.read())

            if 'networkConfig' in config:
                if 'clusterNetworkCIDR' in config['networkConfig']:
                    sdn_cluster_network_cidr = \
                        config['networkConfig']['clusterNetworkCIDR']
                if 'hostSubnetLength' in config['networkConfig']:
                    sdn_host_subnet_length = \
                        config['networkConfig']['hostSubnetLength']

        if 'sdn_cluster_network_cidr' not in facts['master']:
            facts['master']['sdn_cluster_network_cidr'] = sdn_cluster_network_cidr
        if 'sdn_host_subnet_length' not in facts['master']:
            facts['master']['sdn_host_subnet_length'] = sdn_host_subnet_length

    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']

        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'

        # Find the interface that carries the node IP and derive the SDN MTU
        # from it (50 bytes of overlay overhead).
        for val in itervalues(system_facts):
            if isinstance(val, dict) and 'mtu' in val:
                mtu = val['mtu']

                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    facts['node']['sdn_mtu'] = str(mtu - 50)

    return facts
def set_nodename(facts):
    """ Set the node's nodename fact from provider metadata (GCE) or from
        the lower-cased common hostname. """
    if 'node' in facts and 'common' in facts:
        if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
            # GCE instance hostnames are FQDNs; the nodename is the short name.
            facts['node']['nodename'] = facts['provider']['metadata']['instance']['hostname'].split('.')[0]
        # TODO: The openstack cloudprovider nodename setting was too opinionated.
        # It needs to be generalized before it can be enabled again.
        # elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
        #     facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
        else:
            facts['node']['nodename'] = facts['common']['hostname'].lower()
    return facts
def migrate_oauth_template_facts(facts):
    """
    Migrate an old oauth template fact to a newer format if it's present.

    The legacy 'oauth_template' fact was just a filename, and assumed you were
    setting the 'login' template.

    The new pluralized 'oauth_templates' fact is a dict mapping the template
    name to a filename.

    Simplify the code after this by merging the old fact into the new.
    """
    if 'master' in facts and 'oauth_template' in facts['master']:
        master = facts['master']
        if 'oauth_templates' not in master:
            master['oauth_templates'] = {"login": master['oauth_template']}
        elif 'login' not in master['oauth_templates']:
            # Never clobber an explicit 'login' entry in the new fact.
            master['oauth_templates']['login'] = master['oauth_template']
    return facts
def format_url(use_ssl, hostname, port, path=''):
    """ Format url based on ssl flag, hostname, port and path

        Args:
            use_ssl (bool): is ssl enabled
            hostname (str): hostname
            port (str or int): port
            path (str): url path
        Returns:
            str: The generated url string
    """
    scheme = 'https' if use_ssl else 'http'
    netloc = hostname
    # Compare against str(port) so integer ports (443) behave the same as
    # string ports ('443'); the original string-only comparison appended
    # ":443" to https URLs when an int was passed.
    if (use_ssl and str(port) != '443') or (not use_ssl and str(port) != '80'):
        netloc += ":%s" % port
    try:
        url = urlparse.urlunparse((scheme, netloc, path, '', '', ''))
    except AttributeError:
        # Python 3: urlparse is a function, not a module.
        # pylint: disable=undefined-variable
        url = urlunparse((scheme, netloc, path, '', '', ''))
    return url
def get_current_config(facts):
    """ Get current openshift config

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    roles = [role for role in facts if role not in ['common', 'provider']]

    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]

        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )

        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
            try:
                _, output, _ = module.run_command(  # noqa: F405
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)

                # Mask secrets before storing the kubeconfig in facts.
                cad = 'certificate-authority-data'
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass

                current_config['kubeconfig'] = config

            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass

    return current_config
def build_kubelet_args(facts):
    """Build node kubelet_args

    In the node-config.yaml file, kubeletArgument sub-keys have their
    values provided as a list. Hence the gratuitous use of ['foo'] below.

    Args:
        facts (dict): existing facts
    Returns:
        dict: facts, with node.kubelet_args merged in when any were generated
    """
    cloud_cfg_path = os.path.join(
        facts['common']['config_base'],
        'cloudprovider')

    # We only have to do this stuff on hosts that are nodes
    if 'node' in facts:
        # Any changes to the kubeletArguments parameter are stored
        # here first.
        kubelet_args = {}

        if 'cloudprovider' in facts:
            # EVERY cloud is special <3
            if 'kind' in facts['cloudprovider']:
                if facts['cloudprovider']['kind'] == 'aws':
                    kubelet_args['cloud-provider'] = ['aws']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if facts['cloudprovider']['kind'] == 'openstack':
                    kubelet_args['cloud-provider'] = ['openstack']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if facts['cloudprovider']['kind'] == 'gce':
                    kubelet_args['cloud-provider'] = ['gce']
                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']

        # Automatically add node-labels to the kubeletArguments
        # parameter. See BZ1359848 for additional details.
        #
        # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848
        if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict):
            # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns
            # into ['foo=bar', 'a=b']
            #
            # On the openshift_node_labels inventory variable we loop
            # over each key-value tuple (from .items()) and join the
            # key to the value with an '=' character, this produces a
            # list.
            #
            # map() seems to be returning an itertools.imap object
            # instead of a list. We cast it to a list ourselves.
            # pylint: disable=unnecessary-lambda
            labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items()))
            # BUGFIX: labels_str is a list, so the old `!= ''` test was always
            # true and an empty labels dict injected an empty node-labels
            # list. Only set node-labels when there is at least one label.
            if labels_str:
                kubelet_args['node-labels'] = labels_str

        # If we've added items to the kubelet_args dict then we need
        # to merge the new items back into the main facts object.
        if kubelet_args != {}:
            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
    return facts
def build_controller_args(facts):
    """ Build master controller_args from the configured cloudprovider. """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        controller_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                kind = facts['cloudprovider']['kind']
                if kind == 'aws':
                    controller_args['cloud-provider'] = ['aws']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if kind == 'openstack':
                    controller_args['cloud-provider'] = ['openstack']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if kind == 'gce':
                    controller_args['cloud-provider'] = ['gce']
                    controller_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
        if controller_args != {}:
            facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
    return facts
def build_api_server_args(facts):
    """ Build master api_server_args from the configured cloudprovider. """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' in facts:
        api_server_args = {}
        if 'cloudprovider' in facts:
            if 'kind' in facts['cloudprovider']:
                kind = facts['cloudprovider']['kind']
                if kind == 'aws':
                    api_server_args['cloud-provider'] = ['aws']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
                if kind == 'openstack':
                    api_server_args['cloud-provider'] = ['openstack']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
                if kind == 'gce':
                    api_server_args['cloud-provider'] = ['gce']
                    api_server_args['cloud-config'] = [cloud_cfg_path + '/gce.conf']
        if api_server_args != {}:
            facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
    return facts
def is_service_running(service):
    """ Queries systemd through dbus to see if the service is running """
    service_running = False
    try:
        bus = SystemBus()
        systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
        manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
        service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
        service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
        service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
        service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
        service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        # Only a unit that is both loaded and active counts as running.
        if service_load_state == 'loaded' and service_active_state == 'active':
            service_running = True
    except DBusException:
        # TODO: do not swallow exception, as it may be hiding useful debugging
        # information.
        pass

    return service_running
def rpm_rebuilddb():
    """
    Runs rpm --rebuilddb to ensure the db is in good shape.
    """
    module.run_command(['/usr/bin/rpm', '--rebuilddb'])  # noqa: F405
def get_version_output(binary, version_cmd):
    """ Runs and returns the version output for a command.

        Args:
            binary (str or list): path to the binary (or argv prefix)
            version_cmd (str or list): version subcommand/arguments
        Returns:
            str or None: command stdout, or None when the binary is missing
    """
    cmd = []
    for item in (binary, version_cmd):
        if isinstance(item, list):
            cmd.extend(item)
        else:
            cmd.append(item)

    # BUGFIX: initialize output so a missing binary returns None instead of
    # raising UnboundLocalError at the return statement.
    output = None
    if os.path.isfile(cmd[0]):
        _, output, _ = module.run_command(cmd)  # noqa: F405
    return output
# We may need this in the future.
def get_docker_version_info():
    """ Parses and returns the docker version info """
    result = None
    if is_service_running('docker') or is_service_running('container-engine'):
        version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
        if 'Server' in version_info:
            result = {
                'api_version': version_info['Server']['API version'],
                'version': version_info['Server']['Version']
            }
    return result
def get_openshift_version(facts):
    """ Get current version of openshift on the host.

        Checks a variety of ways ranging from fastest to slowest.

        Args:
            facts (dict): existing facts

        Returns:
            version: the current openshift version
    """
    version = None

    # No need to run this method repeatedly on a system if we already know the
    # version
    # TODO: We need a way to force reload this after upgrading bits.
    if 'common' in facts:
        if 'version' in facts['common'] and facts['common']['version'] is not None:
            return chomp_commit_offset(facts['common']['version'])

    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)
    elif 'common' in facts and 'is_containerized' in facts['common']:
        version = get_container_openshift_version(facts)

    # Handle containerized masters that have not yet been configured as a node.
    # This can be very slow and may get re-run multiple times, so we only use this
    # if other methods failed to find a version.
    if not version and os.path.isfile('/usr/local/bin/openshift'):
        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)

    return chomp_commit_offset(version)
def chomp_commit_offset(version):
    """Chomp any "+git.foo" commit offset string from the given `version`
    and return the modified version string.

    Ex:
    - chomp_commit_offset(None) => None
    - chomp_commit_offset(1337) => "1337"
    - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
    - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
    - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
    """
    if version is None:
        return None
    # Stringify, just in case it's a Number type. Split by '+' and
    # return the first split. No concerns about strings without a
    # '+', .split() returns an array of the original string.
    return str(version).split('+')[0]
def get_container_openshift_version(facts):
    """
    If containerized, see if we can determine the installed version via the
    systemd environment files.

    Args:
        facts (dict): existing facts (reads common.service_type)
    Returns:
        str or None: the version number, or None when not found
    """
    for filename in ['/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node']:
        env_path = filename % facts['common']['service_type']
        if not os.path.exists(env_path):
            continue

        with open(env_path) as env_file:
            for line in env_file:
                if line.startswith("IMAGE_VERSION="):
                    tag = line[len("IMAGE_VERSION="):].strip()
                    # Remove leading "v" and any trailing release info, we just want
                    # a version number here. startswith() is safe on an empty
                    # tag, where the old tag[0] indexing raised IndexError.
                    no_v_version = tag[1:] if tag.startswith('v') else tag
                    version = no_v_version.split("-")[0]
                    return version
    return None
def parse_openshift_version(output):
    """ Parse the version number out of 'openshift version' output.

        (The previous docstring was copy-pasted from apply_provider_facts.)

        Args:
            output (str): output of 'openshift version'
        Returns:
            str: the version number, '' when no openshift line was found
    """
    versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
    ver = versions.get('openshift', '')
    # Remove trailing build number and commit hash from older versions, we need to return a straight
    # w.x.y.z version here for use as openshift_version throughout the playbooks/roles. (i.e. 3.1.1.6-64-g80b61da)
    ver = ver.split('-')[0]
    return ver
def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to supplied facts dict

        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts

        (Docstring fix: the old doc listed a 'roles' argument this function
        does not take.)
    """
    if not provider_facts:
        return facts

    common_vars = [('hostname', 'ip'), ('public_hostname', 'public_ip')]
    for h_var, ip_var in common_vars:
        ip_value = provider_facts['network'].get(ip_var)
        if ip_value:
            facts['common'][ip_var] = ip_value

        # Prefer a provider-supplied hostname when it is usable.
        facts['common'][h_var] = choose_hostname(
            [provider_facts['network'].get(h_var)],
            facts['common'][h_var]
        )

    facts['provider'] = provider_facts
    return facts
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overwrite):
    """ Recursively merge facts dicts

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']
        Returns:
            dict: the merged facts
    """
    additive_facts = ['named_certificates']
    protected_facts = ['ha']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config',
                            "builddefaults",
                            "buildoverrides"]

    facts = dict()
    for key, value in iteritems(orig):
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watchout for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], string_types):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)

                # Collect the subset of protected facts to overwrite
                # if key matches. These will be passed to the
                # subsequent merge_facts call.
                relevant_protected_facts = []
                for item in protected_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_protected_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts, relevant_protected_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
            # Key matches a protected fact and we are not overwriting
            # it so we will determine if it is okay to change this
            # fact.
            elif key in protected_facts and key not in [x.split('.')[-1] for x in protected_facts_to_overwrite]:
                # ha (bool) can not change unless it has been passed
                # as a protected fact to overwrite.
                if key == 'ha':
                    if safe_get_bool(value) != safe_get_bool(new[key]):
                        # pylint: disable=line-too-long
                        module.fail_json(msg='openshift_facts received a different value for openshift.master.ha')  # noqa: F405
                    else:
                        facts[key] = value
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)

    # Finally, add any keys that only exist in the new facts.
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watchout for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], string_types):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts
def save_local_facts(filename, facts):
    """ Save local facts

        Args:
            filename (str): local facts file
            facts (dict): facts to set
        Raises:
            OpenShiftFactsFileWriteError: when the file cannot be written
    """
    try:
        fact_dir = os.path.dirname(filename)
        try:
            os.makedirs(fact_dir)  # try to make the directory
        except OSError as exception:
            if exception.errno != errno.EEXIST:  # but it is okay if it is already there
                raise  # pass any other exceptions up the chain
        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))  # noqa: F405
        # Facts may contain secrets; restrict to owner read/write.
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )
def get_local_facts_from_file(filename):
    """ Retrieve local facts from fact file

        Args:
            filename (str): local facts file
        Returns:
            dict: the retrieved facts (empty when the file is missing or
            unparseable)
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style.
        # ConfigParser replaces SafeConfigParser, which was deprecated and
        # removed in Python 3.12; behavior is identical for this use.
        ini_facts = configparser.ConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict()
            for key, value in ini_facts.items(section):
                local_facts[section][key] = value

    except (configparser.MissingSectionHeaderError,
            configparser.ParsingError):
        # Not INI; fall back to reading the file as JSON.
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass

    return local_facts
def sort_unique(alist):
    """ Sorts and de-dupes a list

        Args:
            alist (list): a list
        Returns:
            list: a sorted de-duped list
    """
    # sorted() already returns a list; the intermediate list() was redundant.
    return sorted(set(alist))
def safe_get_bool(fact):
    """ Get a boolean fact safely.

        Args:
            fact: fact to convert
        Returns:
            bool: given fact as a bool
        Raises:
            ValueError: when the fact is not a recognized truth value

        Implements distutils.util.strtobool semantics inline, because
        distutils was deprecated (PEP 632) and removed in Python 3.12.
    """
    value = str(fact).lower()
    if value in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if value in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    raise ValueError("invalid truth value %r" % (value,))
def set_proxy_facts(facts):
    """ Set global proxy facts

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'common' in facts:
        common = facts['common']
        if 'http_proxy' in common or 'https_proxy' in common or 'no_proxy' in common:
            # Normalize no_proxy to a list while we work on it.
            if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
                common['no_proxy'] = common['no_proxy'].split(",")
            elif 'no_proxy' not in common:
                common['no_proxy'] = []

            # See https://bugzilla.redhat.com/show_bug.cgi?id=1466783
            # masters behind a proxy need to connect to etcd via IP
            if 'no_proxy_etcd_host_ips' in common:
                if isinstance(common['no_proxy_etcd_host_ips'], string_types):
                    common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(','))

            if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
                if 'no_proxy_internal_hostnames' in common:
                    common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))

            # We always add local dns domain and ourselves no matter what
            common['no_proxy'].append('.' + common['dns_domain'])
            common['no_proxy'].append('.svc')
            common['no_proxy'].append(common['hostname'])
            common['no_proxy'] = ','.join(sort_unique(common['no_proxy']))
        facts['common'] = common
    return facts
def set_builddefaults_facts(facts):
    """ Set build defaults including setting proxy values from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']

        # Copy values from common to builddefaults
        for proxy_key in ('http_proxy', 'https_proxy', 'no_proxy'):
            if proxy_key not in builddefaults and proxy_key in common:
                builddefaults[proxy_key] = common[proxy_key]

        # Create git specific facts from generic values, if git specific values are
        # not defined.
        for generic_key in ('http_proxy', 'https_proxy', 'no_proxy'):
            git_key = 'git_' + generic_key
            if git_key not in builddefaults and generic_key in builddefaults:
                builddefaults[git_key] = builddefaults[generic_key]

        # If we're actually defining a builddefaults config then create admission_plugin_config
        # then merge builddefaults[config] structure into admission_plugin_config

        # 'config' is the 'openshift_builddefaults_json' inventory variable
        if 'config' in builddefaults:
            if 'admission_plugin_config' not in facts['master']:
                # Scaffold out the full expected datastructure
                facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
            if 'env' in facts['master']['admission_plugin_config']['BuildDefaults']['configuration']:
                delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])

    return facts
def delete_empty_keys(keylist):
    """ Delete dictionary elements from keylist where "value" is empty.

        Args:
            keylist(list): A list of builddefault configuration envs.

        Returns:
            none

        Example:
            keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                       {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                       {'name': 'NO_PROXY', 'value': ''}]

            After calling delete_empty_keys the provided list is modified to become:

            [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
             {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}]
    """
    # In-place slice assignment replaces the fragile index-offset deletion
    # loop while keeping the caller's list object mutated, as before.
    keylist[:] = [entry for entry in keylist if len(entry['value']) != 0]
def set_buildoverrides_facts(facts):
    """ Set build overrides

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'buildoverrides' in facts:
        buildoverrides = facts['buildoverrides']
        # If we're actually defining a buildoverrides config then create admission_plugin_config
        # then merge buildoverrides[config] structure into admission_plugin_config
        if 'config' in buildoverrides:
            if 'admission_plugin_config' not in facts['master']:
                facts['master']['admission_plugin_config'] = dict()
            facts['master']['admission_plugin_config'].update(buildoverrides['config'])

    return facts
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated containerization
            facts
    """
    deployment_type = facts['common']['deployment_type']
    if deployment_type == 'openshift-enterprise':
        master_image = 'openshift3/ose'
        node_image = 'openshift3/node'
        ovs_image = 'openshift3/openvswitch'
        pod_image = 'openshift3/ose-pod'
        router_image = 'openshift3/ose-haproxy-router'
        registry_image = 'openshift3/ose-docker-registry'
        deployer_image = 'openshift3/ose-deployer'
    else:
        master_image = 'openshift/origin'
        node_image = 'openshift/node'
        ovs_image = 'openshift/openvswitch'
        pod_image = 'openshift/origin-pod'
        router_image = 'openshift/origin-haproxy-router'
        registry_image = 'openshift/origin-docker-registry'
        deployer_image = 'openshift/origin-deployer'

    # Atomic hosts are detected by the ostree marker file.
    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted')

    if 'is_containerized' not in facts['common']:
        facts['common']['is_containerized'] = facts['common']['is_atomic']
    if 'pod_image' not in facts['common']:
        facts['common']['pod_image'] = pod_image
    if 'router_image' not in facts['common']:
        facts['common']['router_image'] = router_image
    if 'registry_image' not in facts['common']:
        facts['common']['registry_image'] = registry_image
    if 'deployer_image' not in facts['common']:
        facts['common']['deployer_image'] = deployer_image
    if 'master' in facts and 'master_image' not in facts['master']:
        facts['master']['master_image'] = master_image
        facts['master']['master_system_image'] = master_image
    if 'node' in facts:
        if 'node_image' not in facts['node']:
            facts['node']['node_image'] = node_image
            facts['node']['node_system_image'] = node_image
        if 'ovs_image' not in facts['node']:
            facts['node']['ovs_image'] = ovs_image
            facts['node']['ovs_system_image'] = ovs_image

    if safe_get_bool(facts['common']['is_containerized']):
        facts['common']['admin_binary'] = '/usr/local/bin/oadm'
        facts['common']['client_binary'] = '/usr/local/bin/oc'

    return facts
def set_installed_variant_rpm_facts(facts):
    """ Set RPM facts of installed variant

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with installed_variant_rpms
    """
    installed_rpms = []
    for base_rpm in ['openshift', 'atomic-openshift', 'origin']:
        optional_rpms = ['master', 'node', 'clients', 'sdn-ovs']
        variant_rpms = [base_rpm] + \
                       ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \
                       ['tuned-profiles-%s-node' % base_rpm]
        for rpm in variant_rpms:
            # rpm -q exits 0 only when the package is installed.
            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])  # noqa: F405
            if exit_code == 0:
                installed_rpms.append(rpm)

    facts['common']['installed_variant_rpms'] = installed_rpms
    return facts
class OpenShiftFactsInternalError(Exception):
    """Origin Facts Error"""
class OpenShiftFactsUnsupportedRoleError(Exception):
    """Origin Facts Unsupported Role Error"""
class OpenShiftFactsFileWriteError(Exception):
    """Origin Facts File Write Error"""
class OpenShiftFactsMetadataUnavailableError(Exception):
    """Origin Facts Metadata Unavailable Error"""
class OpenShiftFacts(object):
    """ Origin Facts

        Attributes:
            facts (dict): facts for the host

        Args:
            module (AnsibleModule): an AnsibleModule object
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
            protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                 '.' notation ex: ['master.master_count']

        Raises:
            OpenShiftFactsUnsupportedRoleError:
    """

    # Roles this module knows how to generate and persist facts for.
    known_roles = ['builddefaults',
                   'buildoverrides',
                   'cloudprovider',
                   'common',
                   'etcd',
                   'hosted',
                   'master',
                   'node',
                   'logging',
                   'loggingops',
                   'metrics',
                   'prometheus']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None,
                 openshift_env=None,
                 openshift_env_structures=None,
                 protected_facts_to_overwrite=None):
        """ Collect system facts and generate the host's OpenShift facts.

            NOTE(review): despite the class docstring, __init__ does not take
            an AnsibleModule argument -- it relies on the module-level global
            'module' assigned in main().
        """
        self.changed = False
        self.filename = filename
        # Fail fast on roles this module does not know how to handle.
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        # Collect system facts and preface each fact with 'ansible_'.
        try:
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])  # noqa: F405
            additional_facts = {}
            for (k, v) in self.system_facts.items():
                additional_facts["ansible_%s" % k.replace('-', '_')] = v
            self.system_facts.update(additional_facts)
        except UnboundLocalError:
            # Fallback for the older ansible_facts() signature.
            # ansible-2.2,2.3
            self.system_facts = get_all_facts(module)['ansible_facts']  # noqa: F405

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite,
                                         openshift_env,
                                         openshift_env_structures,
                                         protected_facts_to_overwrite)
    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite,
                       openshift_env,
                       openshift_env_structures,
                       protected_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift_env facts for overriding generated defaults
                openshift_env_structures (list): fact key structures used when
                                                 splitting openshift_env fact names
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']
            Returns:
                dict: The generated facts
        """
        # Merge the provided facts with any previously saved local facts.
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite,
                                            openshift_env,
                                            openshift_env_structures,
                                            protected_facts_to_overwrite)
        roles = local_facts.keys()

        # Deployment type/subtype default to 'origin'/'basic' unless
        # overridden through the 'common' local facts.
        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)
        # Local facts win over generated defaults and provider facts.
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite,
                            protected_facts_to_overwrite)
        facts = migrate_oauth_template_facts(facts)
        facts['current_config'] = get_current_config(facts)
        # Fill in any facts not explicitly set.  NOTE(review): statement
        # order appears significant here -- later setters may read facts
        # produced by earlier ones; do not reorder without checking.
        facts = set_url_facts_if_unset(facts)
        facts = set_selectors(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_kubelet_args(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_etcd_facts_if_unset(facts)
        facts = set_proxy_facts(facts)
        facts = set_builddefaults_facts(facts)
        facts = set_buildoverrides_facts(facts)
        # RPM variant facts only make sense on non-containerized installs.
        if not safe_get_bool(facts['common']['is_containerized']):
            facts = set_installed_variant_rpm_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)
    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host
                deployment_type (str): deployment type for the 'common' facts
                deployment_subtype (str): deployment subtype for the 'common' facts

            Returns:
                dict: The generated default facts
        """
        defaults = {}

        # Pick the best hostname available: `hostname -f`, then the node
        # name, then the FQDN, validated against the default IPv4 address
        # by choose_hostname().
        ip_addr = self.system_facts['ansible_default_ipv4']['address']
        exit_code, output, _ = module.run_command(['hostname', '-f'])  # noqa: F405
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr).lower()

        defaults['common'] = dict(ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  client_binary='oc', admin_binary='oadm',
                                  dns_domain='cluster.local',
                                  config_base='/etc/origin')

        if 'master' in roles:
            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443', etcd_use_ssl=True,
                                      etcd_hosts='', etcd_port='4001',
                                      portal_net='172.30.0.0/16',
                                      embedded_etcd=True, embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'node' in roles:
            defaults['node'] = dict(labels={}, annotations={},
                                    iptables_sync_period='30s',
                                    local_quota_per_fsgroup="",
                                    set_node_ip=False)

        if 'cloudprovider' in roles:
            defaults['cloudprovider'] = dict(kind=None)

        # NOTE(review): all of the storage defaults below (hosted, logging,
        # loggingops, metrics, prometheus) are only generated when the
        # 'hosted' role applies -- confirm this nesting is intentional.
        if 'hosted' in roles or self.role == 'hosted':
            defaults['hosted'] = dict(
                etcd=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='etcd',
                            size='1Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'
                        ),
                        host=None,
                        access=dict(
                            modes=['ReadWriteOnce']
                        ),
                        create_pv=True,
                        create_pvc=False
                    )
                ),
                registry=dict(
                    storage=dict(
                        kind=None,
                        volume=dict(
                            name='registry',
                            size='5Gi'
                        ),
                        nfs=dict(
                            directory='/exports',
                            options='*(rw,root_squash)'),
                        glusterfs=dict(
                            endpoints='glusterfs-registry-endpoints',
                            path='glusterfs-registry-volume',
                            ips=[],
                            readOnly=False,
                            swap=False,
                            swapcopy=True),
                        host=None,
                        access=dict(
                            modes=['ReadWriteMany']
                        ),
                        create_pv=True,
                        create_pvc=True
                    )
                ),
                router=dict()
            )

            defaults['logging'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='logging-es',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['loggingops'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='logging-es-ops',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['metrics'] = dict(
                deploy=False,
                duration=7,
                resolution='10s',
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='metrics',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['prometheus'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='prometheus',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['prometheus']['alertmanager'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='prometheus-alertmanager',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

            defaults['prometheus']['alertbuffer'] = dict(
                storage=dict(
                    kind=None,
                    volume=dict(
                        name='prometheus-alertbuffer',
                        size='10Gi'
                    ),
                    nfs=dict(
                        directory='/exports',
                        options='*(rw,root_squash)'
                    ),
                    host=None,
                    access=dict(
                        modes=['ReadWriteOnce']
                    ),
                    create_pv=True,
                    create_pvc=False
                )
            )

        return defaults
    def guess_host_provider(self):
        """ Guess the host provider

            Detects GCE (BIOS vendor 'Google'), AWS (Xen guest whose product
            version ends in '.amazon') or OpenStack (product name contains
            'OpenStack'), then fetches that provider's instance metadata.

            Returns:
                dict: name/metadata for the detected provider; both values
                      are None when no provider is recognized
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['ansible_product_name']
        product_version = self.system_facts['ansible_product_version']
        virt_type = self.system_facts['ansible_virtualization_type']
        virt_role = self.system_facts['ansible_virtualization_role']
        bios_vendor = self.system_facts['ansible_system_vendor']
        provider = None
        metadata = None

        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                # A failed ec2-compat lookup invalidates the whole metadata.
                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)
- def init_provider_facts(self):
- """ Initialize the provider facts
- Returns:
- dict: The normalized provider facts
- """
- provider_info = self.guess_host_provider()
- provider_facts = normalize_provider_facts(
- provider_info.get('name'),
- provider_info.get('metadata')
- )
- return provider_facts
- @staticmethod
- def split_openshift_env_fact_keys(openshift_env_fact, openshift_env_structures):
- """ Split openshift_env facts based on openshift_env structures.
- Args:
- openshift_env_fact (string): the openshift_env fact to split
- ex: 'openshift_cloudprovider_openstack_auth_url'
- openshift_env_structures (list): a list of structures to determine fact keys
- ex: ['openshift.cloudprovider.openstack.*']
- Returns:
- list: a list of keys that represent the fact
- ex: ['openshift', 'cloudprovider', 'openstack', 'auth_url']
- """
- # By default, we'll split an openshift_env fact by underscores.
- fact_keys = openshift_env_fact.split('_')
- # Determine if any of the provided variable structures match the fact.
- matching_structure = None
- if openshift_env_structures is not None:
- for structure in openshift_env_structures:
- if re.match(structure, openshift_env_fact):
- matching_structure = structure
- # Fact didn't match any variable structures so return the default fact keys.
- if matching_structure is None:
- return fact_keys
- final_keys = []
- structure_keys = matching_structure.split('.')
- for structure_key in structure_keys:
- # Matched current key. Add to final keys.
- if structure_key == fact_keys[structure_keys.index(structure_key)]:
- final_keys.append(structure_key)
- # Wildcard means we will be taking everything from here to the end of the fact.
- elif structure_key == '*':
- final_keys.append('_'.join(fact_keys[structure_keys.index(structure_key):]))
- # Shouldn't have gotten here, return the fact keys.
- else:
- return fact_keys
- return final_keys
    # Disabling too-many-branches and too-many-locals.
    # This should be cleaned up as a TODO item.
    # pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None,
                         openshift_env=None,
                         openshift_env_structures=None,
                         protected_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
                openshift_env (dict): openshift env facts to set
                openshift_env_structures (list): fact key structures used when
                                                 splitting openshift_env fact names
                protected_facts_to_overwrite (list): protected facts to overwrite in jinja
                                                     '.' notation ex: ['master.master_count']

            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        if openshift_env != {} and openshift_env is not None:
            for fact, value in iteritems(openshift_env):
                oo_env_facts = dict()
                current_level = oo_env_facts
                # Drop the leading 'openshift' key; only keep facts that
                # belong to this role.
                keys = self.split_openshift_env_fact_keys(fact, openshift_env_structures)[1:]
                if len(keys) > 0 and keys[0] != self.role:
                    continue

                # Build a dictionary from the split fact keys.
                # After this loop oo_env_facts is the resultant dictionary.
                # For example:
                # fact = "openshift_metrics_install_metrics"
                # value = 'true'
                # keys = ['metrics', 'install', 'metrics']
                # result = {'metrics': {'install': {'metrics': 'true'}}}
                for i, _ in enumerate(keys):
                    # This is the last key. Set the value.
                    if i == (len(keys) - 1):
                        current_level[keys[i]] = value
                    # This is a key other than the last key. Set as
                    # dictionary and continue.
                    else:
                        current_level[keys[i]] = dict()
                        current_level = current_level[keys[i]]

                facts_to_set = merge_facts(orig=facts_to_set,
                                           new=oo_env_facts,
                                           additive_facts_to_overwrite=[],
                                           protected_facts_to_overwrite=[])

        # Merge with (migrated) facts previously saved on disk.
        local_facts = get_local_facts_from_file(self.filename)

        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite,
                                      protected_facts_to_overwrite)

        new_local_facts = self.remove_empty_facts(new_local_facts)

        # Only validate and persist when something actually changed, and
        # never write to disk in check mode.
        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:  # noqa: F405
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts
- def remove_empty_facts(self, facts=None):
- """ Remove empty facts
- Args:
- facts (dict): facts to clean
- """
- facts_to_remove = []
- for fact, value in iteritems(facts):
- if isinstance(facts[fact], dict):
- facts[fact] = self.remove_empty_facts(facts[fact])
- else:
- if value == "" or value == [""] or value is None:
- facts_to_remove.append(fact)
- for fact in facts_to_remove:
- del facts[fact]
- return facts
    def validate_local_facts(self, facts=None):
        """ Validate local facts

            Fails the Ansible run (via module.fail_json) with a message
            listing every invalid fact found.

            Args:
                facts (dict): local facts to validate
        """
        invalid_facts = dict()
        invalid_facts = self.validate_master_facts(facts, invalid_facts)
        if invalid_facts:
            msg = 'Invalid facts detected:\n'
            # pylint: disable=consider-iterating-dictionary
            for key in invalid_facts.keys():
                msg += '{0}: {1}\n'.format(key, invalid_facts[key])
            module.fail_json(msg=msg, changed=self.changed)  # noqa: F405
- # disabling pylint errors for line-too-long since we're dealing
- # with best effort reduction of error messages here.
- # disabling errors for too-many-branches since we require checking
- # many conditions.
- # pylint: disable=line-too-long, too-many-branches
- @staticmethod
- def validate_master_facts(facts, invalid_facts):
- """ Validate master facts
- Args:
- facts (dict): local facts to validate
- invalid_facts (dict): collected invalid_facts
- Returns:
- dict: Invalid facts
- """
- if 'master' in facts:
- # openshift.master.session_auth_secrets
- if 'session_auth_secrets' in facts['master']:
- session_auth_secrets = facts['master']['session_auth_secrets']
- if not issubclass(type(session_auth_secrets), list):
- invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
- elif 'session_encryption_secrets' not in facts['master']:
- invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption secrets must be set '
- 'if openshift_master_session_auth_secrets is provided.')
- elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
- invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
- 'openshift_master_session_encryption_secrets must be '
- 'equal length.')
- else:
- for secret in session_auth_secrets:
- if len(secret) < 32:
- invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
- 'Secrets must be at least 32 characters in length.')
- # openshift.master.session_encryption_secrets
- if 'session_encryption_secrets' in facts['master']:
- session_encryption_secrets = facts['master']['session_encryption_secrets']
- if not issubclass(type(session_encryption_secrets), list):
- invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
- elif 'session_auth_secrets' not in facts['master']:
- invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
- 'set if openshift_master_session_encryption_secrets '
- 'is provided.')
- else:
- for secret in session_encryption_secrets:
- if len(secret) not in [16, 24, 32]:
- invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
- 'Secrets must be 16, 24, or 32 characters in length.')
- return invalid_facts
def main():
    """ Module entry point: gather, generate and return OpenShift facts. """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(  # noqa: F405
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
            openshift_env=dict(default={}, type='dict', required=False),
            openshift_env_structures=dict(default=[], type='list', required=False),
            protected_facts_to_overwrite=dict(default=[], type='list', required=False)
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    if not HAVE_DBUS:
        module.fail_json(msg="This module requires dbus python bindings")  # noqa: F405

    # Parameters consumed by the ansible fact-gathering helpers invoked
    # from OpenShiftFacts.__init__.
    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']  # noqa: F405
    module.params['gather_timeout'] = 10  # noqa: F405
    module.params['filter'] = '*'  # noqa: F405

    role = module.params['role']  # noqa: F405
    local_facts = module.params['local_facts']  # noqa: F405
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']  # noqa: F405
    openshift_env = module.params['openshift_env']  # noqa: F405
    openshift_env_structures = module.params['openshift_env_structures']  # noqa: F405
    protected_facts_to_overwrite = module.params['protected_facts_to_overwrite']  # noqa: F405

    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite,
                                     openshift_env,
                                     openshift_env_structures,
                                     protected_facts_to_overwrite)

    # Apply the module's file ownership/permission arguments to the
    # local facts file that may have just been written.
    file_params = module.params.copy()  # noqa: F405
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)  # noqa: F405
    changed = module.set_fs_attributes_if_different(file_args,  # noqa: F405
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,  # noqa: F405
                            ansible_facts=openshift_facts.facts)


if __name__ == '__main__':
    main()
|