# cli_installer.py
  1. # pylint: disable=missing-docstring,no-self-use,no-value-for-parameter,too-many-lines
  2. import logging
  3. import os
  4. import sys
  5. import click
  6. from pkg_resources import parse_version
  7. from ooinstall import openshift_ansible, utils
  8. from ooinstall.oo_config import Host, OOConfig, OOConfigInvalidHostError, Role
  9. from ooinstall.variants import find_variant, get_variant_version_combos
  10. INSTALLER_LOG = logging.getLogger('installer')
  11. INSTALLER_LOG.setLevel(logging.CRITICAL)
  12. INSTALLER_FILE_HANDLER = logging.FileHandler('/tmp/installer.txt')
  13. INSTALLER_FILE_HANDLER.setFormatter(
  14. logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
  15. # Example output:
  16. # 2016-08-23 07:34:58,480 - installer - DEBUG - Going to 'load_system_facts'
  17. INSTALLER_FILE_HANDLER.setLevel(logging.DEBUG)
  18. INSTALLER_LOG.addHandler(INSTALLER_FILE_HANDLER)
  19. DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
  20. QUIET_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible-quiet.cfg'
  21. DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
  22. UPGRADE_MAPPINGS = {
  23. '3.6': {
  24. 'minor_version': '3.6',
  25. 'minor_playbook': 'v3_6/upgrade.yml',
  26. 'major_playbook': 'v3_7/upgrade.yml',
  27. 'major_version': '3.7',
  28. },
  29. '3.7': {
  30. 'minor_version': '3.7',
  31. 'minor_playbook': 'v3_7/upgrade.yml',
  32. },
  33. }
  34. def validate_ansible_dir(path):
  35. if not path:
  36. raise click.BadParameter('An Ansible path must be provided')
  37. return path
  38. # if not os.path.exists(path)):
  39. # raise click.BadParameter("Path \"{}\" doesn't exist".format(path))
  40. def validate_prompt_hostname(hostname):
  41. if hostname == '' or utils.is_valid_hostname(hostname):
  42. return hostname
  43. raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
  44. def get_ansible_ssh_user():
  45. click.clear()
  46. message = """
  47. This installation process involves connecting to remote hosts via ssh. Any
  48. account may be used. However, if a non-root account is used, then it must have
  49. passwordless sudo access.
  50. """
  51. click.echo(message)
  52. return click.prompt('User for ssh access', default='root')
  53. def get_routingconfig_subdomain():
  54. click.clear()
  55. message = """
  56. You might want to override the default subdomain used for exposed routes. If you don't know what this is, use the default value.
  57. """
  58. click.echo(message)
  59. return click.prompt('New default subdomain (ENTER for none)', default='')
  60. def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
  61. """
  62. Collect host information from user. This will later be filled in using
  63. Ansible.
  64. Returns: a list of host information collected from the user
  65. """
  66. click.clear()
  67. click.echo('*** Host Configuration ***')
  68. message = """
  69. You must now specify the hosts that will compose your OpenShift cluster.
  70. Please enter an IP address or hostname to connect to for each system in the
  71. cluster. You will then be prompted to identify what role you want this system to
  72. serve in the cluster.
  73. OpenShift masters serve the API and web console and coordinate the jobs to run
  74. across the environment. Optionally, you can specify multiple master systems for
  75. a high-availability (HA) deployment. If you choose an HA deployment, then you
  76. are prompted to identify a *separate* system to act as the load balancer for
  77. your cluster once you define all masters and nodes.
  78. Any masters configured as part of this installation process are also
  79. configured as nodes. This enables the master to proxy to pods
  80. from the API. By default, this node is unschedulable, but this can be changed
  81. after installation with the 'oadm manage-node' command.
  82. OpenShift nodes provide the runtime environments for containers. They host the
  83. required services to be managed by the master.
  84. http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
  85. http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
  86. """
  87. click.echo(message)
  88. hosts = []
  89. roles = set(['master', 'node', 'storage', 'etcd'])
  90. more_hosts = True
  91. num_masters = 0
  92. while more_hosts:
  93. host_props = {}
  94. host_props['roles'] = []
  95. host_props['connect_to'] = click.prompt('Enter hostname or IP address',
  96. value_proc=validate_prompt_hostname)
  97. if not masters_set:
  98. if click.confirm('Will this host be an OpenShift master?'):
  99. host_props['roles'].append('master')
  100. host_props['roles'].append('etcd')
  101. num_masters += 1
  102. if oo_cfg.settings['variant_version'] == '3.0':
  103. masters_set = True
  104. host_props['roles'].append('node')
  105. host_props['containerized'] = False
  106. if oo_cfg.settings['variant_version'] != '3.0':
  107. rpm_or_container = \
  108. click.prompt('Will this host be RPM or Container based (rpm/container)?',
  109. type=click.Choice(['rpm', 'container']),
  110. default='rpm')
  111. if rpm_or_container == 'container':
  112. host_props['containerized'] = True
  113. host_props['new_host'] = existing_env
  114. host = Host(**host_props)
  115. hosts.append(host)
  116. if print_summary:
  117. print_installation_summary(hosts, oo_cfg.settings['variant_version'])
  118. # If we have one master, this is enough for an all-in-one deployment,
  119. # thus we can start asking if you want to proceed. Otherwise we assume
  120. # you must.
  121. if masters_set or num_masters != 2:
  122. more_hosts = click.confirm('Do you want to add additional hosts?')
  123. if num_masters > 2:
  124. master_lb = collect_master_lb(hosts)
  125. if master_lb:
  126. hosts.append(master_lb)
  127. roles.add('master_lb')
  128. else:
  129. set_cluster_hostname(oo_cfg)
  130. if not existing_env:
  131. collect_storage_host(hosts)
  132. return hosts, roles
  133. # pylint: disable=too-many-branches
  134. def print_installation_summary(hosts, version=None, verbose=True):
  135. """
  136. Displays a summary of all hosts configured thus far, and what role each
  137. will play.
  138. Shows total nodes/masters, hints for performing/modifying the deployment
  139. with additional setup, warnings for invalid or sub-optimal configurations.
  140. """
  141. click.clear()
  142. click.echo('*** Installation Summary ***\n')
  143. click.echo('Hosts:')
  144. for host in hosts:
  145. print_host_summary(hosts, host)
  146. masters = [host for host in hosts if host.is_master()]
  147. nodes = [host for host in hosts if host.is_node()]
  148. dedicated_nodes = [host for host in hosts if host.is_node() and not host.is_master()]
  149. click.echo('')
  150. click.echo('Total OpenShift masters: %s' % len(masters))
  151. click.echo('Total OpenShift nodes: %s' % len(nodes))
  152. if verbose:
  153. if len(masters) == 1 and version != '3.0':
  154. ha_hint_message = """
  155. NOTE: Add a total of 3 or more masters to perform an HA installation."""
  156. click.echo(ha_hint_message)
  157. elif len(masters) == 2:
  158. min_masters_message = """
  159. WARNING: A minimum of 3 masters are required to perform an HA installation.
  160. Please add one more to proceed."""
  161. click.echo(min_masters_message)
  162. elif len(masters) >= 3:
  163. ha_message = """
  164. NOTE: Multiple masters specified, this will be an HA deployment with a separate
  165. etcd cluster. You will be prompted to provide the FQDN of a load balancer and
  166. a host for storage once finished entering hosts.
  167. """
  168. click.echo(ha_message)
  169. dedicated_nodes_message = """
  170. WARNING: Dedicated nodes are recommended for an HA deployment. If no dedicated
  171. nodes are specified, each configured master will be marked as a schedulable
  172. node."""
  173. min_ha_nodes_message = """
  174. WARNING: A minimum of 3 dedicated nodes are recommended for an HA
  175. deployment."""
  176. if len(dedicated_nodes) == 0:
  177. click.echo(dedicated_nodes_message)
  178. elif len(dedicated_nodes) < 3:
  179. click.echo(min_ha_nodes_message)
  180. click.echo('')
  181. def print_host_summary(all_hosts, host):
  182. click.echo("- %s" % host.connect_to)
  183. if host.is_master():
  184. click.echo(" - OpenShift master")
  185. if host.is_node():
  186. if host.is_dedicated_node():
  187. click.echo(" - OpenShift node (Dedicated)")
  188. elif host.is_schedulable_node(all_hosts):
  189. click.echo(" - OpenShift node")
  190. else:
  191. click.echo(" - OpenShift node (Unscheduled)")
  192. if host.is_master_lb():
  193. if host.preconfigured:
  194. click.echo(" - Load Balancer (Preconfigured)")
  195. else:
  196. click.echo(" - Load Balancer (HAProxy)")
  197. if host.is_etcd():
  198. click.echo(" - Etcd")
  199. if host.is_storage():
  200. click.echo(" - Storage")
  201. if host.new_host:
  202. click.echo(" - NEW")
  203. def collect_master_lb(hosts):
  204. """
  205. Get a valid load balancer from the user and append it to the list of
  206. hosts.
  207. Ensure user does not specify a system already used as a master/node as
  208. this is an invalid configuration.
  209. """
  210. message = """
  211. Setting up high-availability masters requires a load balancing solution.
  212. Please provide the FQDN of a host that will be configured as a proxy. This
  213. can be either an existing load balancer configured to balance all masters on
  214. port 8443 or a new host that will have HAProxy installed on it.
  215. If the host provided is not yet configured, a reference HAProxy load
  216. balancer will be installed. It's important to note that while the rest of the
  217. environment will be fault-tolerant, this reference load balancer will not be.
  218. It can be replaced post-installation with a load balancer with the same
  219. hostname.
  220. """
  221. click.echo(message)
  222. host_props = {}
  223. # Using an embedded function here so we have access to the hosts list:
  224. def validate_prompt_lb(hostname):
  225. # Run the standard hostname check first:
  226. hostname = validate_prompt_hostname(hostname)
  227. # Make sure this host wasn't already specified:
  228. for host in hosts:
  229. if host.connect_to == hostname and (host.is_master() or host.is_node()):
  230. raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
  231. 'please specify a separate host' % hostname)
  232. return hostname
  233. lb_hostname = click.prompt('Enter hostname or IP address',
  234. value_proc=validate_prompt_lb)
  235. if lb_hostname:
  236. host_props['connect_to'] = lb_hostname
  237. install_haproxy = \
  238. click.confirm('Should the reference HAProxy load balancer be installed on this host?')
  239. host_props['preconfigured'] = not install_haproxy
  240. host_props['roles'] = ['master_lb']
  241. return Host(**host_props)
  242. else:
  243. return None
  244. def set_cluster_hostname(oo_cfg):
  245. first_master = next((host for host in oo_cfg.deployment.hosts if host.is_master()), None)
  246. message = """
  247. You have chosen to install a single master cluster (non-HA).
  248. In a single master cluster, the cluster host name (Ansible variable openshift_master_cluster_public_hostname) is set by default to the host name of the single master. In a multiple master (HA) cluster, the FQDN of a host must be provided that will be configured as a proxy. This could be either an existing load balancer configured to balance all masters on
  249. port 8443 or a new host that would have HAProxy installed on it.
  250. (Optional)
  251. If you want to override the cluster host name now to something other than the default (the host name of the single master), or if you think you might add masters later to become an HA cluster and want to future proof your cluster host name choice, please provide a FQDN. Otherwise, press ENTER to continue and accept the default.
  252. """
  253. click.echo(message)
  254. cluster_hostname = click.prompt('Enter hostname or IP address',
  255. default=str(first_master))
  256. oo_cfg.deployment.variables['openshift_master_cluster_hostname'] = cluster_hostname
  257. oo_cfg.deployment.variables['openshift_master_cluster_public_hostname'] = cluster_hostname
  258. def collect_storage_host(hosts):
  259. """
  260. Get a valid host for storage from the user and append it to the list of
  261. hosts.
  262. """
  263. message = """
  264. Setting up high-availability masters requires a storage host. Please provide a
  265. host that will be configured as a Registry Storage.
  266. Note: Containerized storage hosts are not currently supported.
  267. """
  268. click.echo(message)
  269. host_props = {}
  270. first_master = next(host for host in hosts if host.is_master())
  271. hostname_or_ip = click.prompt('Enter hostname or IP address',
  272. value_proc=validate_prompt_hostname,
  273. default=first_master.connect_to)
  274. existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts)
  275. if existing and existing_host.is_node():
  276. existing_host.roles.append('storage')
  277. else:
  278. host_props['connect_to'] = hostname_or_ip
  279. host_props['preconfigured'] = False
  280. host_props['roles'] = ['storage']
  281. storage = Host(**host_props)
  282. hosts.append(storage)
  283. def is_host_already_node_or_master(hostname, hosts):
  284. is_existing = False
  285. existing_host = None
  286. for host in hosts:
  287. if host.connect_to == hostname and (host.is_master() or host.is_node()):
  288. is_existing = True
  289. existing_host = host
  290. return is_existing, existing_host
  291. def confirm_hosts_facts(oo_cfg, callback_facts):
  292. hosts = oo_cfg.deployment.hosts
  293. click.clear()
  294. message = """
  295. The following is a list of the facts gathered from the provided hosts. The
  296. hostname for a system inside the cluster is often different from the hostname
  297. that is resolveable from command-line or web clients, therefore these settings
  298. cannot be validated automatically.
  299. For some cloud providers, the installer is able to gather metadata exposed in
  300. the instance, so reasonable defaults will be provided.
  301. Please confirm that they are correct before moving forward.
  302. """
  303. notes = """
  304. Format:
  305. connect_to,IP,public IP,hostname,public hostname
  306. Notes:
  307. * The installation host is the hostname from the installer's perspective.
  308. * The IP of the host should be the internal IP of the instance.
  309. * The public IP should be the externally accessible IP associated with the instance
  310. * The hostname should resolve to the internal IP from the instances
  311. themselves.
  312. * The public hostname should resolve to the external IP from hosts outside of
  313. the cloud.
  314. """
  315. # For testing purposes we need to click.echo only once, so build up
  316. # the message:
  317. output = message
  318. default_facts_lines = []
  319. default_facts = {}
  320. for host in hosts:
  321. if host.preconfigured:
  322. continue
  323. try:
  324. default_facts[host.connect_to] = {}
  325. host.ip = callback_facts[host.connect_to]["common"]["ip"]
  326. host.public_ip = callback_facts[host.connect_to]["common"]["public_ip"]
  327. host.hostname = callback_facts[host.connect_to]["common"]["hostname"]
  328. host.public_hostname = callback_facts[host.connect_to]["common"]["public_hostname"]
  329. except KeyError:
  330. click.echo("Problem fetching facts from {}".format(host.connect_to))
  331. continue
  332. default_facts_lines.append(",".join([host.connect_to,
  333. host.ip,
  334. host.public_ip,
  335. host.hostname,
  336. host.public_hostname]))
  337. output = "%s\n%s" % (output, ",".join([host.connect_to,
  338. host.ip,
  339. host.public_ip,
  340. host.hostname,
  341. host.public_hostname]))
  342. output = "%s\n%s" % (output, notes)
  343. click.echo(output)
  344. facts_confirmed = click.confirm("Do the above facts look correct?")
  345. if not facts_confirmed:
  346. message = """
  347. Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
  348. """ % oo_cfg.config_path
  349. click.echo(message)
  350. # Make sure we actually write out the config file.
  351. oo_cfg.save_to_disk()
  352. sys.exit(0)
  353. return default_facts
  354. def check_hosts_config(oo_cfg, unattended):
  355. click.clear()
  356. masters = [host for host in oo_cfg.deployment.hosts if host.is_master()]
  357. if len(masters) == 2:
  358. click.echo("A minimum of 3 masters are required for HA deployments.")
  359. sys.exit(1)
  360. if len(masters) > 1:
  361. master_lb = [host for host in oo_cfg.deployment.hosts if host.is_master_lb()]
  362. if len(master_lb) > 1:
  363. click.echo('ERROR: More than one master load balancer specified. Only one is allowed.')
  364. sys.exit(1)
  365. elif len(master_lb) == 1:
  366. if master_lb[0].is_master() or master_lb[0].is_node():
  367. click.echo('ERROR: The master load balancer is configured as a master or node. '
  368. 'Please correct this.')
  369. sys.exit(1)
  370. else:
  371. message = """
  372. ERROR: No master load balancer specified in config. You must provide the FQDN
  373. of a load balancer to balance the API (port 8443) on all master hosts.
  374. https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
  375. """
  376. click.echo(message)
  377. sys.exit(1)
  378. dedicated_nodes = [host for host in oo_cfg.deployment.hosts
  379. if host.is_node() and not host.is_master()]
  380. if len(dedicated_nodes) == 0:
  381. message = """
  382. WARNING: No dedicated nodes specified. By default, colocated masters have
  383. their nodes set to unschedulable. If you proceed all nodes will be labelled
  384. as schedulable.
  385. """
  386. if unattended:
  387. click.echo(message)
  388. else:
  389. confirm_continue(message)
  390. return
  391. def get_variant_and_version(multi_master=False):
  392. message = "\nWhich variant would you like to install?\n\n"
  393. i = 1
  394. combos = get_variant_version_combos()
  395. for (variant, _) in combos:
  396. message = "%s\n(%s) %s" % (message, i, variant.description)
  397. i = i + 1
  398. message = "%s\n" % message
  399. click.echo(message)
  400. if multi_master:
  401. click.echo('NOTE: 3.0 installations are not')
  402. response = click.prompt("Choose a variant from above: ", default=1)
  403. product, version = combos[response - 1]
  404. return product, version
  405. def confirm_continue(message):
  406. if message:
  407. click.echo(message)
  408. click.confirm("Are you ready to continue?", default=False, abort=True)
  409. return
  410. def error_if_missing_info(oo_cfg):
  411. missing_info = False
  412. if not oo_cfg.deployment.hosts:
  413. missing_info = True
  414. click.echo('For unattended installs, hosts must be specified on the '
  415. 'command line or in the config file: %s' % oo_cfg.config_path)
  416. sys.exit(1)
  417. if 'ansible_ssh_user' not in oo_cfg.deployment.variables:
  418. click.echo("Must specify ansible_ssh_user in configuration file.")
  419. sys.exit(1)
  420. # Lookup a variant based on the key we were given:
  421. if not oo_cfg.settings['variant']:
  422. click.echo("No variant specified in configuration file.")
  423. sys.exit(1)
  424. ver = None
  425. if 'variant_version' in oo_cfg.settings:
  426. ver = oo_cfg.settings['variant_version']
  427. variant, version = find_variant(oo_cfg.settings['variant'], version=ver)
  428. if variant is None or version is None:
  429. err_variant_name = oo_cfg.settings['variant']
  430. if ver:
  431. err_variant_name = "%s %s" % (err_variant_name, ver)
  432. click.echo("%s is not an installable variant." % err_variant_name)
  433. sys.exit(1)
  434. oo_cfg.settings['variant_version'] = version.name
  435. # check that all listed host roles are included
  436. listed_roles = oo_cfg.get_host_roles_set()
  437. configured_roles = set([role for role in oo_cfg.deployment.roles])
  438. if listed_roles != configured_roles:
  439. missing_info = True
  440. click.echo('Any roles assigned to hosts must be defined.')
  441. if missing_info:
  442. sys.exit(1)
  443. def get_proxy_hosts_excludes():
  444. message = """
  445. If a proxy is needed to reach HTTP and HTTPS traffic, please enter the
  446. name below. This proxy will be configured by default for all processes
  447. that need to reach systems outside the cluster. An example proxy value
  448. would be:
  449. http://proxy.example.com:8080/
  450. More advanced configuration is possible if using Ansible directly:
  451. https://docs.openshift.com/enterprise/latest/install_config/http_proxies.html
  452. """
  453. click.echo(message)
  454. message = "Specify your http proxy ? (ENTER for none)"
  455. http_proxy_hostname = click.prompt(message, default='')
  456. # TODO: Fix this prompt message and behavior. 'ENTER' will default
  457. # to the http_proxy_hostname if one was provided
  458. message = "Specify your https proxy ? (ENTER for none)"
  459. https_proxy_hostname = click.prompt(message, default=http_proxy_hostname)
  460. if http_proxy_hostname or https_proxy_hostname:
  461. message = """
  462. All hosts in your OpenShift inventory will automatically be added to the NO_PROXY value.
  463. Please provide any additional hosts to be added to NO_PROXY. (ENTER for none)
  464. """
  465. proxy_excludes = click.prompt(message, default='')
  466. else:
  467. proxy_excludes = ''
  468. return http_proxy_hostname, https_proxy_hostname, proxy_excludes
  469. def get_missing_info_from_user(oo_cfg):
  470. """ Prompts the user for any information missing from the given configuration. """
  471. click.clear()
  472. message = """
  473. Welcome to the OpenShift Enterprise 3 installation.
  474. Please confirm that following prerequisites have been met:
  475. * All systems where OpenShift will be installed are running Red Hat Enterprise
  476. Linux 7.
  477. * All systems are properly subscribed to the required OpenShift Enterprise 3
  478. repositories.
  479. * All systems have run docker-storage-setup (part of the Red Hat docker RPM).
  480. * All systems have working DNS that resolves not only from the perspective of
  481. the installer, but also from within the cluster.
  482. When the process completes you will have a default configuration for masters
  483. and nodes. For ongoing environment maintenance it's recommended that the
  484. official Ansible playbooks be used.
  485. For more information on installation prerequisites please see:
  486. https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.html
  487. """
  488. confirm_continue(message)
  489. click.clear()
  490. if not oo_cfg.deployment.variables.get('ansible_ssh_user', False):
  491. oo_cfg.deployment.variables['ansible_ssh_user'] = get_ansible_ssh_user()
  492. click.clear()
  493. if not oo_cfg.settings.get('variant', ''):
  494. variant, version = get_variant_and_version()
  495. oo_cfg.settings['variant'] = variant.name
  496. oo_cfg.settings['variant_version'] = version.name
  497. oo_cfg.settings['variant_subtype'] = version.subtype
  498. click.clear()
  499. if not oo_cfg.deployment.hosts:
  500. oo_cfg.deployment.hosts, roles = collect_hosts(oo_cfg)
  501. set_infra_nodes(oo_cfg.deployment.hosts)
  502. for role in roles:
  503. oo_cfg.deployment.roles[role] = Role(name=role, variables={})
  504. click.clear()
  505. if 'master_routingconfig_subdomain' not in oo_cfg.deployment.variables:
  506. oo_cfg.deployment.variables['master_routingconfig_subdomain'] = \
  507. get_routingconfig_subdomain()
  508. click.clear()
  509. # Are any proxy vars already presisted?
  510. proxy_vars = ['proxy_exclude_hosts', 'proxy_https', 'proxy_http']
  511. # Empty list if NO proxy vars were presisted
  512. saved_proxy_vars = [pv for pv in proxy_vars
  513. if oo_cfg.deployment.variables.get(pv, 'UNSET') is not 'UNSET']
  514. INSTALLER_LOG.debug("Evaluated proxy settings, found %s presisted values",
  515. len(saved_proxy_vars))
  516. current_version = parse_version(
  517. oo_cfg.settings.get('variant_version', '0.0'))
  518. min_version = parse_version('3.2')
  519. # No proxy vars were saved and we are running a version which
  520. # recognizes proxy parameters. We must prompt the user for values
  521. # if this conditional is true.
  522. if not saved_proxy_vars and current_version >= min_version:
  523. INSTALLER_LOG.debug("Prompting user to enter proxy values")
  524. http_proxy, https_proxy, proxy_excludes = get_proxy_hosts_excludes()
  525. oo_cfg.deployment.variables['proxy_http'] = http_proxy
  526. oo_cfg.deployment.variables['proxy_https'] = https_proxy
  527. oo_cfg.deployment.variables['proxy_exclude_hosts'] = proxy_excludes
  528. click.clear()
  529. return oo_cfg
  530. def collect_new_nodes(oo_cfg):
  531. click.clear()
  532. click.echo('*** New Node Configuration ***')
  533. message = """
  534. Add new nodes here
  535. """
  536. click.echo(message)
  537. new_nodes, _ = collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
  538. return new_nodes
  539. def get_installed_hosts(hosts, callback_facts):
  540. installed_hosts = []
  541. uninstalled_hosts = []
  542. for host in [h for h in hosts if h.is_master() or h.is_node()]:
  543. if host.connect_to in callback_facts.keys():
  544. if is_installed_host(host, callback_facts):
  545. INSTALLER_LOG.debug("%s is already installed", str(host))
  546. installed_hosts.append(host)
  547. else:
  548. INSTALLER_LOG.debug("%s is not installed", str(host))
  549. uninstalled_hosts.append(host)
  550. return installed_hosts, uninstalled_hosts
  551. def is_installed_host(host, callback_facts):
  552. version_found = 'common' in callback_facts[host.connect_to].keys() and \
  553. callback_facts[host.connect_to]['common'].get('version', '') and \
  554. callback_facts[host.connect_to]['common'].get('version', '') != 'None'
  555. return version_found
  556. def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
  557. """
  558. We get here once there are hosts in oo_cfg and we need to find out what
  559. state they are in. There are several different cases that might occur:
  560. 1. All hosts in oo_cfg are uninstalled. In this case, we should proceed
  561. with a normal installation.
  562. 2. All hosts in oo_cfg are installed. In this case, ask the user if they
  563. want to force reinstall or exit. We can also hint in this case about
  564. the scaleup workflow.
  565. 3. Some hosts are installed and some are uninstalled. In this case, prompt
  566. the user if they want to force (re)install all hosts specified or direct
  567. them to the scaleup workflow and exit.
  568. """
  569. hosts_to_run_on = []
  570. # Check if master or nodes already have something installed
  571. installed_hosts, uninstalled_hosts = get_installed_hosts(oo_cfg.deployment.hosts,
  572. callback_facts)
  573. nodes = [host for host in oo_cfg.deployment.hosts if host.is_node()]
  574. masters_and_nodes = [host for host in oo_cfg.deployment.hosts if host.is_master() or host.is_node()]
  575. in_hosts = [str(h) for h in installed_hosts]
  576. un_hosts = [str(h) for h in uninstalled_hosts]
  577. all_hosts = [str(h) for h in oo_cfg.deployment.hosts]
  578. m_and_n = [str(h) for h in masters_and_nodes]
  579. INSTALLER_LOG.debug("installed hosts: %s", ", ".join(in_hosts))
  580. INSTALLER_LOG.debug("uninstalled hosts: %s", ", ".join(un_hosts))
  581. INSTALLER_LOG.debug("deployment hosts: %s", ", ".join(all_hosts))
  582. INSTALLER_LOG.debug("masters and nodes: %s", ", ".join(m_and_n))
  583. # Case (1): All uninstalled hosts
  584. if len(uninstalled_hosts) == len(nodes):
  585. click.echo('All hosts in config are uninstalled. Proceeding with installation...')
  586. hosts_to_run_on = list(oo_cfg.deployment.hosts)
  587. else:
  588. # Case (2): All installed hosts
  589. if len(installed_hosts) == len(masters_and_nodes):
  590. message = """
  591. All specified hosts in specified environment are installed.
  592. """
  593. # Case (3): Some installed, some uninstalled
  594. else:
  595. message = """
  596. A mix of installed and uninstalled hosts have been detected in your environment.
  597. Please make sure your environment was installed successfully before adding new nodes.
  598. """
  599. # Still inside the case 2/3 else condition
  600. mixed_msg = """
  601. \tInstalled hosts:
  602. \t\t{inst_hosts}
  603. \tUninstalled hosts:
  604. \t\t{uninst_hosts}""".format(inst_hosts=", ".join(in_hosts), uninst_hosts=", ".join(un_hosts))
  605. click.echo(mixed_msg)
  606. # Out of the case 2/3 if/else
  607. click.echo(message)
  608. if not unattended:
  609. response = click.confirm('Do you want to (re)install the environment?\n\n'
  610. 'Note: This will potentially erase any custom changes.')
  611. if response:
  612. hosts_to_run_on = list(oo_cfg.deployment.hosts)
  613. force = True
  614. elif unattended and force:
  615. hosts_to_run_on = list(oo_cfg.deployment.hosts)
  616. if not force:
  617. message = """
  618. If you want to force reinstall of your environment, run:
  619. `atomic-openshift-installer install --force`
  620. If you want to add new nodes to this environment, run:
  621. `atomic-openshift-installer scaleup`
  622. """
  623. click.echo(message)
  624. sys.exit(1)
  625. return hosts_to_run_on, callback_facts
  626. def set_infra_nodes(hosts):
  627. if all(host.is_master() for host in hosts):
  628. infra_list = hosts
  629. else:
  630. nodes_list = [host for host in hosts if host.is_schedulable_node(hosts)]
  631. infra_list = nodes_list[:2]
  632. for host in infra_list:
  633. host.node_labels = "{'region': 'infra'}"
  634. def run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory):
  635. # Write Ansible inventory file to disk:
  636. inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on)
  637. click.echo()
  638. click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path)
  639. click.echo("Wrote Ansible inventory: %s" % inventory_file)
  640. click.echo()
  641. if gen_inventory:
  642. sys.exit(0)
  643. click.echo('Ready to run installation process.')
  644. message = """
  645. If changes are needed please edit the installer.cfg.yml config file above and re-run.
  646. """
  647. if not unattended:
  648. confirm_continue(message)
  649. error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts,
  650. hosts_to_run_on, verbose)
  651. if error:
  652. # The bootstrap script will print out the log location.
  653. message = """
  654. An error was detected. After resolving the problem please relaunch the
  655. installation process.
  656. """
  657. click.echo(message)
  658. sys.exit(1)
  659. else:
  660. message = """
  661. The installation was successful!
  662. If this is your first time installing please take a look at the Administrator
  663. Guide for advanced options related to routing, storage, authentication, and
  664. more:
  665. http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
  666. """
  667. click.echo(message)
@click.group(context_settings=dict(max_content_width=120))
@click.pass_context
@click.option('--unattended', '-u', is_flag=True, default=False)
@click.option('--configuration', '-c',
              type=click.Path(file_okay=True,
                              dir_okay=False,
                              writable=True,
                              readable=True),
              default=None)
@click.option('--ansible-playbook-directory',
              '-a',
              type=click.Path(exists=True,
                              file_okay=False,
                              dir_okay=True,
                              readable=True),
              # callback=validate_ansible_dir,
              default=DEFAULT_PLAYBOOK_DIR,
              envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
@click.option('--ansible-log-path',
              type=click.Path(file_okay=True,
                              dir_okay=False,
                              writable=True,
                              readable=True),
              default="/tmp/ansible.log")
@click.option('-v', '--verbose',
              is_flag=True, default=False)
@click.option('-d', '--debug',
              help="Enable installer debugging (/tmp/installer.log)",
              is_flag=True, default=False)
@click.help_option('--help', '-h')
# pylint: disable=too-many-arguments
# pylint: disable=line-too-long
# Main CLI entrypoint, not much we can do about too many arguments.
def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_log_path, verbose, debug):
    """
    atomic-openshift-installer makes the process for installing OSE or AEP
    easier by interactively gathering the data needed to run on each host.
    It can also be run in unattended mode if provided with a configuration file.

    Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
    """
    if debug:
        # DEFAULT log level threshold is set to CRITICAL (the
        # highest), anything below that (we only use debug/warning
        # presently) is not logged. If '-d' is given though, we'll
        # lower the threshold to debug (almost everything gets through)
        INSTALLER_LOG.setLevel(logging.DEBUG)
        INSTALLER_LOG.debug("Quick Installer debugging initialized")

    # Stash raw CLI options on the click context so subcommands can read them.
    ctx.obj = {}
    ctx.obj['unattended'] = unattended
    ctx.obj['configuration'] = configuration
    ctx.obj['ansible_log_path'] = ansible_log_path
    ctx.obj['verbose'] = verbose

    # Load (or initialize) the installer config; a malformed host entry is fatal.
    try:
        oo_cfg = OOConfig(ctx.obj['configuration'])
    except OOConfigInvalidHostError as err:
        click.echo(err)
        sys.exit(1)

    # Playbook directory resolution order: CLI option, then config file,
    # then the packaged default location (if it exists on disk).
    # If no playbook dir on the CLI, check the config:
    if not ansible_playbook_directory:
        ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')

    # If still no playbook dir, check for the default location:
    if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR):
        ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR
    validate_ansible_dir(ansible_playbook_directory)

    # Record the resolved directory in every place later code looks for it.
    oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
    oo_cfg.ansible_playbook_directory = ansible_playbook_directory
    ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory

    if os.path.exists(DEFAULT_ANSIBLE_CONFIG):
        # If we're installed by RPM this file should exist and we can use it as our default:
        oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG

    # Quiet ansible config suppresses output unless the user asked for -v.
    if not verbose and os.path.exists(QUIET_ANSIBLE_CONFIG):
        oo_cfg.settings['ansible_quiet_config'] = QUIET_ANSIBLE_CONFIG

    oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']

    # Make the loaded config available to subcommands and to the ansible wrapper.
    ctx.obj['oo_cfg'] = oo_cfg
    openshift_ansible.set_config(oo_cfg)
  743. @click.command()
  744. @click.pass_context
  745. def uninstall(ctx):
  746. oo_cfg = ctx.obj['oo_cfg']
  747. verbose = ctx.obj['verbose']
  748. if hasattr(oo_cfg, 'deployment'):
  749. hosts = oo_cfg.deployment.hosts
  750. elif hasattr(oo_cfg, 'hosts'):
  751. hosts = oo_cfg.hosts
  752. else:
  753. click.echo("No hosts defined in: %s" % oo_cfg.config_path)
  754. sys.exit(1)
  755. click.echo("OpenShift will be uninstalled from the following hosts:\n")
  756. if not ctx.obj['unattended']:
  757. # Prompt interactively to confirm:
  758. for host in hosts:
  759. click.echo(" * %s" % host.connect_to)
  760. proceed = click.confirm("\nDo you want to proceed?")
  761. if not proceed:
  762. click.echo("Uninstall cancelled.")
  763. sys.exit(0)
  764. openshift_ansible.run_uninstall_playbook(hosts, verbose)
  765. @click.command(context_settings=dict(max_content_width=120))
  766. @click.option('--latest-minor', '-l', is_flag=True, default=False)
  767. @click.option('--next-major', '-n', is_flag=True, default=False)
  768. @click.pass_context
  769. # pylint: disable=too-many-statements,too-many-branches
  770. def upgrade(ctx, latest_minor, next_major):
  771. click.echo("Upgrades are no longer supported by this version of installer")
  772. click.echo("Please see the documentation for manual upgrade:")
  773. click.echo("https://docs.openshift.com/container-platform/latest/install_config/upgrading/automated_upgrades.html")
  774. sys.exit(1)
  775. @click.command()
  776. @click.option('--force', '-f', is_flag=True, default=False)
  777. @click.option('--gen-inventory', is_flag=True, default=False,
  778. help="Generate an Ansible inventory file and exit.")
  779. @click.pass_context
  780. def install(ctx, force, gen_inventory):
  781. oo_cfg = ctx.obj['oo_cfg']
  782. verbose = ctx.obj['verbose']
  783. unattended = ctx.obj['unattended']
  784. if unattended:
  785. error_if_missing_info(oo_cfg)
  786. else:
  787. oo_cfg = get_missing_info_from_user(oo_cfg)
  788. check_hosts_config(oo_cfg, unattended)
  789. print_installation_summary(oo_cfg.deployment.hosts,
  790. oo_cfg.settings.get('variant_version', None))
  791. click.echo('Gathering information from hosts...')
  792. callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
  793. verbose)
  794. if error or callback_facts is None:
  795. click.echo("There was a problem fetching the required information. "
  796. "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
  797. sys.exit(1)
  798. hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg,
  799. callback_facts,
  800. unattended,
  801. force)
  802. # We already verified this is not the case for unattended installs, so this can
  803. # only trigger for live CLI users:
  804. if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0:
  805. confirm_hosts_facts(oo_cfg, callback_facts)
  806. # Write quick installer config file to disk:
  807. oo_cfg.save_to_disk()
  808. run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory)
  809. @click.command()
  810. @click.option('--gen-inventory', is_flag=True, default=False,
  811. help="Generate an Ansible inventory file and exit.")
  812. @click.pass_context
  813. def scaleup(ctx, gen_inventory):
  814. oo_cfg = ctx.obj['oo_cfg']
  815. verbose = ctx.obj['verbose']
  816. unattended = ctx.obj['unattended']
  817. installed_hosts = list(oo_cfg.deployment.hosts)
  818. if len(installed_hosts) == 0:
  819. click.echo('No hosts specified.')
  820. sys.exit(1)
  821. click.echo('Welcome to the OpenShift Enterprise 3 Scaleup utility.')
  822. # Scaleup requires manual data entry. Therefore, we do not support
  823. # unattended operations.
  824. if unattended:
  825. msg = """
  826. ---
  827. The 'scaleup' operation does not support unattended
  828. functionality. Re-run the installer without the '-u' or '--unattended'
  829. option to continue.
  830. """
  831. click.echo(msg)
  832. sys.exit(1)
  833. # Resume normal scaleup workflow
  834. print_installation_summary(installed_hosts,
  835. oo_cfg.settings['variant_version'],
  836. verbose=False,)
  837. message = """
  838. ---
  839. We have detected this previously installed OpenShift environment.
  840. This tool will guide you through the process of adding additional
  841. nodes to your cluster.
  842. """
  843. confirm_continue(message)
  844. error_if_missing_info(oo_cfg)
  845. check_hosts_config(oo_cfg, True)
  846. installed_masters = [host for host in installed_hosts if host.is_master()]
  847. new_nodes = collect_new_nodes(oo_cfg)
  848. oo_cfg.deployment.hosts.extend(new_nodes)
  849. hosts_to_run_on = installed_masters + new_nodes
  850. openshift_ansible.set_config(oo_cfg)
  851. click.echo('Gathering information from hosts...')
  852. callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose)
  853. if error or callback_facts is None:
  854. click.echo("There was a problem fetching the required information. See "
  855. "{} for details.".format(oo_cfg.settings['ansible_log_path']))
  856. sys.exit(1)
  857. print_installation_summary(oo_cfg.deployment.hosts,
  858. oo_cfg.settings.get('variant_version', None))
  859. click.echo('Gathering information from hosts...')
  860. callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
  861. verbose)
  862. if error or callback_facts is None:
  863. click.echo("There was a problem fetching the required information. "
  864. "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
  865. sys.exit(1)
  866. # We already verified this is not the case for unattended installs, so this can
  867. # only trigger for live CLI users:
  868. if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0:
  869. confirm_hosts_facts(oo_cfg, callback_facts)
  870. # Write quick installer config file to disk:
  871. oo_cfg.save_to_disk()
  872. run_config_playbook(oo_cfg, hosts_to_run_on, unattended, verbose, gen_inventory)
  873. cli.add_command(install)
  874. cli.add_command(scaleup)
  875. cli.add_command(upgrade)
  876. cli.add_command(uninstall)
  877. if __name__ == '__main__':
  878. # This is expected behaviour for context passing with click library:
  879. # pylint: disable=unexpected-keyword-arg
  880. cli(obj={})