# TODO: Temporarily disabled due to importing old code into openshift-ansible
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter

import click
import os
import re
import sys

from ooinstall import openshift_ansible
from ooinstall import OOConfig
from ooinstall.oo_config import OOConfigInvalidHostError
from ooinstall.oo_config import Host
from ooinstall.variants import find_variant, get_variant_version_combos

DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'

def validate_ansible_dir(path):
    if not path:
        raise click.BadParameter('An ansible path must be provided')
    return path
    # if not os.path.exists(path):
    #     raise click.BadParameter("Path \"{}\" doesn't exist".format(path))

def is_valid_hostname(hostname):
    if not hostname or len(hostname) > 255:
        return False
    if hostname[-1] == ".":
        hostname = hostname[:-1]  # strip exactly one dot from the right, if present
    allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
    return all(allowed.match(x) for x in hostname.split("."))
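
# A few sanity-check examples for is_valid_hostname above (illustrative, not
# part of the installer's test suite):
#   is_valid_hostname("master1.example.com")  -> True
#   is_valid_hostname("-bad.example.com")     -> False (labels may not start with '-')
#   is_valid_hostname("a" * 300)              -> False (names over 255 characters are rejected)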

def validate_prompt_hostname(hostname):
    if hostname == '' or is_valid_hostname(hostname):
        return hostname
    raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
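
# Note: when validate_prompt_hostname is used as a value_proc for click.prompt()
# below, click catches the BadParameter, prints the error and re-prompts rather
# than aborting the run.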

def get_ansible_ssh_user():
    click.clear()
    message = """
This installation process will involve connecting to remote hosts via ssh. Any
account may be used, but if a non-root account is used, it must have
passwordless sudo access.
"""
    click.echo(message)
    return click.prompt('User for ssh access', default='root')

def get_master_routingconfig_subdomain():
    click.clear()
    message = """
You might want to override the default subdomain used for exposed routes. If you don't know what
this is, use the default value.
"""
    click.echo(message)
    return click.prompt('New default subdomain (ENTER for none)', default='')

def list_hosts(hosts):
    hosts_idx = range(len(hosts))
    for idx in hosts_idx:
        click.echo(' {}: {}'.format(idx, hosts[idx]))

def delete_hosts(hosts):
    while True:
        list_hosts(hosts)
        del_idx = click.prompt('Select host to delete, y/Y to confirm, ' \
                               'or n/N to add more hosts', default='n')
        try:
            del_idx = int(del_idx)
            hosts.remove(hosts[del_idx])
        except IndexError:
            click.echo("\"{}\" doesn't match any hosts listed.".format(del_idx))
        except ValueError:
            try:
                response = del_idx.lower()
                if response in ['y', 'n']:
                    return hosts, response
                click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx))
            except AttributeError:
                click.echo("\"{}\" doesn't correspond to any valid input.".format(del_idx))
    return hosts, None

def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
    """
    Collect host information from user. This will later be filled in using
    ansible.

    Returns: a list of host information collected from the user
    """
    click.clear()
    click.echo('*** Host Configuration ***')
    message = """
You must now specify the hosts that will compose your OpenShift cluster.

Please enter an IP or hostname to connect to for each system in the cluster.
You will then be prompted to identify what role you would like this system to
serve in the cluster.

OpenShift Masters serve the API and web console and coordinate the jobs to run
across the environment. If desired you can specify multiple Master systems for
an HA deployment, in which case you will be prompted to identify a *separate*
system to act as the load balancer for your cluster after all Masters and Nodes
are defined.

If only one Master is specified, an etcd instance embedded within the OpenShift
Master service will be used as the datastore. This can be later replaced with a
separate etcd instance if desired. If multiple Masters are specified, a
separate etcd cluster will be configured with each Master serving as a member.

Any Masters configured as part of this installation process will also be
configured as Nodes. This is so that the Master will be able to proxy to Pods
from the API. By default this Node will be unschedulable but this can be changed
after installation with 'oadm manage-node'.

OpenShift Nodes provide the runtime environments for containers. They will
host the required services to be managed by the Master.

http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
"""
    click.echo(message)

    hosts = []
    more_hosts = True
    num_masters = 0

    while more_hosts:
        host_props = {}
        host_props['connect_to'] = click.prompt('Enter hostname or IP address',
                                                value_proc=validate_prompt_hostname)

        if not masters_set:
            if click.confirm('Will this host be an OpenShift Master?'):
                host_props['master'] = True
                num_masters += 1

                if oo_cfg.settings['variant_version'] == '3.0':
                    masters_set = True

        host_props['node'] = True

        host_props['containerized'] = False
        if oo_cfg.settings['variant_version'] != '3.0':
            rpm_or_container = \
                click.prompt('Will this host be RPM or Container based (rpm/container)?',
                             type=click.Choice(['rpm', 'container']),
                             default='rpm')
            if rpm_or_container == 'container':
                host_props['containerized'] = True

        if existing_env:
            host_props['new_host'] = True
        else:
            host_props['new_host'] = False

        host = Host(**host_props)
        hosts.append(host)

        if print_summary:
            print_installation_summary(hosts, oo_cfg.settings['variant_version'])

        # If we have one master, this is enough for an all-in-one deployment,
        # thus we can start asking if you wish to proceed. Otherwise we assume
        # you must.
        if masters_set or num_masters != 2:
            more_hosts = click.confirm('Do you want to add additional hosts?')

    if num_masters >= 3:
        collect_master_lb(hosts)

    if not existing_env:
        collect_storage_host(hosts)

    return hosts
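
# Roughly, collect_hosts above turns each answer session into Host objects such
# as (the hostname is an illustrative placeholder, not a default):
#   Host(connect_to='master1.example.com', master=True, node=True,
#        containerized=False, new_host=False)
# plus, for HA setups, the load balancer and storage hosts gathered by
# collect_master_lb and collect_storage_host.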

def print_installation_summary(hosts, version=None):
    """
    Displays a summary of all hosts configured thus far, and what role each
    will play.

    Shows total nodes/masters, hints for performing/modifying the deployment
    with additional setup, warnings for invalid or sub-optimal configurations.
    """
    click.clear()
    click.echo('*** Installation Summary ***\n')
    click.echo('Hosts:')
    for host in hosts:
        print_host_summary(hosts, host)

    masters = [host for host in hosts if host.master]
    nodes = [host for host in hosts if host.node]
    dedicated_nodes = [host for host in hosts if host.node and not host.master]
    click.echo('')
    click.echo('Total OpenShift Masters: %s' % len(masters))
    click.echo('Total OpenShift Nodes: %s' % len(nodes))

    if len(masters) == 1 and version != '3.0':
        ha_hint_message = """
NOTE: Add a total of 3 or more Masters to perform an HA installation."""
        click.echo(ha_hint_message)
    elif len(masters) == 2:
        min_masters_message = """
WARNING: A minimum of 3 masters are required to perform an HA installation.
Please add one more to proceed."""
        click.echo(min_masters_message)
    elif len(masters) >= 3:
        ha_message = """
NOTE: Multiple Masters specified, this will be an HA deployment with a separate
etcd cluster. You will be prompted to provide the FQDN of a load balancer and
a host for storage once finished entering hosts.
"""
        click.echo(ha_message)

        dedicated_nodes_message = """
WARNING: Dedicated Nodes are recommended for an HA deployment. If no dedicated
Nodes are specified, each configured Master will be marked as a schedulable
Node."""

        min_ha_nodes_message = """
WARNING: A minimum of 3 dedicated Nodes are recommended for an HA
deployment."""
        if len(dedicated_nodes) == 0:
            click.echo(dedicated_nodes_message)
        elif len(dedicated_nodes) < 3:
            click.echo(min_ha_nodes_message)

    click.echo('')

def print_host_summary(all_hosts, host):
    click.echo("- %s" % host.connect_to)
    if host.master:
        click.echo(" - OpenShift Master")
    if host.node:
        if host.is_dedicated_node():
            click.echo(" - OpenShift Node (Dedicated)")
        elif host.is_schedulable_node(all_hosts):
            click.echo(" - OpenShift Node")
        else:
            click.echo(" - OpenShift Node (Unscheduled)")
    if host.master_lb:
        if host.preconfigured:
            click.echo(" - Load Balancer (Preconfigured)")
        else:
            click.echo(" - Load Balancer (HAProxy)")
    if host.master:
        if host.is_etcd_member(all_hosts):
            click.echo(" - Etcd Member")
        else:
            click.echo(" - Etcd (Embedded)")
    if host.storage:
        click.echo(" - Storage")
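
# A sketch of what the summary above prints for an all-in-one host; the exact
# role qualifiers depend on the Host helper methods in ooinstall.oo_config:
#   - master1.example.com
#    - OpenShift Master
#    - OpenShift Node
#    - Etcd (Embedded)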

def collect_master_lb(hosts):
    """
    Get a valid load balancer from the user and append it to the list of
    hosts.

    Ensure user does not specify a system already used as a master/node as
    this is an invalid configuration.
    """
    message = """
Setting up High Availability Masters requires a load balancing solution.
Please provide the FQDN of a host that will be configured as a proxy. This
can be either an existing load balancer configured to balance all masters on
port 8443 or a new host that will have HAProxy installed on it.

If the host provided is not yet configured, a reference haproxy load
balancer will be installed. It's important to note that while the rest of the
environment will be fault tolerant, this reference load balancer will not be.
It can be replaced post-installation with a load balancer with the same
hostname.
"""
    click.echo(message)
    host_props = {}

    # Using an embedded function here so we have access to the hosts list:
    def validate_prompt_lb(hostname):
        # Run the standard hostname check first:
        hostname = validate_prompt_hostname(hostname)

        # Make sure this host wasn't already specified:
        for host in hosts:
            if host.connect_to == hostname and (host.master or host.node):
                raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
                                         'please specify a separate host' % hostname)
        return hostname

    host_props['connect_to'] = click.prompt('Enter hostname or IP address',
                                            value_proc=validate_prompt_lb)
    install_haproxy = \
        click.confirm('Should the reference haproxy load balancer be installed on this host?')
    host_props['preconfigured'] = not install_haproxy
    host_props['master'] = False
    host_props['node'] = False
    host_props['master_lb'] = True
    master_lb = Host(**host_props)
    hosts.append(master_lb)

def collect_storage_host(hosts):
    """
    Get a valid host for storage from the user and append it to the list of
    hosts.
    """
    message = """
Setting up High Availability Masters requires a storage host. Please provide a
host that will be configured as a Registry Storage.

Note: Containerized storage hosts are not currently supported.
"""
    click.echo(message)
    host_props = {}

    first_master = next(host for host in hosts if host.master)
    hostname_or_ip = click.prompt('Enter hostname or IP address',
                                  value_proc=validate_prompt_hostname,
                                  default=first_master.connect_to)

    existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts)
    if existing and existing_host.node:
        existing_host.storage = True
    else:
        host_props['connect_to'] = hostname_or_ip
        host_props['preconfigured'] = False
        host_props['master'] = False
        host_props['node'] = False
        host_props['storage'] = True
        storage = Host(**host_props)
        hosts.append(storage)

def is_host_already_node_or_master(hostname, hosts):
    is_existing = False
    existing_host = None

    for host in hosts:
        if host.connect_to == hostname and (host.master or host.node):
            is_existing = True
            existing_host = host

    return is_existing, existing_host
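
# confirm_hosts_facts below prints one CSV line per non-preconfigured host in
# the "connect_to,IP,public IP,hostname,public hostname" format, e.g. (values
# are illustrative):
#   master1.example.com,10.0.0.1,203.0.113.10,master1.internal,master1.example.com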

def confirm_hosts_facts(oo_cfg, callback_facts):
    hosts = oo_cfg.hosts
    click.clear()
    message = """
A list of the facts gathered from the provided hosts follows. Because it is
often the case that the hostname for a system inside the cluster is different
from the hostname that is resolvable from command line or web clients
these settings cannot be validated automatically.

For some cloud providers the installer is able to gather metadata exposed in
the instance so reasonable defaults will be provided.

Please confirm that they are correct before moving forward.
"""
    notes = """
Format:

connect_to,IP,public IP,hostname,public hostname

Notes:
 * The installation host is the hostname from the installer's perspective.
 * The IP of the host should be the internal IP of the instance.
 * The public IP should be the externally accessible IP associated with the instance
 * The hostname should resolve to the internal IP from the instances
   themselves.
 * The public hostname should resolve to the external ip from hosts outside of
   the cloud.
"""

    # For testing purposes we need to click.echo only once, so build up
    # the message:
    output = message

    default_facts_lines = []
    default_facts = {}
    for h in hosts:
        if h.preconfigured:
            continue
        try:
            default_facts[h.connect_to] = {}
            h.ip = callback_facts[h.connect_to]["common"]["ip"]
            h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
            h.hostname = callback_facts[h.connect_to]["common"]["hostname"]
            h.public_hostname = callback_facts[h.connect_to]["common"]["public_hostname"]
        except KeyError:
            click.echo("Problem fetching facts from {}".format(h.connect_to))
            continue

        default_facts_lines.append(",".join([h.connect_to,
                                             h.ip,
                                             h.public_ip,
                                             h.hostname,
                                             h.public_hostname]))
        output = "%s\n%s" % (output, ",".join([h.connect_to,
                                               h.ip,
                                               h.public_ip,
                                               h.hostname,
                                               h.public_hostname]))

    output = "%s\n%s" % (output, notes)
    click.echo(output)
    facts_confirmed = click.confirm("Do the above facts look correct?")
    if not facts_confirmed:
        message = """
Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
""" % oo_cfg.config_path
        click.echo(message)
        # Make sure we actually write out the config file.
        oo_cfg.save_to_disk()
        sys.exit(0)
    return default_facts

def check_hosts_config(oo_cfg, unattended):
    click.clear()
    masters = [host for host in oo_cfg.hosts if host.master]

    if len(masters) == 2:
        click.echo("A minimum of 3 Masters are required for HA deployments.")
        sys.exit(1)

    if len(masters) > 1:
        master_lb = [host for host in oo_cfg.hosts if host.master_lb]
        if len(master_lb) > 1:
            click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.')
            sys.exit(1)
        elif len(master_lb) == 1:
            if master_lb[0].master or master_lb[0].node:
                click.echo('ERROR: The Master load balancer is configured as a master or node. ' \
                           'Please correct this.')
                sys.exit(1)
        else:
            message = """
ERROR: No master load balancer specified in config. You must provide the FQDN
of a load balancer to balance the API (port 8443) on all Master hosts.

https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
"""
            click.echo(message)
            sys.exit(1)

    dedicated_nodes = [host for host in oo_cfg.hosts if host.node and not host.master]
    if len(dedicated_nodes) == 0:
        message = """
WARNING: No dedicated Nodes specified. By default, colocated Masters have
their Nodes set to unschedulable. If you proceed all nodes will be labelled
as schedulable.
"""
        if unattended:
            click.echo(message)
        else:
            confirm_continue(message)

    return

def get_variant_and_version(multi_master=False):
    message = "\nWhich variant would you like to install?\n\n"
    i = 1
    combos = get_variant_version_combos()
    for (variant, version) in combos:
        message = "%s\n(%s) %s %s" % (message, i, variant.description,
                                      version.name)
        i = i + 1
    message = "%s\n" % message

    click.echo(message)
    if multi_master:
        click.echo('NOTE: 3.0 installations are not')
    response = click.prompt("Choose a variant from above: ", default=1)
    product, version = combos[response - 1]

    return product, version

def confirm_continue(message):
    if message:
        click.echo(message)
    click.confirm("Are you ready to continue?", default=False, abort=True)
    return

def error_if_missing_info(oo_cfg):
    missing_info = False
    if not oo_cfg.hosts:
        missing_info = True
        click.echo('For unattended installs, hosts must be specified on the '
                   'command line or in the config file: %s' % oo_cfg.config_path)
        sys.exit(1)

    if 'ansible_ssh_user' not in oo_cfg.settings:
        click.echo("Must specify ansible_ssh_user in configuration file.")
        sys.exit(1)

    # Lookup a variant based on the key we were given:
    if not oo_cfg.settings['variant']:
        click.echo("No variant specified in configuration file.")
        sys.exit(1)
    ver = None
    if 'variant_version' in oo_cfg.settings:
        ver = oo_cfg.settings['variant_version']
    variant, version = find_variant(oo_cfg.settings['variant'], version=ver)
    if variant is None or version is None:
        err_variant_name = oo_cfg.settings['variant']
        if ver:
            err_variant_name = "%s %s" % (err_variant_name, ver)
        click.echo("%s is not an installable variant." % err_variant_name)
        sys.exit(1)
    oo_cfg.settings['variant_version'] = version.name

    missing_facts = oo_cfg.calc_missing_facts()
    if len(missing_facts) > 0:
        missing_info = True
        click.echo('For unattended installs, facts must be provided for all masters/nodes:')
        for host in missing_facts:
            click.echo('Host "%s" missing facts: %s' % (host, ", ".join(missing_facts[host])))

    if missing_info:
        sys.exit(1)

def get_missing_info_from_user(oo_cfg):
    """ Prompts the user for any information missing from the given configuration. """
    click.clear()

    message = """
Welcome to the OpenShift Enterprise 3 installation.

Please confirm that the following prerequisites have been met:

* All systems where OpenShift will be installed are running Red Hat Enterprise
  Linux 7.
* All systems are properly subscribed to the required OpenShift Enterprise 3
  repositories.
* All systems have run docker-storage-setup (part of the Red Hat docker RPM).
* All systems have working DNS that resolves not only from the perspective of
  the installer but also from within the cluster.

When the process completes you will have a default configuration for Masters
and Nodes. For ongoing environment maintenance it's recommended that the
official Ansible playbooks be used.

For more information on installation prerequisites please see:
https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.html
"""
    confirm_continue(message)
    click.clear()

    if oo_cfg.settings.get('ansible_ssh_user', '') == '':
        oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user()
        click.clear()

    if oo_cfg.settings.get('variant', '') == '':
        variant, version = get_variant_and_version()
        oo_cfg.settings['variant'] = variant.name
        oo_cfg.settings['variant_version'] = version.name
        click.clear()

    if not oo_cfg.hosts:
        oo_cfg.hosts = collect_hosts(oo_cfg)
        click.clear()

    if not oo_cfg.settings.get('master_routingconfig_subdomain', None):
        oo_cfg.settings['master_routingconfig_subdomain'] = get_master_routingconfig_subdomain()
        click.clear()

    return oo_cfg

def collect_new_nodes(oo_cfg):
    click.clear()
    click.echo('*** New Node Configuration ***')
    message = """
Add new nodes here
"""
    click.echo(message)
    return collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)

def get_installed_hosts(hosts, callback_facts):
    installed_hosts = []

    # count nativeha lb as an installed host
    try:
        first_master = next(host for host in hosts if host.master)
        lb_hostname = callback_facts[first_master.connect_to]['master'].get('cluster_hostname', '')
        lb_host = \
            next(host for host in hosts if host.ip == callback_facts[lb_hostname]['common']['ip'])

        installed_hosts.append(lb_host)
    except (KeyError, StopIteration):
        pass

    for host in hosts:
        if host.connect_to in callback_facts.keys() and is_installed_host(host, callback_facts):
            installed_hosts.append(host)
    return installed_hosts
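
# is_installed_host below treats a host as installed when Ansible reported a
# usable OpenShift version for it, or when it is a master load balancer or
# preconfigured host that the installer does not install itself.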

def is_installed_host(host, callback_facts):
    version_found = 'common' in callback_facts[host.connect_to].keys() and \
                    callback_facts[host.connect_to]['common'].get('version', '') and \
                    callback_facts[host.connect_to]['common'].get('version', '') != 'None'

    return version_found or host.master_lb or host.preconfigured
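
# How get_hosts_to_run_on below handles an already installed environment
# (a summary of its branches):
#   * with --force: every host stays in the run list, i.e. a full reinstall
#   * interactive without --force: prompt whether to only add nodes or reinstall
#   * unattended without --force and no new hosts in the config: abort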

# pylint: disable=too-many-branches
# This pylint error will be corrected shortly in separate PR.
def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):

    # Copy the list of existing hosts so we can remove any already installed nodes.
    hosts_to_run_on = list(oo_cfg.hosts)

    # Check if master or nodes already have something installed
    installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
    if len(installed_hosts) > 0:
        click.echo('Installed environment detected.')
        # This check has to happen before we start removing hosts later in this method
        if not force:
            if not unattended:
                click.echo('By default the installer only adds new nodes ' \
                           'to an installed environment.')
                response = click.prompt('Do you want to (1) only add additional nodes or ' \
                                        '(2) reinstall the existing hosts ' \
                                        'potentially erasing any custom changes?',
                                        type=int)
                # TODO: this should be reworked with error handling.
                # Click can certainly do this for us.
                # This should be refactored as soon as we add a 3rd option.
                if response == 1:
                    force = False
                if response == 2:
                    force = True

        # present a message listing already installed hosts and remove hosts if needed
        for host in installed_hosts:
            if host.master:
                click.echo("{} is already an OpenShift Master".format(host))
                # Masters stay in the list, we need to run against them when adding
                # new nodes.
            elif host.node:
                click.echo("{} is already an OpenShift Node".format(host))
                # force is only used for reinstalls so we don't want to remove
                # anything.
                if not force:
                    hosts_to_run_on.remove(host)

        # Handle the cases where we know about uninstalled systems
        new_hosts = set(hosts_to_run_on) - set(installed_hosts)
        if len(new_hosts) > 0:
            for new_host in new_hosts:
                click.echo("{} is currently uninstalled".format(new_host))

            # Fall through
            click.echo('Adding additional nodes...')
        else:
            if unattended:
                if not force:
                    click.echo('Installed environment detected and no additional ' \
                               'nodes specified: aborting. If you want a fresh install, use ' \
                               '`atomic-openshift-installer install --force`')
                    sys.exit(1)
            else:
                if not force:
                    new_nodes = collect_new_nodes(oo_cfg)

                    hosts_to_run_on.extend(new_nodes)
                    oo_cfg.hosts.extend(new_nodes)

                    openshift_ansible.set_config(oo_cfg)
                    click.echo('Gathering information from hosts...')
                    callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose)
                    if error or callback_facts is None:
                        click.echo("There was a problem fetching the required information. See " \
                                   "{} for details.".format(oo_cfg.settings['ansible_log_path']))
                        sys.exit(1)
    else:
        pass  # proceeding as normal should do a clean install

    return hosts_to_run_on, callback_facts

@click.group()
@click.pass_context
@click.option('--unattended', '-u', is_flag=True, default=False)
@click.option('--configuration', '-c',
              type=click.Path(file_okay=True,
                              dir_okay=False,
                              writable=True,
                              readable=True),
              default=None)
@click.option('--ansible-playbook-directory',
              '-a',
              type=click.Path(exists=True,
                              file_okay=False,
                              dir_okay=True,
                              readable=True),
              # callback=validate_ansible_dir,
              default=DEFAULT_PLAYBOOK_DIR,
              envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
@click.option('--ansible-config',
              type=click.Path(file_okay=True,
                              dir_okay=False,
                              writable=True,
                              readable=True),
              default=None)
@click.option('--ansible-log-path',
              type=click.Path(file_okay=True,
                              dir_okay=False,
                              writable=True,
                              readable=True),
              default="/tmp/ansible.log")
@click.option('-v', '--verbose',
              is_flag=True, default=False)
#pylint: disable=too-many-arguments
#pylint: disable=line-too-long
# Main CLI entrypoint, not much we can do about too many arguments.
def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
    """
    atomic-openshift-installer makes the process for installing OSE or AEP
    easier by interactively gathering the data needed to run on each host.
    It can also be run in unattended mode if provided with a configuration file.

    Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
    """
    ctx.obj = {}
    ctx.obj['unattended'] = unattended
    ctx.obj['configuration'] = configuration
    ctx.obj['ansible_config'] = ansible_config
    ctx.obj['ansible_log_path'] = ansible_log_path
    ctx.obj['verbose'] = verbose

    try:
        oo_cfg = OOConfig(ctx.obj['configuration'])
    except OOConfigInvalidHostError as e:
        click.echo(e)
        sys.exit(1)

    # If no playbook dir on the CLI, check the config:
    if not ansible_playbook_directory:
        ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')

    # If still no playbook dir, check for the default location:
    if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR):
        ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR
    validate_ansible_dir(ansible_playbook_directory)
    oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
    oo_cfg.ansible_playbook_directory = ansible_playbook_directory
    ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory

    if ctx.obj['ansible_config']:
        oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
    elif 'ansible_config' not in oo_cfg.settings and \
         os.path.exists(DEFAULT_ANSIBLE_CONFIG):
        # If we're installed by RPM this file should exist and we can use it as our default:
        oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG

    oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']

    ctx.obj['oo_cfg'] = oo_cfg
    openshift_ansible.set_config(oo_cfg)
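
# Resolution order for the playbook directory in cli() above:
#   1. --ansible-playbook-directory / OO_ANSIBLE_PLAYBOOK_DIRECTORY
#   2. ansible_playbook_directory from the saved installer config
#   3. DEFAULT_PLAYBOOK_DIR, if that path exists on disk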

@click.command()
@click.pass_context
def uninstall(ctx):
    oo_cfg = ctx.obj['oo_cfg']
    verbose = ctx.obj['verbose']

    if len(oo_cfg.hosts) == 0:
        click.echo("No hosts defined in: %s" % oo_cfg.config_path)
        sys.exit(1)

    click.echo("OpenShift will be uninstalled from the following hosts:\n")
    if not ctx.obj['unattended']:
        # Prompt interactively to confirm:
        for host in oo_cfg.hosts:
            click.echo(" * %s" % host.connect_to)
        proceed = click.confirm("\nDo you wish to proceed?")
        if not proceed:
            click.echo("Uninstall cancelled.")
            sys.exit(0)

    openshift_ansible.run_uninstall_playbook(verbose)

@click.command()
@click.option('--latest-minor', '-l', is_flag=True, default=False)
@click.option('--next-major', '-n', is_flag=True, default=False)
@click.pass_context
def upgrade(ctx, latest_minor, next_major):
    oo_cfg = ctx.obj['oo_cfg']
    verbose = ctx.obj['verbose']

    upgrade_mappings = {
        '3.0': {
            'minor_version': '3.0',
            'minor_playbook': 'v3_0_minor/upgrade.yml',
            'major_version': '3.1',
            'major_playbook': 'v3_0_to_v3_1/upgrade.yml',
        },
        '3.1': {
            'minor_version': '3.1',
            'minor_playbook': 'v3_1_minor/upgrade.yml',
            'major_playbook': 'v3_1_to_v3_2/upgrade.yml',
            'major_version': '3.2',
        },
    }

    if len(oo_cfg.hosts) == 0:
        click.echo("No hosts defined in: %s" % oo_cfg.config_path)
        sys.exit(1)

    old_variant = oo_cfg.settings['variant']
    old_version = oo_cfg.settings['variant_version']
    mapping = upgrade_mappings.get(old_version)

    message = """
This tool will help you upgrade your existing OpenShift installation.
"""
    click.echo(message)

    if not (latest_minor or next_major):
        click.echo("Version {} found. Do you want to update to the latest version of {} " \
                   "or migrate to the next major release?".format(old_version, old_version))
        response = click.prompt("(1) Update to latest {} " \
                                "(2) Migrate to next release".format(old_version),
                                type=click.Choice(['1', '2']),)
        if response == "1":
            latest_minor = True
        if response == "2":
            next_major = True

    if next_major:
        playbook = mapping['major_playbook']
        new_version = mapping['major_version']
        # Update config to reflect the version we're targeting, we'll write
        # to disk once ansible completes successfully, not before.
        oo_cfg.settings['variant_version'] = new_version
        if oo_cfg.settings['variant'] == 'enterprise':
            oo_cfg.settings['variant'] = 'openshift-enterprise'

    if latest_minor:
        playbook = mapping['minor_playbook']
        new_version = mapping['minor_version']

    click.echo("OpenShift will be upgraded from %s %s to %s %s on the following hosts:\n" % (
        old_variant, old_version, oo_cfg.settings['variant'], new_version))
    for host in oo_cfg.hosts:
        click.echo(" * %s" % host.connect_to)

    if not ctx.obj['unattended']:
        # Prompt interactively to confirm:
        proceed = click.confirm("\nDo you wish to proceed?")
        if not proceed:
            click.echo("Upgrade cancelled.")
            sys.exit(0)

    retcode = openshift_ansible.run_upgrade_playbook(playbook, verbose)
    if retcode > 0:
        click.echo("Errors encountered during upgrade, please check %s." %
                   oo_cfg.settings['ansible_log_path'])
    else:
        oo_cfg.save_to_disk()
        click.echo("Upgrade completed! Rebooting all hosts is recommended.")
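
# Each upgrade_mappings entry in upgrade() above keys off the currently
# installed version and points at the playbook for an in-place minor update
# ('minor_playbook') or for a migration to the next major release
# ('major_playbook'), along with the version each path results in.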

@click.command()
@click.option('--force', '-f', is_flag=True, default=False)
@click.option('--gen-inventory', is_flag=True, default=False,
              help="Generate an ansible inventory file and exit.")
@click.pass_context
def install(ctx, force, gen_inventory):
    oo_cfg = ctx.obj['oo_cfg']
    verbose = ctx.obj['verbose']

    if ctx.obj['unattended']:
        error_if_missing_info(oo_cfg)
    else:
        oo_cfg = get_missing_info_from_user(oo_cfg)

    check_hosts_config(oo_cfg, ctx.obj['unattended'])

    print_installation_summary(oo_cfg.hosts, oo_cfg.settings.get('variant_version', None))
    click.echo('Gathering information from hosts...')
    callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
                                                            verbose)
    if error or callback_facts is None:
        click.echo("There was a problem fetching the required information. " \
                   "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
        sys.exit(1)

    hosts_to_run_on, callback_facts = get_hosts_to_run_on(
        oo_cfg, callback_facts, ctx.obj['unattended'], force, verbose)

    # We already verified this is not the case for unattended installs, so this can
    # only trigger for live CLI users:
    # TODO: if there are *new* nodes and this is a live install, we may need the user
    # to confirm the settings for new nodes. Look into this once we're distinguishing
    # between new and pre-existing nodes.
    if len(oo_cfg.calc_missing_facts()) > 0:
        confirm_hosts_facts(oo_cfg, callback_facts)

    # Write quick installer config file to disk:
    oo_cfg.save_to_disk()

    # Write ansible inventory file to disk:
    inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on)

    click.echo()
    click.echo('Wrote atomic-openshift-installer config: %s' % oo_cfg.config_path)
    click.echo("Wrote ansible inventory: %s" % inventory_file)
    click.echo()

    if gen_inventory:
        sys.exit(0)

    click.echo('Ready to run installation process.')
    message = """
If changes are needed please edit the config file above and re-run.
"""
    if not ctx.obj['unattended']:
        confirm_continue(message)

    error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.hosts,
                                                hosts_to_run_on, verbose)
    if error:
        # The bootstrap script will print out the log location.
        message = """
An error was detected. After resolving the problem please relaunch the
installation process.
"""
        click.echo(message)
        sys.exit(1)
    else:
        message = """
The installation was successful!

If this is your first time installing please take a look at the Administrator
Guide for advanced options related to routing, storage, authentication and much
more:

http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
"""
        click.echo(message)
        click.pause()

cli.add_command(install)
cli.add_command(upgrade)
cli.add_command(uninstall)
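
# Typical invocations (flags as defined above; the config file path shown is
# illustrative, not a default):
#   atomic-openshift-installer install
#   atomic-openshift-installer -u -c /path/to/installer.cfg.yml install
#   atomic-openshift-installer upgrade --latest-minor
#   atomic-openshift-installer uninstall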

if __name__ == '__main__':
    # This is expected behaviour for context passing with click library:
    # pylint: disable=unexpected-keyword-arg
    cli(obj={})