fixture.py

# pylint: disable=missing-docstring
import os
import yaml

import ooinstall.cli_installer as cli

from test.oo_config_tests import OOInstallFixture
from click.testing import CliRunner

# Substitute in a product name before use:
SAMPLE_CONFIG = """
variant: %s
variant_version: 3.3
master_routingconfig_subdomain: example.com
version: v2
deployment:
    ansible_ssh_user: root
    hosts:
      - connect_to: 10.0.0.1
        ip: 10.0.0.1
        hostname: master-private.example.com
        public_ip: 24.222.0.1
        public_hostname: master.example.com
        roles:
            - master
            - node
      - connect_to: 10.0.0.2
        ip: 10.0.0.2
        hostname: node1-private.example.com
        public_ip: 24.222.0.2
        public_hostname: node1.example.com
        roles:
            - node
      - connect_to: 10.0.0.3
        ip: 10.0.0.3
        hostname: node2-private.example.com
        public_ip: 24.222.0.3
        public_hostname: node2.example.com
        roles:
            - node
    roles:
        master:
        node:
"""


def read_yaml(config_file_path):
    cfg_f = open(config_file_path, 'r')
    config = yaml.safe_load(cfg_f.read())
    cfg_f.close()
    return config
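
# Illustrative sketch (not part of the original fixture): SAMPLE_CONFIG is a
# template, so substitute a product name before writing it out, then read it
# back with read_yaml(). The temporary path below is hypothetical.
#
#   config_path = '/tmp/ooinstall.conf'  # hypothetical path for illustration
#   with open(config_path, 'w') as cfg_f:
#       cfg_f.write(SAMPLE_CONFIG % 'openshift-enterprise')
#   config = read_yaml(config_path)
#   assert config['deployment']['hosts'][0]['connect_to'] == '10.0.0.1'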


class OOCliFixture(OOInstallFixture):

    def setUp(self):
        OOInstallFixture.setUp(self)
        self.runner = CliRunner()

        # Add any arguments you would like to test here; the defaults ensure
        # we only do unattended invocations and use temporary files/dirs.
        self.cli_args = ["-a", self.work_dir]

    def run_cli(self):
        return self.runner.invoke(cli.cli, self.cli_args)

    def assert_result(self, result, exit_code):
        if result.exit_code != exit_code:
            msg = ["Unexpected result from CLI execution\n"]
            msg.append("Exit code: %s\n" % result.exit_code)
            msg.append("Exception: %s\n" % result.exception)
            import traceback
            msg.extend(traceback.format_exception(*result.exc_info))
            msg.append("Output:\n%s" % result.output)
            self.fail("".join(msg))

    def _verify_load_facts(self, load_facts_mock):
        """ Check that we ran load facts with expected inputs. """
        load_facts_args = load_facts_mock.call_args[0]
        self.assertEquals(os.path.join(self.work_dir, "hosts"),
                          load_facts_args[0])
        self.assertEquals(os.path.join(self.work_dir,
                                       "playbooks/byo/openshift_facts.yml"),
                          load_facts_args[1])
        env_vars = load_facts_args[2]
        self.assertEquals(os.path.join(self.work_dir,
                                       '.ansible/callback_facts.yaml'),
                          env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
        self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])

    def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
        """ Check that we ran playbook with expected inputs. """
        hosts = run_playbook_mock.call_args[0][1]
        hosts_to_run_on = run_playbook_mock.call_args[0][2]
        self.assertEquals(exp_hosts_len, len(hosts))
        self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))

    def _verify_config_hosts(self, written_config, host_count):
        self.assertEquals(host_count, len(written_config['deployment']['hosts']))
        for host in written_config['deployment']['hosts']:
            self.assertTrue('hostname' in host)
            self.assertTrue('public_hostname' in host)
            if 'preconfigured' not in host:
                if 'roles' in host:
                    self.assertTrue('node' in host['roles'] or 'storage' in host['roles'])
                self.assertTrue('ip' in host)
                self.assertTrue('public_ip' in host)

    # pylint: disable=too-many-arguments
    def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
                                    run_playbook_mock, cli_input,
                                    exp_hosts_len=None, exp_hosts_to_run_on_len=None,
                                    force=None):
        """
        Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
        few subtle branches in the logic. The goal with this method is simply
        to handle all the messy stuff here and allow the main test cases to be
        easily read. The basic idea is to modify mock_facts to return a
        version indicating OpenShift is already installed on particular hosts.
        """
        load_facts_mock.return_value = (mock_facts, 0)
        run_playbook_mock.return_value = 0

        if cli_input:
            self.cli_args.append("install")
            result = self.runner.invoke(cli.cli,
                                        self.cli_args,
                                        input=cli_input)
        else:
            config_file = self.write_config(
                os.path.join(self.work_dir,
                             'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')

            self.cli_args.extend(["-c", config_file, "install"])
            if force:
                self.cli_args.append("--force")
            result = self.runner.invoke(cli.cli, self.cli_args)
            written_config = read_yaml(config_file)
            self._verify_config_hosts(written_config, exp_hosts_len)

        if "If you want to force reinstall" in result.output:
            # verify we exited on seeing installed hosts
            self.assertEqual(result.exit_code, 1)
        else:
            self.assert_result(result, 0)
            self._verify_load_facts(load_facts_mock)
            self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)

            # Make sure we ran on the expected masters and nodes:
            hosts = run_playbook_mock.call_args[0][1]
            hosts_to_run_on = run_playbook_mock.call_args[0][2]
            self.assertEquals(exp_hosts_len, len(hosts))
            self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
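
    # Illustrative sketch (an assumption, not code from this file): per the docstring
    # above, a subclass test would exercise the "already installed" branch by marking
    # a host as installed in the mocked facts before calling the helper. MOCK_FACTS
    # and the exact shape of the facts dict are assumed from the surrounding suite.
    #
    #   mock_facts = copy.deepcopy(MOCK_FACTS)
    #   mock_facts['10.0.0.1']['common']['version'] = "3.3"
    #   self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
    #                                    run_playbook_mock,
    #                                    cli_input=None,
    #                                    exp_hosts_len=3,
    #                                    exp_hosts_to_run_on_len=2,
    #                                    force=False)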


# pylint: disable=too-many-arguments,too-many-branches,too-many-statements
def build_input(ssh_user=None, hosts=None, variant_num=None,
                add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
                master_lb=('', False), storage=None):
    """
    Build an input string simulating a user entering values in an interactive
    attended install.

    This is intended to give us one place to update when the CLI prompts change.
    We should aim to keep this dependent on optional keyword arguments with
    sensible defaults to keep things from getting too fragile.
    """
    inputs = [
        'y',  # let's proceed
    ]
    if ssh_user:
        inputs.append(ssh_user)
    if variant_num:
        inputs.append(str(variant_num))  # Choose variant + version

    num_masters = 0
    if hosts:
        i = 0
        for (host, is_master, is_containerized) in hosts:
            inputs.append(host)
            if is_master:
                inputs.append('y')
                num_masters += 1
            else:
                inputs.append('n')

            if is_containerized:
                inputs.append('container')
            else:
                inputs.append('rpm')

            # inputs.append('rpm')
            # We should not be prompted to add more hosts if we're currently at
            # 2 masters; that is an invalid HA configuration, so the question is
            # skipped and the user must enter the next host:
            if num_masters != 2:
                if i < len(hosts) - 1:
                    if num_masters >= 1:
                        inputs.append('y')  # Add more hosts
                else:
                    inputs.append('n')  # Done adding hosts
            i += 1

    # You can pass a single master_lb or a list if you intend for one to get rejected:
    if isinstance(master_lb[0], list) or isinstance(master_lb[0], tuple):
        inputs.extend(master_lb[0])
    else:
        inputs.append(master_lb[0])
    if master_lb[0]:
        inputs.append('y' if master_lb[1] else 'n')

    if storage:
        inputs.append(storage)

    inputs.append('subdomain.example.com')
    inputs.append('proxy.example.com')
    inputs.append('proxy-private.example.com')
    inputs.append('exclude.example.com')

    # TODO: support option 2, fresh install
    if add_nodes:
        if schedulable_masters_ok:
            inputs.append('y')
        inputs.append('1')  # Add more nodes
        i = 0
        for (host, _, is_containerized) in add_nodes:
            inputs.append(host)
            if is_containerized:
                inputs.append('container')
            else:
                inputs.append('rpm')
            # inputs.append('rpm')
            if i < len(add_nodes) - 1:
                inputs.append('y')  # Add more hosts
            else:
                inputs.append('n')  # Done adding hosts
            i += 1

    if add_nodes is None:
        total_hosts = hosts
    else:
        total_hosts = hosts + add_nodes
    if total_hosts is not None and num_masters == len(total_hosts):
        inputs.append('y')

    inputs.extend([
        confirm_facts,
        'y',  # let's do this
        'y',
    ])

    return '\n'.join(inputs)
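

# Illustrative sketch (not part of the original fixture): a test module built on this
# fixture might drive an attended install roughly as below. The patch targets
# ('ooinstall.openshift_ansible.run_main_playbook' and
# 'ooinstall.openshift_ansible.load_system_facts') and MOCK_FACTS are assumptions
# about the surrounding test suite, not definitions from this file.
#
#   from mock import patch
#
#   class ExampleAttendedInstallTests(OOCliFixture):
#       @patch('ooinstall.openshift_ansible.run_main_playbook')
#       @patch('ooinstall.openshift_ansible.load_system_facts')
#       def test_simple_attended_install(self, load_facts_mock, run_playbook_mock):
#           load_facts_mock.return_value = (MOCK_FACTS, 0)
#           run_playbook_mock.return_value = 0
#           cli_input = build_input(ssh_user='root',
#                                   hosts=[('10.0.0.1', True, False),
#                                          ('10.0.0.2', False, False),
#                                          ('10.0.0.3', False, False)],
#                                   variant_num=1,
#                                   confirm_facts='y')
#           self.cli_args.append("install")
#           result = self.runner.invoke(cli.cli, self.cli_args, input=cli_input)
#           self.assert_result(result, 0)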