@@ -1,424 +0,0 @@
-#!/usr/bin/env python2
-
-import argparse
-import ConfigParser
-import os
-import sys
-import subprocess
-import traceback
-
-
-class Cluster(object):
-    """
-    Provide Command, Control and Configuration (c3) Interface for OpenShift Clusters
-    """
-
-    def __init__(self):
-        # setup ansible ssh environment
-        if 'ANSIBLE_SSH_ARGS' not in os.environ:
-            os.environ['ANSIBLE_SSH_ARGS'] = (
-                '-o ForwardAgent=yes '
-                '-o StrictHostKeyChecking=no '
-                '-o UserKnownHostsFile=/dev/null '
-                '-o ControlMaster=auto '
-                '-o ControlPersist=600s '
-            )
-        # Because of `UserKnownHostsFile=/dev/null`
-        # our `.ssh/known_hosts` file most probably misses the ssh host public keys
-        # of our servers.
-        # In that case, ansible serializes the execution of ansible modules
-        # because we might be interactively prompted to accept the ssh host public keys.
-        # Because of `StrictHostKeyChecking=no` we know that we won't be prompted
-        # So, we don't want our modules execution to be serialized.
-        os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
-        # TODO: A more secure way to proceed would consist in dynamically
-        # retrieving the ssh host public keys from the IaaS interface
-        if 'ANSIBLE_SSH_PIPELINING' not in os.environ:
-            os.environ['ANSIBLE_SSH_PIPELINING'] = 'True'
-
-    def get_deployment_type(self, args):
-        """
-        Get the deployment_type based on the environment variables and the
-        command line arguments
-        :param args: command line arguments provided by the user
-        :return: string representing the deployment type
-        """
-        deployment_type = 'origin'
-        if args.deployment_type:
-            deployment_type = args.deployment_type
-        elif 'OS_DEPLOYMENT_TYPE' in os.environ:
-            deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
-        return deployment_type
-
-
-    def create(self, args):
-        """
-        Create an OpenShift cluster for given provider
-        :param args: command line arguments provided by user
-        """
-        cluster = {'cluster_id': args.cluster_id,
-                   'deployment_type': self.get_deployment_type(args)}
-        playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider)
-        inventory = self.setup_provider(args.provider)
-
-        cluster['num_masters'] = args.masters
-        cluster['num_nodes'] = args.nodes
-        cluster['num_infra'] = args.infra
-        cluster['num_etcd'] = args.etcd
-        cluster['cluster_env'] = args.env
-
-        if args.cloudprovider and args.provider == 'openstack':
-            cluster['openshift_cloudprovider_kind'] = 'openstack'
-            cluster['openshift_cloudprovider_openstack_auth_url'] = os.getenv('OS_AUTH_URL')
-            cluster['openshift_cloudprovider_openstack_username'] = os.getenv('OS_USERNAME')
-            cluster['openshift_cloudprovider_openstack_password'] = os.getenv('OS_PASSWORD')
-            if 'OS_USER_DOMAIN_ID' in os.environ:
-                cluster['openshift_cloudprovider_openstack_domain_id'] = os.getenv('OS_USER_DOMAIN_ID')
-            if 'OS_USER_DOMAIN_NAME' in os.environ:
-                cluster['openshift_cloudprovider_openstack_domain_name'] = os.getenv('OS_USER_DOMAIN_NAME')
-            if 'OS_PROJECT_ID' in os.environ or 'OS_TENANT_ID' in os.environ:
-                cluster['openshift_cloudprovider_openstack_tenant_id'] = os.getenv('OS_PROJECT_ID', os.getenv('OS_TENANT_ID'))
-            if 'OS_PROJECT_NAME' in os.environ or 'OS_TENANT_NAME' in os.environ:
-                cluster['openshift_cloudprovider_openstack_tenant_name'] = os.getenv('OS_PROJECT_NAME', os.getenv('OS_TENANT_NAME'))
-            if 'OS_REGION_NAME' in os.environ:
-                cluster['openshift_cloudprovider_openstack_region'] = os.getenv('OS_REGION_NAME')
-
-        self.action(args, inventory, cluster, playbook)
-
-    def add_nodes(self, args):
-        """
-        Add nodes to an existing cluster for given provider
-        :param args: command line arguments provided by user
-        """
-        cluster = {'cluster_id': args.cluster_id,
-                   'deployment_type': self.get_deployment_type(args),
-                   }
-        playbook = "playbooks/{0}/openshift-cluster/add_nodes.yml".format(args.provider)
-        inventory = self.setup_provider(args.provider)
-
-        cluster['num_nodes'] = args.nodes
-        cluster['num_infra'] = args.infra
-        cluster['cluster_env'] = args.env
-
-        self.action(args, inventory, cluster, playbook)
-
-    def terminate(self, args):
-        """
-        Destroy OpenShift cluster
-        :param args: command line arguments provided by user
-        """
-        cluster = {'cluster_id': args.cluster_id,
-                   'deployment_type': self.get_deployment_type(args),
-                   'cluster_env': args.env,
-                   }
-        playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider)
-        inventory = self.setup_provider(args.provider)
-
-        self.action(args, inventory, cluster, playbook)
-
-    def list(self, args):
-        """
-        List VMs in cluster
-        :param args: command line arguments provided by user
-        """
-        cluster = {'cluster_id': args.cluster_id,
-                   'deployment_type': self.get_deployment_type(args),
-                   'cluster_env': args.env,
-                   }
-        playbook = "playbooks/{0}/openshift-cluster/list.yml".format(args.provider)
-        inventory = self.setup_provider(args.provider)
-
-        self.action(args, inventory, cluster, playbook)
-
-    def config(self, args):
-        """
-        Configure or reconfigure OpenShift across clustered VMs
-        :param args: command line arguments provided by user
-        """
-        cluster = {'cluster_id': args.cluster_id,
-                   'deployment_type': self.get_deployment_type(args),
-                   'cluster_env': args.env,
-                   }
-        playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider)
-        inventory = self.setup_provider(args.provider)
-
-        self.action(args, inventory, cluster, playbook)
-
-    def update(self, args):
-        """
-        Update to latest OpenShift across clustered VMs
-        :param args: command line arguments provided by user
-        """
-        cluster = {'cluster_id': args.cluster_id,
-                   'deployment_type': self.get_deployment_type(args),
-                   'cluster_env': args.env,
-                   }
-
-        playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider)
-        inventory = self.setup_provider(args.provider)
-
-        self.action(args, inventory, cluster, playbook)
-
-    def service(self, args):
-        """
-        Make the same service call across all nodes in the cluster
-        :param args: command line arguments provided by user
-        """
-        cluster = {'cluster_id': args.cluster_id,
-                   'deployment_type': self.get_deployment_type(args),
-                   'new_cluster_state': args.state,
-                   'cluster_env': args.env,
-                   }
-
-        playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider)
-        inventory = self.setup_provider(args.provider)
-
-        self.action(args, inventory, cluster, playbook)
-
-    def setup_provider(self, provider):
-        """
-        Setup ansible playbook environment
-        :param provider: command line arguments provided by user
-        :return: path to inventory for given provider
-        """
-        config = ConfigParser.ConfigParser()
-        if 'gce' == provider:
-            gce_ini_default_path = os.path.join('inventory/gce/hosts/gce.ini')
-            gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
-            if os.path.exists(gce_ini_path):
-                config.readfp(open(gce_ini_path))
-
-            for key in config.options('gce'):
-                os.environ[key] = config.get('gce', key)
-
-            inventory = '-i inventory/gce/hosts'
-        elif 'aws' == provider:
-            config.readfp(open('inventory/aws/hosts/ec2.ini'))
-
-            for key in config.options('ec2'):
-                os.environ[key] = config.get('ec2', key)
-
-            inventory = '-i inventory/aws/hosts'
-
-            key_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
-            key_missing = [key for key in key_vars if key not in os.environ]
-
-            boto_conf_files = ['~/.aws/credentials', '~/.boto']
-            conf_exists = lambda conf: os.path.isfile(os.path.expanduser(conf))
-            boto_configs = [conf for conf in boto_conf_files if conf_exists(conf)]
-
-            if len(key_missing) > 0 and len(boto_configs) == 0:
-                raise ValueError("PROVIDER aws requires {0} environment variable(s). See README_AWS.md".format(key_missing))
-
-        elif 'libvirt' == provider:
-            inventory = '-i inventory/libvirt/hosts'
-        elif 'openstack' == provider:
-            inventory = '-i inventory/openstack/hosts'
-        else:
-            # this code should never be reached
-            raise ValueError("invalid PROVIDER {0}".format(provider))
-
-        return inventory
-
-    def action(self, args, inventory, cluster, playbook):
-        """
-        Build ansible-playbook command line and execute
-        :param args: command line arguments provided by user
-        :param inventory: derived provider library
-        :param cluster: cluster variables for kubernetes
-        :param playbook: ansible playbook to execute
-        """
-
-        verbose = ''
-        if args.verbose > 0:
-            verbose = '-{0}'.format('v' * args.verbose)
-
-        if args.option:
-            for opt in args.option:
-                k, v = opt.split('=', 1)
-                cluster['cli_' + k] = v
-
-        ansible_extra_vars = '-e \'{0}\''.format(
-            ' '.join(['%s=%s' % (key, value) for (key, value) in cluster.items()])
-        )
-
-        command = 'ansible-playbook {0} {1} {2} {3}'.format(
-            verbose, inventory, ansible_extra_vars, playbook
-        )
-
-        if args.profile:
-            command = 'ANSIBLE_CALLBACK_PLUGINS=ansible-profile/callback_plugins ' + command
-
-        if args.verbose > 1:
-            command = 'time {0}'.format(command)
-
-        if args.verbose > 0:
-            sys.stderr.write('RUN [{0}]\n'.format(command))
-            sys.stderr.flush()
-
-        try:
-            subprocess.check_call(command, shell=True)
-        except subprocess.CalledProcessError as exc:
-            raise ActionFailed("ACTION [{0}] failed: {1}"
-                               .format(args.action, exc))
-
-
-class ActionFailed(Exception):
-    """
-    Raised when action failed.
-    """
-    pass
-
-
-if __name__ == '__main__':
-    """
-    User command to invoke ansible playbooks in a "known" configuration
-
-    Reads ~/.openshift-ansible for default configuration items
-    [DEFAULT]
-    validate_cluster_ids = False
-    cluster_ids = marketing,sales
-    providers = gce,aws,libvirt,openstack
-    """
-
-    warning = ("================================================================================\n"
-               "ATTENTION: You are running a community supported utility that has not been\n"
-               "tested by Red Hat. Visit https://docs.openshift.com for supported installation\n"
-               "instructions.\n"
-               "================================================================================\n\n")
-    sys.stderr.write(warning)
-
-    cluster_config = ConfigParser.SafeConfigParser({
-        'cluster_ids': 'marketing,sales',
-        'validate_cluster_ids': 'False',
-        'providers': 'gce,aws,libvirt,openstack',
-    })
-
-    path = os.path.expanduser("~/.openshift-ansible")
-    if os.path.isfile(path):
-        cluster_config.read(path)
-
-    cluster = Cluster()
-
-    parser = argparse.ArgumentParser(
-        formatter_class=argparse.RawDescriptionHelpFormatter,
-        description='Python wrapper to ensure proper configuration for OpenShift ansible playbooks',
-        epilog='''\
-This wrapper is overriding the following ansible variables:
-
-    * ANSIBLE_SSH_ARGS:
-      If not set in the environment, this wrapper will use the following value:
-      `-o ForwardAgent=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=600s`
-      If set in the environment, the environment variable value is left untouched and used.
-
-    * ANSIBLE_SSH_PIPELINING:
-      If not set in the environment, this wrapper will set it to `True`.
-      If you experience issues with Ansible SSH pipelining, you can disable it by explicitly setting this environment variable to `False`.
-'''
-    )
-    parser.add_argument('-v', '--verbose', action='count',
-                        help='Multiple -v options increase the verbosity')
-    parser.add_argument('--version', action='version', version='%(prog)s 0.3')
-
-    meta_parser = argparse.ArgumentParser(add_help=False)
-    providers = cluster_config.get('DEFAULT', 'providers').split(',')
-    meta_parser.add_argument('provider', choices=providers, help='provider')
-
-    if cluster_config.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"):
-        meta_parser.add_argument('cluster_id', choices=cluster_config.get('DEFAULT', 'cluster_ids').split(','),
-                                 help='prefix for cluster VM names')
-    else:
-        meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
-
-    meta_parser.add_argument('-t', '--deployment-type',
-                             choices=['origin', 'atomic-enterprise', 'openshift-enterprise'],
-                             help='Deployment type. (default: origin)')
-    meta_parser.add_argument('-o', '--option', action='append',
-                             help='options')
-
-    meta_parser.add_argument('--env', default='dev', type=str,
-                             help='environment for the cluster. Defaults to \'dev\'.')
-
-    meta_parser.add_argument('-p', '--profile', action='store_true',
-                             help='Enable playbook profiling')
-
-    action_parser = parser.add_subparsers(dest='action', title='actions',
-                                          description='Choose from valid actions')
-
-    create_parser = action_parser.add_parser('create', help='Create a cluster',
-                                             parents=[meta_parser])
-    create_parser.add_argument('-c', '--cloudprovider', action='store_true',
-                               help='Enable the cloudprovider')
-    create_parser.add_argument('-m', '--masters', default=1, type=int,
-                               help='number of masters to create in cluster')
-    create_parser.add_argument('-n', '--nodes', default=2, type=int,
-                               help='number of nodes to create in cluster')
-    create_parser.add_argument('-i', '--infra', default=1, type=int,
-                               help='number of infra nodes to create in cluster')
-    create_parser.add_argument('-e', '--etcd', default=0, type=int,
-                               help='number of external etcd hosts to create in cluster')
-    create_parser.set_defaults(func=cluster.create)
-
-
-    create_parser = action_parser.add_parser('add-nodes', help='Add nodes to a cluster',
-                                             parents=[meta_parser])
-    create_parser.add_argument('-n', '--nodes', default=1, type=int,
-                               help='number of nodes to add to the cluster')
-    create_parser.add_argument('-i', '--infra', default=1, type=int,
-                               help='number of infra nodes to add to the cluster')
-    create_parser.set_defaults(func=cluster.add_nodes)
-
-
-    config_parser = action_parser.add_parser('config',
-                                             help='Configure or reconfigure a cluster',
-                                             parents=[meta_parser])
-    config_parser.set_defaults(func=cluster.config)
-
-    terminate_parser = action_parser.add_parser('terminate',
-                                                help='Destroy a cluster',
-                                                parents=[meta_parser])
-    terminate_parser.add_argument('-f', '--force', action='store_true',
-                                  help='Destroy cluster without confirmation')
-    terminate_parser.set_defaults(func=cluster.terminate)
-
-    update_parser = action_parser.add_parser('update',
-                                             help='Update OpenShift across cluster',
-                                             parents=[meta_parser])
-    update_parser.add_argument('-f', '--force', action='store_true',
-                               help='Update cluster without confirmation')
-    update_parser.set_defaults(func=cluster.update)
-
-    list_parser = action_parser.add_parser('list', help='List VMs in cluster',
-                                           parents=[meta_parser])
-    list_parser.set_defaults(func=cluster.list)
-
-    service_parser = action_parser.add_parser('service', help='service for openshift across cluster',
-                                              parents=[meta_parser])
-    # choices are the only ones valid for the ansible service module: http://docs.ansible.com/service_module.html
-    service_parser.add_argument('state', choices=['started', 'stopped', 'restarted', 'reloaded'],
-                                help='make service call across cluster')
-    service_parser.set_defaults(func=cluster.service)
-
-    args = parser.parse_args()
-
-    if 'terminate' == args.action and not args.force:
-        answer = raw_input("This will destroy the ENTIRE {0} cluster. Are you sure? [y/N] ".format(args.cluster_id))
-        if answer not in ['y', 'Y']:
-            sys.stderr.write('\nACTION [terminate] aborted by user!\n')
-            exit(1)
-
-    if 'update' == args.action and not args.force:
-        answer = raw_input(
-            "This is destructive and could corrupt {0} cluster. Continue? [y/N] ".format(args.cluster_id))
-        if answer not in ['y', 'Y']:
-            sys.stderr.write('\nACTION [update] aborted by user!\n')
-            exit(1)
-
-    try:
-        args.func(args)
-    except Exception as exc:
-        if args.verbose:
-            traceback.print_exc(file=sys.stderr)
-        else:
-            print >>sys.stderr, exc
-            exit(1)