Browse Source

Merge branch 'int' into stg

Troy Dawson 10 years ago
parent
commit
124ca40c13

ansible.cfg → ansible.cfg.example


+ 3 - 10
bin/ohi

@@ -17,13 +17,10 @@ from openshift_ansible.awsutil import ArgumentError
 
 CONFIG_MAIN_SECTION = 'main'
 CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
-CONFIG_INVENTORY_OPTION = 'inventory'
-
 
 
 class Ohi(object):
     def __init__(self):
-        self.inventory = None
         self.host_type_aliases = {}
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
@@ -35,7 +32,7 @@ class Ohi(object):
         self.parse_cli_args()
         self.parse_config_file()
 
-        self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
+        self.aws = awsutil.AwsUtil(self.host_type_aliases)
 
     def run(self):
         if self.args.list_host_types:
@@ -47,12 +44,12 @@ class Ohi(object):
            self.args.env is not None:
             # Both env and host-type specified
             hosts = self.aws.get_host_list(host_type=self.args.host_type, \
-                                           env=self.args.env)
+                                           envs=self.args.env)
 
         if self.args.host_type is None and \
            self.args.env is not None:
             # Only env specified
-            hosts = self.aws.get_host_list(env=self.args.env)
+            hosts = self.aws.get_host_list(envs=self.args.env)
 
         if self.args.host_type is not None and \
            self.args.env is None:
@@ -76,10 +73,6 @@ class Ohi(object):
             config = ConfigParser.ConfigParser()
             config.read(self.config_path)
 
-            if config.has_section(CONFIG_MAIN_SECTION) and \
-               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
-                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
-
             self.host_type_aliases = {}
             if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
                 for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):

+ 17 - 2
bin/openshift-ansible-bin.spec

@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Scripts for working with metadata hosts
 Name:          openshift-ansible-bin
-Version:       0.0.12
+Version:       0.0.17
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -24,7 +24,13 @@ mkdir -p %{buildroot}/etc/bash_completion.d
 mkdir -p %{buildroot}/etc/openshift_ansible
 
 cp -p ossh oscp opssh opscp ohi %{buildroot}%{_bindir}
-cp -p openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+cp -pP openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+
+# Make it so we can load multi_ec2.py as a library.
+rm %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py*
+ln -sf /usr/share/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+ln -sf /usr/share/ansible/inventory/multi_ec2.pyc %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.pyc
+
 cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
 
 cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
@@ -36,6 +42,15 @@ cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshif
 %config(noreplace) /etc/openshift_ansible/
 
 %changelog
+* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.17-1
+- fixed the openshift-ansible-bin build (twiest@redhat.com)
+
+* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.14-1
+- Command line tools import multi_ec2 as lib (kwoodson@redhat.com)
+- Adding cache location for multi ec2 (kwoodson@redhat.com)
+
+* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.13-1
+- added '-e all' to ohi and fixed pylint errors. (twiest@redhat.com)
+
 * Tue May 05 2015 Thomas Wiest <twiest@redhat.com> 0.0.12-1
 - fixed opssh and opscp to allow just environment or just host-type.
   (twiest@redhat.com)

+ 96 - 64
bin/openshift_ansible/awsutil.py

@@ -1,113 +1,120 @@
 # vim: expandtab:tabstop=4:shiftwidth=4
 
-import subprocess
+"""This module comprises Aws specific utility functions."""
+
 import os
-import json
 import re
+from openshift_ansible import multi_ec2
 
 class ArgumentError(Exception):
+    """This class is raised when improper arguments are passed."""
+
     def __init__(self, message):
+        """Initialize an ArgumentError.
+
+        Keyword arguments:
+        message -- the exact error message being raised
+        """
+        super(ArgumentError, self).__init__()
         self.message = message
 
 class AwsUtil(object):
-    def __init__(self, inventory_path=None, host_type_aliases={}):
-        self.host_type_aliases = host_type_aliases
-        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+    """This class contains the AWS utility functions."""
 
-        if inventory_path is None:
-            inventory_path = os.path.realpath(os.path.join(self.file_path, \
-                                              '..', '..', 'inventory', \
-                                              'multi_ec2.py'))
+    def __init__(self, host_type_aliases=None):
+        """Initialize the AWS utility class.
 
-        if not os.path.isfile(inventory_path):
-            raise Exception("Inventory file not found [%s]" % inventory_path)
+        Keyword arguments:
+        host_type_aliases -- a list of aliases to common host-types (e.g. ex-node)
+        """
+
+        host_type_aliases = host_type_aliases or {}
+
+        self.host_type_aliases = host_type_aliases
+        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
-        self.inventory_path = inventory_path
         self.setup_host_type_alias_lookup()
 
     def setup_host_type_alias_lookup(self):
+        """Sets up the alias to host-type lookup table."""
         self.alias_lookup = {}
         for key, values in self.host_type_aliases.iteritems():
             for value in values:
                 self.alias_lookup[value] = key
 
+    @staticmethod
+    def get_inventory(args=None):
+        """Calls the inventory script and returns a dictionary containing the inventory.
 
-
-    def get_inventory(self,args=[]):
-        cmd = [self.inventory_path]
-
-        if args:
-            cmd.extend(args)
-
-        env = os.environ
-
-        p = subprocess.Popen(cmd, stderr=subprocess.PIPE,
-                         stdout=subprocess.PIPE, env=env)
-
-        out,err = p.communicate()
-
-        if p.returncode != 0:
-            raise RuntimeError(err)
-
-        return json.loads(out.strip())
+        Keyword arguments:
+        args -- optional arguments to pass to the inventory script
+        """
+        mec2 = multi_ec2.MultiEc2(args)
+        mec2.run()
+        return mec2.result
 
     def get_environments(self):
+        """Searches for env tags in the inventory and returns all of the envs found."""
         pattern = re.compile(r'^tag_environment_(.*)')
 
         envs = []
         inv = self.get_inventory()
         for key in inv.keys():
-            m = pattern.match(key)
-            if m:
-                envs.append(m.group(1))
+            matched = pattern.match(key)
+            if matched:
+                envs.append(matched.group(1))
 
         envs.sort()
         return envs
 
     def get_host_types(self):
+        """Searches for host-type tags in the inventory and returns all host-types found."""
         pattern = re.compile(r'^tag_host-type_(.*)')
 
         host_types = []
         inv = self.get_inventory()
         for key in inv.keys():
-            m = pattern.match(key)
-            if m:
-                host_types.append(m.group(1))
+            matched = pattern.match(key)
+            if matched:
+                host_types.append(matched.group(1))
 
         host_types.sort()
         return host_types
 
     def get_security_groups(self):
+        """Searches for security_groups in the inventory and returns all SGs found."""
         pattern = re.compile(r'^security_group_(.*)')
 
         groups = []
         inv = self.get_inventory()
         for key in inv.keys():
-            m = pattern.match(key)
-            if m:
-                groups.append(m.group(1))
+            matched = pattern.match(key)
+            if matched:
+                groups.append(matched.group(1))
 
         groups.sort()
         return groups
 
-    def build_host_dict_by_env(self, args=[]):
+    def build_host_dict_by_env(self, args=None):
+        """Searches the inventory for hosts in an env and returns their hostvars."""
+        args = args or []
         inv = self.get_inventory(args)
 
         inst_by_env = {}
-        for dns, host in inv['_meta']['hostvars'].items():
+        for _, host in inv['_meta']['hostvars'].items():
             # If you don't have an environment tag, we're going to ignore you
             if 'ec2_tag_environment' not in host:
                 continue
 
             if host['ec2_tag_environment'] not in inst_by_env:
                 inst_by_env[host['ec2_tag_environment']] = {}
-            host_id = "%s:%s" % (host['ec2_tag_Name'],host['ec2_id'])
+            host_id = "%s:%s" % (host['ec2_tag_Name'], host['ec2_id'])
             inst_by_env[host['ec2_tag_environment']][host_id] = host
 
         return inst_by_env
 
-    # Display host_types
     def print_host_types(self):
+        """Gets the list of host types and aliases and outputs them in columns."""
         host_types = self.get_host_types()
         ht_format_str = "%35s"
         alias_format_str = "%-20s"
@@ -117,22 +124,31 @@ class AwsUtil(object):
         print combined_format_str % ('Host Types', 'Aliases')
         print combined_format_str % ('----------', '-------')
 
-        for ht in host_types:
+        for host_type in host_types:
             aliases = []
-            if ht in self.host_type_aliases:
-                aliases = self.host_type_aliases[ht]
-                print combined_format_str % (ht, ", ".join(aliases))
+            if host_type in self.host_type_aliases:
+                aliases = self.host_type_aliases[host_type]
+                print combined_format_str % (host_type, ", ".join(aliases))
             else:
-                print  ht_format_str % ht
+                print  ht_format_str % host_type
         print
 
-    # Convert host-type aliases to real a host-type
     def resolve_host_type(self, host_type):
+        """Converts a host-type alias into a host-type.
+
+        Keyword arguments:
+        host_type -- The alias or host_type to look up.
+
+        Example (depends on aliases defined in config file):
+            host_type = ex-node
+            returns: openshift-node
+        """
         if self.alias_lookup.has_key(host_type):
             return self.alias_lookup[host_type]
         return host_type
 
-    def gen_env_tag(self, env):
+    @staticmethod
+    def gen_env_tag(env):
         """Generate the environment tag
         """
         return "tag_environment_%s" % env
@@ -149,28 +165,44 @@ class AwsUtil(object):
         host_type = self.resolve_host_type(host_type)
         return "tag_env-host-type_%s-%s" % (env, host_type)
 
-    def get_host_list(self, host_type=None, env=None):
+    def get_host_list(self, host_type=None, envs=None):
         """Get the list of hosts from the inventory using host-type and environment
         """
+        envs = envs or []
         inv = self.get_inventory()
 
-        if host_type is not None and \
-           env is not None:
-            # Both host type and environment were specified
-            env_host_type_tag = self.gen_env_host_type_tag(host_type, env)
-            return inv[env_host_type_tag]
+        # We prefer to deal with a list of environments
+        if issubclass(type(envs), basestring):
+            if envs == 'all':
+                envs = self.get_environments()
+            else:
+                envs = [envs]
 
-        if host_type is None and \
-           env is not None:
+        if host_type and envs:
+            # Both host type and environment were specified
+            retval = []
+            for env in envs:
+                env_host_type_tag = self.gen_env_host_type_tag(host_type, env)
+                if env_host_type_tag in inv.keys():
+                    retval += inv[env_host_type_tag]
+            return set(retval)
+
+        if envs and not host_type:
             # Just environment was specified
-            host_type_tag = self.gen_env_tag(env)
-            return inv[host_type_tag]
-
-        if host_type is not None and \
-           env is None:
+            retval = []
+            for env in envs:
+                env_tag = AwsUtil.gen_env_tag(env)
+                if env_tag in inv.keys():
+                    retval += inv[env_tag]
+            return set(retval)
+
+        if host_type and not envs:
             # Just host-type was specified
+            retval = []
             host_type_tag = self.gen_host_type_tag(host_type)
-            return inv[host_type_tag]
+            if host_type_tag in inv.keys():
+                retval = inv[host_type_tag]
+            return set(retval)
 
         # We should never reach here!
         raise ArgumentError("Invalid combination of parameters")

+ 1 - 0
bin/openshift_ansible/multi_ec2.py

@@ -0,0 +1 @@
+../../inventory/multi_ec2.py

+ 9 - 13
bin/oscp

@@ -11,11 +11,9 @@ import ConfigParser
 from openshift_ansible import awsutil
 
 CONFIG_MAIN_SECTION = 'main'
-CONFIG_INVENTORY_OPTION = 'inventory'
 
 class Oscp(object):
     def __init__(self):
-        self.inventory = None
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
         # Default the config path to /etc
@@ -29,13 +27,13 @@ class Oscp(object):
         # parse host and user
         self.process_host()
 
-        self.aws = awsutil.AwsUtil(self.inventory)
+        self.aws = awsutil.AwsUtil()
 
         # get a dict of host inventory
-        if self.args.list:
-            self.get_hosts()
-        else:
+        if self.args.refresh_cache:
             self.get_hosts(True)
+        else:
+            self.get_hosts()
 
         if (self.args.src == '' or self.args.dest == '') and not self.args.list:
             self.parser.print_help()
@@ -56,10 +54,6 @@ class Oscp(object):
             config = ConfigParser.ConfigParser()
             config.read(self.config_path)
 
-            if config.has_section(CONFIG_MAIN_SECTION) and \
-               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
-                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
-
     def parse_cli_args(self):
         parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
         parser.add_argument('-e', '--env',
@@ -68,6 +62,8 @@ class Oscp(object):
                           action="store_true", help="debug mode")
         parser.add_argument('-v', '--verbose', default=False,
                           action="store_true", help="Verbose?")
+        parser.add_argument('--refresh-cache', default=False,
+                          action="store_true", help="Force a refresh on the host cache.")
         parser.add_argument('--list', default=False,
                           action="store_true", help="list out hosts")
         parser.add_argument('-r', '--recurse', action='store_true', default=False,
@@ -119,14 +115,14 @@ class Oscp(object):
         else:
             self.env = None
 
-    def get_hosts(self, cache_only=False):
+    def get_hosts(self, refresh_cache=False):
         '''Query our host inventory and return a dict where the format
            equals:
 
            dict['environment'] = [{'servername' : {}}, ]
         '''
-        if cache_only:
-            self.host_inventory = self.aws.build_host_dict_by_env(['--cache-only'])
+        if refresh_cache:
+            self.host_inventory = self.aws.build_host_dict_by_env(['--refresh-cache'])
         else:
             self.host_inventory = self.aws.build_host_dict_by_env()
 

+ 9 - 14
bin/ossh

@@ -11,11 +11,9 @@ import ConfigParser
 from openshift_ansible import awsutil
 
 CONFIG_MAIN_SECTION = 'main'
-CONFIG_INVENTORY_OPTION = 'inventory'
 
 class Ossh(object):
     def __init__(self):
-        self.inventory = None
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
         # Default the config path to /etc
@@ -26,13 +24,12 @@ class Ossh(object):
         self.parse_cli_args()
         self.parse_config_file()
 
-        self.aws = awsutil.AwsUtil(self.inventory)
+        self.aws = awsutil.AwsUtil()
 
-        # get a dict of host inventory
-        if self.args.list:
-            self.get_hosts()
-        else:
+        if self.args.refresh_cache:
             self.get_hosts(True)
+        else:
+            self.get_hosts()
 
         # parse host and user
         self.process_host()
@@ -55,10 +52,6 @@ class Ossh(object):
             config = ConfigParser.ConfigParser()
             config.read(self.config_path)
 
-            if config.has_section(CONFIG_MAIN_SECTION) and \
-               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
-                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
-
     def parse_cli_args(self):
         parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
         parser.add_argument('-e', '--env', action="store",
@@ -67,6 +60,8 @@ class Ossh(object):
                           action="store_true", help="debug mode")
         parser.add_argument('-v', '--verbose', default=False,
                           action="store_true", help="Verbose?")
+        parser.add_argument('--refresh-cache', default=False,
+                          action="store_true", help="Force a refresh on the host cache.")
         parser.add_argument('--list', default=False,
                           action="store_true", help="list out hosts")
         parser.add_argument('-c', '--command', action='store',
@@ -109,14 +104,14 @@ class Ossh(object):
             if self.args.login_name:
                 self.user = self.args.login_name
 
-    def get_hosts(self, cache_only=False):
+    def get_hosts(self, refresh_cache=False):
         '''Query our host inventory and return a dict where the format
            equals:
 
            dict['servername'] = dns_name
         '''
-        if cache_only:
-            self.host_inventory = self.aws.build_host_dict_by_env(['--cache-only'])
+        if refresh_cache:
+            self.host_inventory = self.aws.build_host_dict_by_env(['--refresh-cache'])
         else:
             self.host_inventory = self.aws.build_host_dict_by_env()
 

+ 4 - 2
inventory/byo/hosts

@@ -20,7 +20,8 @@ deployment_type=enterprise
 openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
 
 # Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
 
 # Origin copr repo
 #openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
@@ -31,4 +32,5 @@ ose3-master-ansible.test.example.com
 
 # host group for nodes
 [nodes]
-ose3-node[1:2]-ansible.test.example.com
+ose3-master-ansible.test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"

+ 143 - 42
inventory/multi_ec2.py

@@ -11,9 +11,13 @@ import yaml
 import os
 import subprocess
 import json
-
+import errno
+import fcntl
+import tempfile
+import copy
 
 CONFIG_FILE_NAME = 'multi_ec2.yaml'
+DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
 
 class MultiEc2(object):
     '''
@@ -22,12 +26,17 @@ class MultiEc2(object):
             Stores a json hash of resources in result.
     '''
 
-    def __init__(self):
-        self.args = None
+    def __init__(self, args=None):
+        # Allow args to be passed when called as a library
+        if not args:
+            self.args = {}
+        else:
+            self.args = args
+
+        self.cache_path = DEFAULT_CACHE_PATH
         self.config = None
         self.all_ec2_results = {}
         self.result = {}
-        self.cache_path = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
         same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
@@ -41,17 +50,26 @@ class MultiEc2(object):
         else:
             self.config_file = None # expect env vars
 
-        self.parse_cli_args()
 
+    def run(self):
+        '''This method checks to see if the local
+           cache is valid for the inventory.
+
+           if the cache is valid; return cache
+           else the credentials are loaded from multi_ec2.yaml or from the env
+           and we attempt to get the inventory from the provider specified.
+        '''
         # load yaml
         if self.config_file and os.path.isfile(self.config_file):
             self.config = self.load_yaml_config()
         elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
              os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
+            # Build a default config
             self.config = {}
             self.config['accounts'] = [
                 {
                     'name': 'default',
+                    'cache_location': DEFAULT_CACHE_PATH,
                     'provider': 'aws/hosts/ec2.py',
                     'env_vars': {
                         'AWS_ACCESS_KEY_ID':     os.environ["AWS_ACCESS_KEY_ID"],
@@ -64,11 +82,15 @@ class MultiEc2(object):
         else:
             raise RuntimeError("Could not find valid ec2 credentials in the environment.")
 
-        if self.args.refresh_cache:
+        # Set the default cache path but if its defined we'll assign it.
+        if self.config.has_key('cache_location'):
+            self.cache_path = self.config['cache_location']
+
+        if self.args.get('refresh_cache', None):
             self.get_inventory()
             self.write_to_cache()
         # if its a host query, fetch and do not cache
-        elif self.args.host:
+        elif self.args.get('host', None):
             self.get_inventory()
         elif not self.is_cache_valid():
             # go fetch the inventories and cache them if cache is expired
@@ -109,9 +131,9 @@ class MultiEc2(object):
                         "and that it is executable. (%s)" % provider)
 
         cmds = [provider]
-        if self.args.host:
+        if self.args.get('host', None):
             cmds.append("--host")
-            cmds.append(self.args.host)
+            cmds.append(self.args.get('host', None))
         else:
             cmds.append('--list')
 
@@ -119,6 +141,54 @@ class MultiEc2(object):
 
         return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
                                 stdout=subprocess.PIPE, env=env)
+
+    @staticmethod
+    def generate_config(config_data):
+        """Generate the ec2.ini file in as a secure temp file.
+           Once generated, pass it to the ec2.py as an environment variable.
+        """
+        fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
+        for section, values in config_data.items():
+            os.write(fildes, "[%s]\n" % section)
+            for option, value  in values.items():
+                os.write(fildes, "%s = %s\n" % (option, value))
+        os.close(fildes)
+        return tmp_file_path
+
+    def run_provider(self):
+        '''Setup the provider call with proper variables
+           and call self.get_provider_tags.
+        '''
+        try:
+            all_results = []
+            tmp_file_paths = []
+            processes = {}
+            for account in self.config['accounts']:
+                env = account['env_vars']
+                if account.has_key('provider_config'):
+                    tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
+                    env['EC2_INI_PATH'] = tmp_file_paths[-1]
+                name = account['name']
+                provider = account['provider']
+                processes[name] = self.get_provider_tags(provider, env)
+
+            # for each process collect stdout when its available
+            for name, process in processes.items():
+                out, err = process.communicate()
+                all_results.append({
+                    "name": name,
+                    "out": out.strip(),
+                    "err": err.strip(),
+                    "code": process.returncode
+                })
+
+        finally:
+            # Clean up the mkstemp file
+            for tmp_file in tmp_file_paths:
+                os.unlink(tmp_file)
+
+        return all_results
+
     def get_inventory(self):
         """Create the subprocess to fetch tags from a provider.
         Host query:
@@ -129,46 +199,61 @@ class MultiEc2(object):
         Query all of the different accounts for their tags.  Once completed
         store all of their results into one merged updated hash.
         """
-        processes = {}
-        for account in self.config['accounts']:
-            env = account['env_vars']
-            name = account['name']
-            provider = account['provider']
-            processes[name] = self.get_provider_tags(provider, env)
-
-        # for each process collect stdout when its available
-        all_results = []
-        for name, process in processes.items():
-            out, err = process.communicate()
-            all_results.append({
-                "name": name,
-                "out": out.strip(),
-                "err": err.strip(),
-                "code": process.returncode
-            })
+        provider_results = self.run_provider()
 
         # process --host results
-        if not self.args.host:
+        # For any 0 result, return it
+        if self.args.get('host', None):
+            count = 0
+            for results in provider_results:
+                if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
+                    self.result = json.loads(results['out'])
+                    count += 1
+                if count > 1:
+                    raise RuntimeError("Found > 1 results for --host %s. \
+                                       This is an invalid state." % self.args.get('host', None))
+        # process --list results
+        else:
             # For any non-zero, raise an error on it
-            for result in all_results:
+            for result in provider_results:
                 if result['code'] != 0:
                     raise RuntimeError(result['err'])
                 else:
                     self.all_ec2_results[result['name']] = json.loads(result['out'])
+
+            # Check if user wants extra vars in yaml by
+            # having hostvars and all_group defined
+            for acc_config in self.config['accounts']:
+                self.apply_account_config(acc_config)
+
+            # Build results by merging all dictionaries
             values = self.all_ec2_results.values()
             values.insert(0, self.result)
             for result in  values:
                 MultiEc2.merge_destructively(self.result, result)
-        else:
-            # For any 0 result, return it
-            count = 0
-            for results in all_results:
-                if results['code'] == 0 and results['err'] == '' and results['out'] != '{}':
-                    self.result = json.loads(out)
-                    count += 1
-                if count > 1:
-                    raise RuntimeError("Found > 1 results for --host %s. \
-                                       This is an invalid state." % self.args.host)
+
+    def apply_account_config(self, acc_config):
+        ''' Apply account config settings
+        '''
+        if not acc_config.has_key('hostvars') and not acc_config.has_key('all_group'):
+            return
+
+        results = self.all_ec2_results[acc_config['name']]
+        # Update each hostvar with the newly desired key: value
+        for host_property, value in acc_config['hostvars'].items():
+            # Verify the account results look sane
+            # by checking for these keys ('_meta' and 'hostvars' exist)
+            if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
+                for data in results['_meta']['hostvars'].values():
+                    data[str(host_property)] = str(value)
+
+            # Add this group
+            results["%s_%s" % (host_property, value)] = \
+              copy.copy(results[acc_config['all_group']])
+
+        # store the results back into all_ec2_results
+        self.all_ec2_results[acc_config['name']] = results
+
     @staticmethod
     def merge_destructively(input_a, input_b):
         "merges b into input_a"
@@ -182,7 +267,7 @@ class MultiEc2(object):
                 elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
                     for result in input_b[key]:
                         if result not in input_a[key]:
-                            input_a[key].input_append(result)
+                            input_a[key].append(result)
                 # a is a list and not b
                 elif isinstance(input_a[key], list):
                     if input_b[key] not in input_a[key]:
@@ -217,14 +302,27 @@ class MultiEc2(object):
                             help='List instances (default: True)')
         parser.add_argument('--host', action='store', default=False,
                             help='Get all the variables about a specific instance')
-        self.args = parser.parse_args()
+        self.args = parser.parse_args().__dict__
 
     def write_to_cache(self):
         ''' Writes data in JSON format to a file '''
 
+        # if it does not exist, try and create it.
+        if not os.path.isfile(self.cache_path):
+            path = os.path.dirname(self.cache_path)
+            try:
+                os.makedirs(path)
+            except OSError as exc:
+                if exc.errno != errno.EEXIST or not os.path.isdir(path):
+                    raise
+
         json_data = MultiEc2.json_format_dict(self.result, True)
         with open(self.cache_path, 'w') as cache:
-            cache.write(json_data)
+            try:
+                fcntl.flock(cache, fcntl.LOCK_EX)
+                cache.write(json_data)
+            finally:
+                fcntl.flock(cache, fcntl.LOCK_UN)
 
     def get_inventory_from_cache(self):
         ''' Reads the inventory from the cache file and returns it as a JSON
@@ -254,4 +352,7 @@ class MultiEc2(object):
 
 
 if __name__ == "__main__":
-    print MultiEc2().result_str()
+    MEC2 = MultiEc2()
+    MEC2.parse_cli_args()
+    MEC2.run()
+    print MEC2.result_str()

+ 18 - 1
inventory/multi_ec2.yaml.example

@@ -1,15 +1,32 @@
 # multi ec2 inventory configs
+#
+cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
+
 accounts:
   - name: aws1
     provider: aws/hosts/ec2.py
+    provider_config:
+      ec2:
+        regions: all
+        regions_exclude:  us-gov-west-1,cn-north-1
+        destination_variable: public_dns_name
+        route53: False
+        cache_path: ~/.ansible/tmp
+        cache_max_age: 300
+        vpc_destination_variable: ip_address
     env_vars:
       AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
       AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+    all_group: ec2
+    hostvars:
+      cloud: aws
+      account: aws1
 
-  - name: aws2
  - name: aws2
     provider: aws/hosts/ec2.py
     env_vars:
       AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
       AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+      EC2_INI_PATH: /etc/ansible/ec2.ini
 
 cache_max_age: 60

+ 25 - 4
inventory/openshift-ansible-inventory.spec

@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Inventories
 Name:          openshift-ansible-inventory
-Version:       0.0.2
+Version:       0.0.7
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -25,18 +25,39 @@ mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
 
 cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
 cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
-cp -p aws/ec2.py aws/ec2.ini %{buildroot}/usr/share/ansible/inventory/aws
-cp -p gce/gce.py %{buildroot}/usr/share/ansible/inventory/gce
+cp -p aws/hosts/ec2.py %{buildroot}/usr/share/ansible/inventory/aws
+cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
 
 %files
 %config(noreplace) /etc/ansible/*
 %dir /usr/share/ansible/inventory
 /usr/share/ansible/inventory/multi_ec2.py*
 /usr/share/ansible/inventory/aws/ec2.py*
-%config(noreplace) /usr/share/ansible/inventory/aws/ec2.ini
 /usr/share/ansible/inventory/gce/gce.py*
 
 %changelog
+* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.7-1
+- Making multi_ec2 into a library (kwoodson@redhat.com)
+
+* Wed May 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
+- Added support for grouping and a bug fix. (kwoodson@redhat.com)
+
+* Tue May 12 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
+- removed ec2.ini from the openshift-ansible-inventory.spec file so that we're
+  not dictating what the ec2.ini file should look like. (twiest@redhat.com)
+- Added capability to pass in ec2.ini file. (kwoodson@redhat.com)
+
+* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
+- Fixed a bug due to renaming of variables. (kwoodson@redhat.com)
+
+* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
+- fixed build problems with openshift-ansible-inventory.spec
+  (twiest@redhat.com)
+- Allow option in multi_ec2 to set cache location. (kwoodson@redhat.com)
+- Add ansible_connection=local to localhost in inventory (jdetiber@redhat.com)
+- Adding refresh-cache option and cleanup for pylint. Also updated for
+  aws/hosts/ being added. (kwoodson@redhat.com)
+
 * Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
 - added the ability to have a config file in /etc/openshift_ansible to
   multi_ec2.py. (twiest@redhat.com)

+ 1 - 1
playbooks/aws/ansible-tower/launch.yml

@@ -6,7 +6,7 @@
 
   vars:
     inst_region: us-east-1
-    rhel7_ami: ami-906240f8
+    rhel7_ami: ami-78756d10
     user_data_file: user_data.txt
 
   vars_files:

+ 1 - 1
playbooks/aws/openshift-cluster/vars.online.int.yml

@@ -1,5 +1,5 @@
 ---
-ec2_image: ami-906240f8
+ec2_image: ami-78756d10
 ec2_image_name: libra-ops-rhel7*
 ec2_region: us-east-1
 ec2_keypair: mmcgrath_libra

+ 1 - 1
playbooks/aws/openshift-cluster/vars.online.prod.yml

@@ -1,5 +1,5 @@
 ---
-ec2_image: ami-906240f8
+ec2_image: ami-78756d10
 ec2_image_name: libra-ops-rhel7*
 ec2_region: us-east-1
 ec2_keypair: mmcgrath_libra

+ 1 - 1
playbooks/aws/openshift-cluster/vars.online.stage.yml

@@ -1,5 +1,5 @@
 ---
-ec2_image: ami-906240f8
+ec2_image: ami-78756d10
 ec2_image_name: libra-ops-rhel7*
 ec2_region: us-east-1
 ec2_keypair: mmcgrath_libra

+ 2 - 0
playbooks/byo/config.yml

@@ -1,6 +1,8 @@
 ---
 - name: Run the openshift-master config playbook
   include: openshift-master/config.yml
+  when: groups.masters is defined and groups.masters
 
 - name: Run the openshift-node config playbook
   include: openshift-node/config.yml
+  when: groups.nodes is defined and groups.nodes and groups.masters is defined and groups.masters

+ 2 - 3
playbooks/common/openshift-node/config.yml

@@ -15,6 +15,7 @@
         local_facts:
           hostname: "{{ openshift_hostname | default(None) }}"
           public_hostname: "{{ openshift_public_hostname | default(None) }}"
+          deployment_type: "{{ openshift_deployment_type }}"
       - role: node
         local_facts:
           external_id: "{{ openshift_node_external_id | default(None) }}"
@@ -23,7 +24,6 @@
           pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
           labels: "{{ openshift_node_labels | default(None) }}"
           annotations: "{{ openshift_node_annotations | default(None) }}"
-          deployment_type: "{{ openshift_deployment_type }}"
 
 
 - name: Create temp directory for syncing certs
@@ -68,7 +68,6 @@
     fetch:
       src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz"
       dest: "{{ sync_tmpdir }}/"
-      flat: yes
       fail_on_missing: yes
       validate_checksum: yes
     with_items: openshift_nodes
@@ -79,7 +78,7 @@
   hosts: oo_nodes_to_config
   gather_facts: no
   vars:
-    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}/{{ groups['oo_first_master'][0] }}"
     openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001"
   pre_tasks:
   - name: Ensure certificate directory exists

+ 1 - 1
rel-eng/packages/openshift-ansible-bin

@@ -1 +1 @@
-0.0.12-1 bin/
+0.0.17-1 bin/

+ 1 - 1
rel-eng/packages/openshift-ansible-inventory

@@ -1 +1 @@
-0.0.2-1 inventory/
+0.0.7-1 inventory/

+ 8 - 0
roles/ansible/tasks/config.yml

@@ -0,0 +1,8 @@
+---
+- name: modify ansible.cfg
+  lineinfile:
+    dest: /etc/ansible/ansible.cfg
+    backrefs: yes
+    regexp: "^#?({{ item.option }})( *)="
+    line: '\1\2= {{ item.value }}'
+  with_items: cfg_options

+ 4 - 0
roles/ansible/tasks/main.yaml

@@ -5,3 +5,7 @@
   yum:
     pkg: ansible
     state: installed
+
+- include: config.yml
+  vars:
+    cfg_options: "{{ ans_config }}"

+ 16 - 18
roles/openshift_ansible_inventory/tasks/main.yml

@@ -24,22 +24,20 @@
     owner: root
     group: libra_ops
 
-- lineinfile:
-    dest: /etc/ansible/ansible.cfg
-    backrefs: yes
-    regexp: '^(hostfile|inventory)( *)='
-    line: '\1\2= /etc/ansible/inventory'
+# This cron uses the above location to call its job
+- name: Cron to keep cache fresh
+  cron:
+    name: 'multi_ec2_inventory'
+    minute: '*/10'
+    job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null'
+  when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
 
-- name: setting ec2.ini destination_format
-  lineinfile:
-    dest: /usr/share/ansible/inventory/aws/ec2.ini
-    regexp: '^destination_format *='
-    line: "destination_format = {{ oo_ec2_destination_format }}"
-  when: oo_ec2_destination_format is defined
-
-- name: setting ec2.ini destination_format_tags
-  lineinfile:
-    dest: /usr/share/ansible/inventory/aws/ec2.ini
-    regexp: '^destination_format_tags *='
-    line: "destination_format_tags = {{ oo_ec2_destination_format_tags }}"
-  when: oo_ec2_destination_format_tags is defined
+- name: Set cache location
+  file:
+    state: directory
+    dest: "{{ oo_inventory_cache_location | dirname }}"
+    owner: root
+    group: libra_ops
+    recurse: yes
+    mode: '2750'
+  when: oo_inventory_cache_location is defined

+ 15 - 0
roles/openshift_ansible_inventory/templates/multi_ec2.yaml.j2

@@ -1,11 +1,26 @@
 # multi ec2 inventory configs
 cache_max_age: {{ oo_inventory_cache_max_age }}
+cache_location: {{ oo_inventory_cache_location | default('~/.ansible/tmp/multi_ec2_inventory.cache') }}
 accounts:
 {% for account in oo_inventory_accounts %}
   - name: {{ account.name }}
     provider: {{ account.provider }}
+    provider_config:
+{%  for section, items in account.provider_config.items() %}
+      {{ section }}:
+{%    for property, value in items.items() %}
+        {{ property }}: {{ value }}
+{%    endfor %}
+{% endfor %}
     env_vars:
       AWS_ACCESS_KEY_ID: {{ account.env_vars.AWS_ACCESS_KEY_ID }}
       AWS_SECRET_ACCESS_KEY: {{ account.env_vars.AWS_SECRET_ACCESS_KEY }}
{% if account.all_group is defined and account.hostvars is defined %}
+    all_group: {{ account.all_group }}
+    hostvars:
+{%    for property, value in account.hostvars.items() %}
+      {{ property }}: {{ value }}
+{%    endfor %}
+{% endif %}
 
 {% endfor %}

File diff suppressed because it is too large
+ 626 - 340
roles/openshift_facts/library/openshift_facts.py


+ 6 - 0
roles/openshift_facts/tasks/main.yml

@@ -1,3 +1,9 @@
 ---
+- name: Verify Ansible version is greater than 1.8.0 and not 1.9.0
+  assert:
+    that:
+    - ansible_version | version_compare('1.8.0', 'ge')
+    - ansible_version | version_compare('1.9.0', 'ne')
+
 - name: Gather OpenShift facts
   openshift_facts:

+ 4 - 1
roles/openshift_master/defaults/main.yml

@@ -2,12 +2,15 @@
 openshift_node_ips: []
 
 # TODO: update setting these values based on the facts
-# TODO: update for console port change
 os_firewall_allow:
 - service: etcd embedded
   port: 4001/tcp
 - service: OpenShift api https
   port: 8443/tcp
+- service: OpenShift dns tcp
+  port: 53/tcp
+- service: OpenShift dns udp
+  port: 53/udp
 os_firewall_deny:
 - service: OpenShift api http
   port: 8080/tcp