Browse Source

Merge remote-tracking branch 'upstream/master' into upgrade

Devan Goodwin 9 years ago
parent
commit
153c57d6bd
33 changed files with 582 additions and 310 deletions
  1. 1 1
      .tito/packages/openshift-ansible
  2. 1 1
      bin/README_SHELL_COMPLETION
  3. 1 1
      bin/openshift_ansible.conf.example
  4. 7 4
      bin/openshift_ansible/awsutil.py
  5. 10 10
      bin/ossh_bash_completion
  6. 5 5
      bin/ossh_zsh_completion
  7. 2 2
      bin/zsh_functions/_ossh
  8. 66 1
      filter_plugins/oo_filters.py
  9. 5 0
      inventory/byo/hosts.example
  10. 21 11
      inventory/gce/hosts/gce.py
  11. 0 32
      inventory/multi_ec2.yaml.example
  12. 92 52
      inventory/multi_ec2.py
  13. 51 0
      inventory/multi_inventory.yaml.example
  14. 50 6
      openshift-ansible.spec
  15. 4 4
      playbooks/aws/openshift-cluster/launch.yml
  16. 1 64
      playbooks/common/openshift-cluster/config.yml
  17. 64 0
      playbooks/common/openshift-cluster/evaluate_groups.yml
  18. 7 0
      playbooks/common/openshift-cluster/scaleup.yml
  19. 0 0
      playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml
  20. 0 0
      playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml
  21. 0 0
      playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml
  22. 9 0
      playbooks/common/openshift-master/config.yml
  23. 3 3
      playbooks/gce/openshift-cluster/launch.yml
  24. 4 4
      playbooks/libvirt/openshift-cluster/launch.yml
  25. 6 0
      playbooks/libvirt/openshift-cluster/templates/user-data
  26. 10 6
      roles/openshift_ansible_inventory/tasks/main.yml
  27. 11 3
      roles/openshift_facts/library/openshift_facts.py
  28. 15 1
      roles/openshift_master/templates/master.yaml.v1.j2
  29. 0 3
      roles/openshift_master_certificates/tasks/main.yml
  30. 21 0
      roles/os_zabbix/vars/template_os_linux.yml
  31. 1 1
      test/units/README.md
  32. 114 0
      test/units/multi_inventory_test.py
  33. 0 95
      test/units/mutli_ec2_test.py

+ 1 - 1
.tito/packages/openshift-ansible

@@ -1 +1 @@
-3.0.6-1 ./
+3.0.7-1 ./

+ 1 - 1
bin/README_SHELL_COMPLETION

@@ -14,7 +14,7 @@ will populate the cache file and the completions should
 become available.
 become available.
 
 
 This script will look at the cached version of your
 This script will look at the cached version of your
-multi_ec2 results in ~/.ansible/tmp/multi_ec2_inventory.cache.
+multi_inventory results in ~/.ansible/tmp/multi_inventory.cache.
 It will then parse a few {host}.{env} out of the json
 It will then parse a few {host}.{env} out of the json
 and return them to be completable.
 and return them to be completable.
 
 

+ 1 - 1
bin/openshift_ansible.conf.example

@@ -1,5 +1,5 @@
 #[main]
 #[main]
-#inventory = /usr/share/ansible/inventory/multi_ec2.py
+#inventory = /usr/share/ansible/inventory/multi_inventory.py
 
 
 #[host_type_aliases]
 #[host_type_aliases]
 #host-type-one = aliasa,aliasb
 #host-type-one = aliasa,aliasb

+ 7 - 4
bin/openshift_ansible/awsutil.py

@@ -4,7 +4,10 @@
 
 
 import os
 import os
 import re
 import re
-from openshift_ansible import multi_ec2
+
+# Buildbot does not have multi_inventory installed
+#pylint: disable=no-name-in-module
+from openshift_ansible import multi_inventory
 
 
 class ArgumentError(Exception):
 class ArgumentError(Exception):
     """This class is raised when improper arguments are passed."""
     """This class is raised when improper arguments are passed."""
@@ -49,9 +52,9 @@ class AwsUtil(object):
         Keyword arguments:
         Keyword arguments:
         args -- optional arguments to pass to the inventory script
         args -- optional arguments to pass to the inventory script
         """
         """
-        mec2 = multi_ec2.MultiEc2(args)
-        mec2.run()
-        return mec2.result
+        minv = multi_inventory.MultiInventory(args)
+        minv.run()
+        return minv.result
 
 
     def get_environments(self):
     def get_environments(self):
         """Searches for env tags in the inventory and returns all of the envs found."""
         """Searches for env tags in the inventory and returns all of the envs found."""

+ 10 - 10
bin/ossh_bash_completion

@@ -1,12 +1,12 @@
 __ossh_known_hosts(){
 __ossh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
     if python -c 'import openshift_ansible' &>/dev/null; then
-      /usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+      /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
 
 
-    elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+    elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
 
 
-    elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+    elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
 
 
     fi
     fi
 }
 }
@@ -26,13 +26,13 @@ complete -F _ossh ossh oscp
 
 
 __opssh_known_hosts(){
 __opssh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
     if python -c 'import openshift_ansible' &>/dev/null; then
-      /usr/bin/python -c 'from openshift_ansible.multi_ec2 import MultiEc2; m=MultiEc2(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+      /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
 
 
-    elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+    elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
 
 
-    elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+    elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
 
 
     fi
     fi
 }
 }

+ 5 - 5
bin/ossh_zsh_completion

@@ -2,13 +2,13 @@
 
 
 _ossh_known_hosts(){
 _ossh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
     if python -c 'import openshift_ansible' &>/dev/null; then
-      print $(/usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+      print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
 
 
-    elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+    elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
 
 
-    elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+    elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
 
 
     fi
     fi
 
 

+ 2 - 2
bin/zsh_functions/_ossh

@@ -1,8 +1,8 @@
 #compdef ossh oscp
 #compdef ossh oscp
 
 
 _ossh_known_hosts(){
 _ossh_known_hosts(){
-  if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
+  if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
   fi
   fi
 }
 }
 
 

+ 66 - 1
filter_plugins/oo_filters.py

@@ -7,6 +7,8 @@ Custom filters for use in openshift-ansible
 
 
 from ansible import errors
 from ansible import errors
 from operator import itemgetter
 from operator import itemgetter
+import OpenSSL.crypto
+import os.path
 import pdb
 import pdb
 import re
 import re
 import json
 import json
@@ -327,6 +329,68 @@ class FilterModule(object):
 
 
         return revamped_outputs
         return revamped_outputs
 
 
+    @staticmethod
+    # pylint: disable=too-many-branches
+    def oo_parse_certificate_names(certificates, data_dir, internal_hostnames):
+        ''' Parses names from list of certificate hashes.
+
+            Ex: certificates = [{ "certfile": "/etc/origin/master/custom1.crt",
+                                  "keyfile": "/etc/origin/master/custom1.key" },
+                                { "certfile": "custom2.crt",
+                                  "keyfile": "custom2.key" }]
+
+                returns [{ "certfile": "/etc/origin/master/custom1.crt",
+                           "keyfile": "/etc/origin/master/custom1.key",
+                           "names": [ "public-master-host.com",
+                                      "other-master-host.com" ] },
+                         { "certfile": "/etc/origin/master/custom2.crt",
+                           "keyfile": "/etc/origin/master/custom2.key",
+                           "names": [ "some-hostname.com" ] }]
+        '''
+        if not issubclass(type(certificates), list):
+            raise errors.AnsibleFilterError("|failed expects certificates is a list")
+
+        if not issubclass(type(data_dir), unicode):
+            raise errors.AnsibleFilterError("|failed expects data_dir is unicode")
+
+        if not issubclass(type(internal_hostnames), list):
+            raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
+
+        for certificate in certificates:
+            if 'names' in certificate.keys():
+                continue
+            else:
+                certificate['names'] = []
+
+            if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']):
+                # Unable to find cert/key, try to prepend data_dir to paths
+                certificate['certfile'] = os.path.join(data_dir, certificate['certfile'])
+                certificate['keyfile'] = os.path.join(data_dir, certificate['keyfile'])
+                if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']):
+                    # Unable to find cert/key in data_dir
+                    raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
+                                                    (certificate['certfile'], certificate['keyfile']))
+
+            try:
+                st_cert = open(certificate['certfile'], 'rt').read()
+                cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
+                certificate['names'].append(str(cert.get_subject().commonName.decode()))
+                for i in range(cert.get_extension_count()):
+                    if cert.get_extension(i).get_short_name() == 'subjectAltName':
+                        for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
+                            certificate['names'].append(name)
+            except:
+                raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
+                                                 "please specify certificate names in host inventory"))
+
+            certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
+            certificate['names'] = list(set(certificate['names']))
+            if not certificate['names']:
+                raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
+                                                 "detected a collision with internal hostname, please specify " +
+                                                 "certificate names in host inventory"))
+        return certificates
+
     def filters(self):
     def filters(self):
         ''' returns a mapping of filters to methods '''
         ''' returns a mapping of filters to methods '''
         return {
         return {
@@ -342,5 +406,6 @@ class FilterModule(object):
             "oo_combine_dict": self.oo_combine_dict,
             "oo_combine_dict": self.oo_combine_dict,
             "oo_split": self.oo_split,
             "oo_split": self.oo_split,
             "oo_filter_list": self.oo_filter_list,
             "oo_filter_list": self.oo_filter_list,
-            "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs
+            "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
+            "oo_parse_certificate_names": self.oo_parse_certificate_names
         }
         }

+ 5 - 0
inventory/byo/hosts.example

@@ -99,6 +99,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # set RPM version for debugging purposes
 # set RPM version for debugging purposes
 #openshift_pkg_version=-3.0.0.0
 #openshift_pkg_version=-3.0.0.0
 
 
+# Configure custom master certificates
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
+
 # host group for masters
 # host group for masters
 [masters]
 [masters]
 ose3-master[1:3]-ansible.test.example.com
 ose3-master[1:3]-ansible.test.example.com

+ 21 - 11
inventory/gce/hosts/gce.py

@@ -66,12 +66,22 @@ Examples:
   $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
   $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
 
 
   Use the GCE inventory script to print out instance specific information
   Use the GCE inventory script to print out instance specific information
-  $ plugins/inventory/gce.py --host my_instance
+  $ contrib/inventory/gce.py --host my_instance
 
 
 Author: Eric Johnson <erjohnso@google.com>
 Author: Eric Johnson <erjohnso@google.com>
 Version: 0.0.1
 Version: 0.0.1
 '''
 '''
 
 
+__requires__ = ['pycrypto>=2.6']
+try:
+    import pkg_resources
+except ImportError:
+    # Use pkg_resources to find the correct versions of libraries and set
+    # sys.path appropriately when there are multiversion installs.  We don't
+    # fail here as there is code that better expresses the errors where the
+    # library is used.
+    pass
+
 USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
 USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
 USER_AGENT_VERSION="v1"
 USER_AGENT_VERSION="v1"
 
 
@@ -102,9 +112,9 @@ class GceInventory(object):
 
 
         # Just display data for specific host
         # Just display data for specific host
         if self.args.host:
         if self.args.host:
-            print self.json_format_dict(self.node_to_dict(
+            print(self.json_format_dict(self.node_to_dict(
                     self.get_instance(self.args.host)),
                     self.get_instance(self.args.host)),
-                    pretty=self.args.pretty)
+                    pretty=self.args.pretty))
             sys.exit(0)
             sys.exit(0)
 
 
         # Otherwise, assume user wants all instances grouped
         # Otherwise, assume user wants all instances grouped
@@ -120,7 +130,6 @@ class GceInventory(object):
             os.path.dirname(os.path.realpath(__file__)), "gce.ini")
             os.path.dirname(os.path.realpath(__file__)), "gce.ini")
         gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
         gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
 
 
-
         # Create a ConfigParser.
         # Create a ConfigParser.
         # This provides empty defaults to each key, so that environment
         # This provides empty defaults to each key, so that environment
         # variable configuration (as opposed to INI configuration) is able
         # variable configuration (as opposed to INI configuration) is able
@@ -174,7 +183,6 @@ class GceInventory(object):
         args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
         args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
         kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
         kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
 
 
-        
         # Retrieve and return the GCE driver.
         # Retrieve and return the GCE driver.
         gce = get_driver(Provider.GCE)(*args, **kwargs)
         gce = get_driver(Provider.GCE)(*args, **kwargs)
         gce.connection.user_agent_append(
         gce.connection.user_agent_append(
@@ -213,8 +221,7 @@ class GceInventory(object):
             'gce_image': inst.image,
             'gce_image': inst.image,
             'gce_machine_type': inst.size,
             'gce_machine_type': inst.size,
             'gce_private_ip': inst.private_ips[0],
             'gce_private_ip': inst.private_ips[0],
-            # Hosts don't always have a public IP name
-            #'gce_public_ip': inst.public_ips[0],
+            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
             'gce_name': inst.name,
             'gce_name': inst.name,
             'gce_description': inst.extra['description'],
             'gce_description': inst.extra['description'],
             'gce_status': inst.extra['status'],
             'gce_status': inst.extra['status'],
@@ -222,15 +229,15 @@ class GceInventory(object):
             'gce_tags': inst.extra['tags'],
             'gce_tags': inst.extra['tags'],
             'gce_metadata': md,
             'gce_metadata': md,
             'gce_network': net,
             'gce_network': net,
-            # Hosts don't always have a public IP name
-            #'ansible_ssh_host': inst.public_ips[0]
+            # Hosts don't have a public name, so we add an IP
+            'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
         }
         }
 
 
     def get_instance(self, instance_name):
     def get_instance(self, instance_name):
         '''Gets details about a specific instance '''
         '''Gets details about a specific instance '''
         try:
         try:
             return self.driver.ex_get_node(instance_name)
             return self.driver.ex_get_node(instance_name)
-        except Exception, e:
+        except Exception as e:
             return None
             return None
 
 
     def group_instances(self):
     def group_instances(self):
@@ -250,7 +257,10 @@ class GceInventory(object):
 
 
             tags = node.extra['tags']
             tags = node.extra['tags']
             for t in tags:
             for t in tags:
-                tag = 'tag_%s' % t
+                if t.startswith('group-'):
+                    tag = t[6:]
+                else:
+                    tag = 'tag_%s' % t
                 if groups.has_key(tag): groups[tag].append(name)
                 if groups.has_key(tag): groups[tag].append(name)
                 else: groups[tag] = [name]
                 else: groups[tag] = [name]
 
 

+ 0 - 32
inventory/multi_ec2.yaml.example

@@ -1,32 +0,0 @@
-# multi ec2 inventory configs
-#
-cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
-
-accounts:
-  - name: aws1
-    provider: aws/hosts/ec2.py
-    provider_config:
-      ec2:
-        regions: all
-        regions_exclude:  us-gov-west-1,cn-north-1
-        destination_variable: public_dns_name
-        route53: False
-        cache_path: ~/.ansible/tmp
-        cache_max_age: 300
-        vpc_destination_variable: ip_address
-    env_vars:
-      AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
-      AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-    all_group: ec2
-    extra_vars:
-      cloud: aws
-      account: aws1
-
-- name: aws2
-    provider: aws/hosts/ec2.py
-    env_vars:
-      AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
-      AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-      EC2_INI_PATH: /etc/ansible/ec2.ini
-
-cache_max_age: 60

+ 92 - 52
inventory/multi_ec2.py

@@ -1,6 +1,6 @@
 #!/usr/bin/env python2
 #!/usr/bin/env python2
 '''
 '''
-    Fetch and combine multiple ec2 account settings into a single
+    Fetch and combine multiple inventory account settings into a single
     json hash.
     json hash.
 '''
 '''
 # vim: expandtab:tabstop=4:shiftwidth=4
 # vim: expandtab:tabstop=4:shiftwidth=4
@@ -15,13 +15,19 @@ import errno
 import fcntl
 import fcntl
 import tempfile
 import tempfile
 import copy
 import copy
+from string import Template
+import shutil
 
 
-CONFIG_FILE_NAME = 'multi_ec2.yaml'
-DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
+CONFIG_FILE_NAME = 'multi_inventory.yaml'
+DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
 
 
-class MultiEc2(object):
+class MultiInventoryException(Exception):
+    '''Exceptions for MultiInventory class'''
+    pass
+
+class MultiInventory(object):
     '''
     '''
-       MultiEc2 class:
+       MultiInventory class:
             Opens a yaml config file and reads aws credentials.
             Opens a yaml config file and reads aws credentials.
             Stores a json hash of resources in result.
             Stores a json hash of resources in result.
     '''
     '''
@@ -35,7 +41,7 @@ class MultiEc2(object):
 
 
         self.cache_path = DEFAULT_CACHE_PATH
         self.cache_path = DEFAULT_CACHE_PATH
         self.config = None
         self.config = None
-        self.all_ec2_results = {}
+        self.all_inventory_results = {}
         self.result = {}
         self.result = {}
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
 
@@ -56,7 +62,7 @@ class MultiEc2(object):
            cache is valid for the inventory.
            cache is valid for the inventory.
 
 
            if the cache is valid; return cache
            if the cache is valid; return cache
-           else the credentials are loaded from multi_ec2.yaml or from the env
+           else the credentials are loaded from multi_inventory.yaml or from the env
            and we attempt to get the inventory from the provider specified.
            and we attempt to get the inventory from the provider specified.
         '''
         '''
         # load yaml
         # load yaml
@@ -111,6 +117,10 @@ class MultiEc2(object):
         with open(conf_file) as conf:
         with open(conf_file) as conf:
             config = yaml.safe_load(conf)
             config = yaml.safe_load(conf)
 
 
+        # Provide a check for unique account names
+        if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']):
+            raise MultiInventoryException('Duplicate account names in config file')
+
         return config
         return config
 
 
     def get_provider_tags(self, provider, env=None):
     def get_provider_tags(self, provider, env=None):
@@ -136,23 +146,25 @@ class MultiEc2(object):
         else:
         else:
             cmds.append('--list')
             cmds.append('--list')
 
 
-        cmds.append('--refresh-cache')
+        if 'aws' in provider.lower():
+            cmds.append('--refresh-cache')
 
 
         return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
         return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
                                 stdout=subprocess.PIPE, env=env)
                                 stdout=subprocess.PIPE, env=env)
 
 
     @staticmethod
     @staticmethod
-    def generate_config(config_data):
-        """Generate the ec2.ini file in as a secure temp file.
-           Once generated, pass it to the ec2.py as an environment variable.
+    def generate_config(provider_files):
+        """Generate the provider_files in a temporary directory.
         """
         """
-        fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
-        for section, values in config_data.items():
-            os.write(fildes, "[%s]\n" % section)
-            for option, value  in values.items():
-                os.write(fildes, "%s = %s\n" % (option, value))
-        os.close(fildes)
-        return tmp_file_path
+        prefix = 'multi_inventory.'
+        tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
+        for provider_file in provider_files:
+            filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+')
+            content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
+            filedes.write(content)
+            filedes.close()
+
+        return tmp_dir_path
 
 
     def run_provider(self):
     def run_provider(self):
         '''Setup the provider call with proper variables
         '''Setup the provider call with proper variables
@@ -160,13 +172,21 @@ class MultiEc2(object):
         '''
         '''
         try:
         try:
             all_results = []
             all_results = []
-            tmp_file_paths = []
+            tmp_dir_paths = []
             processes = {}
             processes = {}
             for account in self.config['accounts']:
             for account in self.config['accounts']:
-                env = account['env_vars']
-                if account.has_key('provider_config'):
-                    tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
-                    env['EC2_INI_PATH'] = tmp_file_paths[-1]
+                tmp_dir = None
+                if account.has_key('provider_files'):
+                    tmp_dir = MultiInventory.generate_config(account['provider_files'])
+                    tmp_dir_paths.append(tmp_dir)
+
+                # Update env vars after creating provider_config_files
+                # so that we can grab the tmp_dir if it exists
+                env = account.get('env_vars', {})
+                if env and tmp_dir:
+                    for key, value in env.items():
+                        env[key] = Template(value).substitute(tmpdir=tmp_dir)
+
                 name = account['name']
                 name = account['name']
                 provider = account['provider']
                 provider = account['provider']
                 processes[name] = self.get_provider_tags(provider, env)
                 processes[name] = self.get_provider_tags(provider, env)
@@ -182,9 +202,9 @@ class MultiEc2(object):
                 })
                 })
 
 
         finally:
         finally:
-            # Clean up the mkstemp file
-            for tmp_file in tmp_file_paths:
-                os.unlink(tmp_file)
+            # Clean up the mkdtemp dirs
+            for tmp_dir in tmp_dir_paths:
+                shutil.rmtree(tmp_dir)
 
 
         return all_results
         return all_results
 
 
@@ -223,7 +243,7 @@ class MultiEc2(object):
                               ]
                               ]
                     raise RuntimeError('\n'.join(err_msg).format(**result))
                     raise RuntimeError('\n'.join(err_msg).format(**result))
                 else:
                 else:
-                    self.all_ec2_results[result['name']] = json.loads(result['out'])
+                    self.all_inventory_results[result['name']] = json.loads(result['out'])
 
 
             # Check if user wants extra vars in yaml by
             # Check if user wants extra vars in yaml by
             # having hostvars and all_group defined
             # having hostvars and all_group defined
@@ -231,29 +251,52 @@ class MultiEc2(object):
                 self.apply_account_config(acc_config)
                 self.apply_account_config(acc_config)
 
 
             # Build results by merging all dictionaries
             # Build results by merging all dictionaries
-            values = self.all_ec2_results.values()
+            values = self.all_inventory_results.values()
             values.insert(0, self.result)
             values.insert(0, self.result)
             for result in  values:
             for result in  values:
-                MultiEc2.merge_destructively(self.result, result)
+                MultiInventory.merge_destructively(self.result, result)
+
+    def add_entry(self, data, keys, item):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            keys = a.b
+            item = c
+        '''
+        if "." in keys:
+            key, rest = keys.split(".", 1)
+            if key not in data:
+                data[key] = {}
+            self.add_entry(data[key], rest, item)
+        else:
+            data[keys] = item
+
+    def get_entry(self, data, keys):
+        ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            keys = a.b
+            return c
+        '''
+        if keys and "." in keys:
+            key, rest = keys.split(".", 1)
+            return self.get_entry(data[key], rest)
+        else:
+            return data.get(keys, None)
 
 
     def apply_account_config(self, acc_config):
     def apply_account_config(self, acc_config):
         ''' Apply account config settings
         ''' Apply account config settings
         '''
         '''
-        results = self.all_ec2_results[acc_config['name']]
+        results = self.all_inventory_results[acc_config['name']]
+        results['all_hosts'] = results['_meta']['hostvars'].keys()
 
 
         # Update each hostvar with the newly desired key: value from extra_*
         # Update each hostvar with the newly desired key: value from extra_*
-        for _extra in ['extra_groups', 'extra_vars']:
+        for _extra in ['extra_vars', 'extra_groups']:
             for new_var, value in acc_config.get(_extra, {}).items():
             for new_var, value in acc_config.get(_extra, {}).items():
-                # Verify the account results look sane
-                # by checking for these keys ('_meta' and 'hostvars' exist)
-                if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
-                    for data in results['_meta']['hostvars'].values():
-                        data[str(new_var)] = str(value)
+                for data in results['_meta']['hostvars'].values():
+                    self.add_entry(data, new_var, value)
 
 
                 # Add this group
                 # Add this group
-                if _extra == 'extra_groups' and results.has_key(acc_config['all_group']):
-                    results["%s_%s" % (new_var, value)] = \
-                     copy.copy(results[acc_config['all_group']])
+                if _extra == 'extra_groups':
+                    results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts'])
 
 
         # Clone groups goes here
         # Clone groups goes here
         for to_name, from_name in acc_config.get('clone_groups', {}).items():
         for to_name, from_name in acc_config.get('clone_groups', {}).items():
@@ -262,14 +305,11 @@ class MultiEc2(object):
 
 
         # Clone vars goes here
         # Clone vars goes here
         for to_name, from_name in acc_config.get('clone_vars', {}).items():
         for to_name, from_name in acc_config.get('clone_vars', {}).items():
-            # Verify the account results look sane
-            # by checking for these keys ('_meta' and 'hostvars' exist)
-            if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
-                for data in results['_meta']['hostvars'].values():
-                    data[str(to_name)] = data.get(str(from_name), 'nil')
+            for data in results['_meta']['hostvars'].values():
+                self.add_entry(data, to_name, self.get_entry(data, from_name))
 
 
-        # store the results back into all_ec2_results
-        self.all_ec2_results[acc_config['name']] = results
+        # store the results back into all_inventory_results
+        self.all_inventory_results[acc_config['name']] = results
 
 
     @staticmethod
     @staticmethod
     def merge_destructively(input_a, input_b):
     def merge_destructively(input_a, input_b):
@@ -277,7 +317,7 @@ class MultiEc2(object):
         for key in input_b:
         for key in input_b:
             if key in input_a:
             if key in input_a:
                 if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
                 if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
-                    MultiEc2.merge_destructively(input_a[key], input_b[key])
+                    MultiInventory.merge_destructively(input_a[key], input_b[key])
                 elif input_a[key] == input_b[key]:
                 elif input_a[key] == input_b[key]:
                     pass # same leaf value
                     pass # same leaf value
                 # both lists so add each element in b to a if it does ! exist
                 # both lists so add each element in b to a if it does ! exist
@@ -333,7 +373,7 @@ class MultiEc2(object):
                 if exc.errno != errno.EEXIST or not os.path.isdir(path):
                 if exc.errno != errno.EEXIST or not os.path.isdir(path):
                     raise
                     raise
 
 
-        json_data = MultiEc2.json_format_dict(self.result, True)
+        json_data = MultiInventory.json_format_dict(self.result, True)
         with open(self.cache_path, 'w') as cache:
         with open(self.cache_path, 'w') as cache:
             try:
             try:
                 fcntl.flock(cache, fcntl.LOCK_EX)
                 fcntl.flock(cache, fcntl.LOCK_EX)
@@ -369,7 +409,7 @@ class MultiEc2(object):
 
 
 
 
 if __name__ == "__main__":
 if __name__ == "__main__":
-    MEC2 = MultiEc2()
-    MEC2.parse_cli_args()
-    MEC2.run()
-    print MEC2.result_str()
+    MI2 = MultiInventory()
+    MI2.parse_cli_args()
+    MI2.run()
+    print MI2.result_str()

+ 51 - 0
inventory/multi_inventory.yaml.example

@@ -0,0 +1,51 @@
+# multi inventory configs
+#
+cache_location: ~/.ansible/tmp/multi_inventory.cache
+
+accounts:
+  - name: aws1
+    provider: aws/ec2.py
+    provider_files:
+    - name: ec2.ini
+      contents: |-
+        [ec2]
+        regions = all
+        regions_exclude =  us-gov-west-1,cn-north-1
+        destination_variable = public_dns_name
+        route53 = False
+        cache_path = ~/.ansible/tmp
+        cache_max_age = 300
+        vpc_destination_variable = ip_address
+    env_vars:
+      AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
+      AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+      EC2_INI_PATH: ${tmpdir}/ec2.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+    extra_vars:
+      cloud: aws
+      account: aws1
+
+-   name: mygce
+    extra_vars:
+      cloud: gce
+      account: gce1
+    env_vars:
+      GCE_INI_PATH: ${tmpdir}/gce.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+    provider: gce/gce.py
+    provider_files:
+    - name: priv_key.pem
+      contents: |-
+        -----BEGIN PRIVATE KEY-----
+        yourprivatekeydatahere
+        -----END PRIVATE KEY-----
+    - name: gce.ini
+      contents: |-
+        [gce]
+        gce_service_account_email_address = <uuid>@developer.gserviceaccount.com
+        gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+        gce_project_id = gce-project
+        zone = us-central1-a
+        network = default
+        gce_machine_type = n1-standard-2
+        gce_machine_image = rhel7
+
+cache_max_age: 600

+ 50 - 6
openshift-ansible.spec

@@ -5,7 +5,7 @@
 }
 }
 
 
 Name:           openshift-ansible
 Name:           openshift-ansible
-Version:        3.0.6
+Version:        3.0.7
 Release:        1%{?dist}
 Release:        1%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 License:        ASL 2.0
@@ -47,9 +47,9 @@ cp -pP bin/openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
 cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d
 cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d
 cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
 cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
 # Fix links
 # Fix links
-rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
 rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws
 rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws
-ln -sf %{_datadir}/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+ln -sf %{_datadir}/ansible/inventory/multi_inventory.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
 ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws
 ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws
 
 
 # openshift-ansible-docs install
 # openshift-ansible-docs install
@@ -60,8 +60,8 @@ mkdir -p %{buildroot}/etc/ansible
 mkdir -p %{buildroot}%{_datadir}/ansible/inventory
 mkdir -p %{buildroot}%{_datadir}/ansible/inventory
 mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws
 mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws
 mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce
 mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce
-cp -p inventory/multi_ec2.py %{buildroot}%{_datadir}/ansible/inventory
-cp -p inventory/multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
+cp -p inventory/multi_inventory.py %{buildroot}%{_datadir}/ansible/inventory
+cp -p inventory/multi_inventory.yaml.example %{buildroot}/etc/ansible/multi_inventory.yaml
 cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws
 cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws
 cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce
 cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce
 
 
@@ -137,7 +137,7 @@ Ansible Inventories used with the openshift-ansible scripts and playbooks.
 %files inventory
 %files inventory
 %config(noreplace) /etc/ansible/*
 %config(noreplace) /etc/ansible/*
 %dir %{_datadir}/ansible/inventory
 %dir %{_datadir}/ansible/inventory
-%{_datadir}/ansible/inventory/multi_ec2.py*
+%{_datadir}/ansible/inventory/multi_inventory.py*
 
 
 %package inventory-aws
 %package inventory-aws
 Summary:       Openshift and Atomic Enterprise Ansible Inventories for AWS
 Summary:       Openshift and Atomic Enterprise Ansible Inventories for AWS
@@ -170,6 +170,9 @@ Ansible Inventories for GCE used with the openshift-ansible scripts and playbook
 %package playbooks
 %package playbooks
 Summary:       Openshift and Atomic Enterprise Ansible Playbooks
 Summary:       Openshift and Atomic Enterprise Ansible Playbooks
 Requires:      %{name}
 Requires:      %{name}
+Requires:      %{name}-roles
+Requires:      %{name}-lookup-plugins
+Requires:      %{name}-filter-plugins
 BuildArch:     noarch
 BuildArch:     noarch
 
 
 %description playbooks
 %description playbooks
@@ -185,6 +188,8 @@ BuildArch:     noarch
 %package roles
 %package roles
 Summary:       Openshift and Atomic Enterprise Ansible roles
 Summary:       Openshift and Atomic Enterprise Ansible roles
 Requires:      %{name}
 Requires:      %{name}
+Requires:      %{name}-lookup-plugins
+Requires:      %{name}-filter-plugins
 BuildArch:     noarch
 BuildArch:     noarch
 
 
 %description roles
 %description roles
@@ -249,6 +254,45 @@ Atomic OpenShift Utilities includes
 
 
 
 
 %changelog
 %changelog
+* Wed Nov 04 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.7-1
+- added the %%util in zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct default playbook directory
+  (smunilla@redhat.com)
+- Support for gce (kwoodson@redhat.com)
+- fixed a dumb naming mistake (mwoodson@redhat.com)
+- added disk tps checks to zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct inaccurate prompt (smunilla@redhat.com)
+- atomic-openshift-installer: Add default openshift-ansible-playbook
+  (smunilla@redhat.com)
+- ooinstall: Add check for nopwd sudo (smunilla@redhat.com)
+- ooinstall: Update local install check (smunilla@redhat.com)
+- oo-install: Support running on the host to be deployed (smunilla@redhat.com)
+- Moving to Openshift Etcd application (mmahut@redhat.com)
+- Add all the possible servicenames to openshift_all_hostnames for masters
+  (sdodson@redhat.com)
+- Adding openshift.node.etcd items (mmahut@redhat.com)
+- Fix etcd cert generation when etcd_interface is defined (jdetiber@redhat.com)
+- get zabbix ready to start tracking status of pcp (jdiaz@redhat.com)
+- split inventory into subpackages (tdawson@redhat.com)
+- changed the cpu alert to only alert if cpu idle more than 5x. Change alert to
+  warning (mwoodson@redhat.com)
+- Rename install_transactions module to openshift_ansible.
+  (dgoodwin@redhat.com)
+- atomic-openshift-installer: Text improvements (smunilla@redhat.com)
+- Add utils subpackage missing dep on openshift-ansible-roles.
+  (dgoodwin@redhat.com)
+- Disable requiretty for only the openshift user (error@ioerror.us)
+- Don't require tty to run sudo (error@ioerror.us)
+- Attempt to remove the various interfaces left over from an install
+  (bleanhar@redhat.com)
+- Pulling latest gce.py module from ansible (kwoodson@redhat.com)
+- Disable OpenShift features if installing Atomic Enterprise
+  (jdetiber@redhat.com)
+- Use default playbooks if available. (dgoodwin@redhat.com)
+- Add uninstall subcommand. (dgoodwin@redhat.com)
+- Add subcommands to CLI. (dgoodwin@redhat.com)
+- Remove images options in oadm command (nakayamakenjiro@gmail.com)
+
 * Fri Oct 30 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.6-1
 * Fri Oct 30 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.6-1
 - Adding python-boto and python-libcloud to openshift-ansible-inventory
 - Adding python-boto and python-libcloud to openshift-ansible-inventory
   dependency (kwoodson@redhat.com)
   dependency (kwoodson@redhat.com)

+ 4 - 4
playbooks/aws/openshift-cluster/launch.yml

@@ -11,7 +11,7 @@
       msg: Deployment type not supported for aws provider yet
       msg: Deployment type not supported for aws provider yet
     when: deployment_type == 'enterprise'
     when: deployment_type == 'enterprise'
 
 
-  - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
   - include: tasks/launch_instances.yml
   - include: tasks/launch_instances.yml
     vars:
     vars:
       instances: "{{ etcd_names }}"
       instances: "{{ etcd_names }}"
@@ -19,7 +19,7 @@
       type: "{{ k8s_type }}"
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
       g_sub_host_type: "default"
 
 
-  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
   - include: tasks/launch_instances.yml
   - include: tasks/launch_instances.yml
     vars:
     vars:
       instances: "{{ master_names }}"
       instances: "{{ master_names }}"
@@ -27,7 +27,7 @@
       type: "{{ k8s_type }}"
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
       g_sub_host_type: "default"
 
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
     vars:
       type: "compute"
       type: "compute"
       count: "{{ num_nodes }}"
       count: "{{ num_nodes }}"
@@ -38,7 +38,7 @@
       type: "{{ k8s_type }}"
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
     vars:
       type: "infra"
       type: "infra"
       count: "{{ num_infra }}"
       count: "{{ num_infra }}"

+ 1 - 64
playbooks/common/openshift-cluster/config.yml

@@ -1,68 +1,5 @@
 ---
 ---
-- name: Populate config host groups
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - fail:
-      msg: This playbook requires g_etcd_group to be set
-    when: g_etcd_group is not defined
-
-  - fail:
-      msg: This playbook requires g_masters_group to be set
-    when: g_masters_group is not defined
-
-  - fail:
-      msg: This playbook requires g_nodes_group to be set
-    when: g_nodes_group is not defined
-
-  - name: Evaluate oo_etcd_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_etcd_to_config
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_etcd_group] | default([])
-
-  - name: Evaluate oo_masters_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_masters_to_config
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_masters_group] | default([])
-
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_nodes_group] | default([])
-
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_masters_group] | default([])
-    when: g_nodeonmaster is defined and g_nodeonmaster == true
-
-  - name: Evaluate oo_first_etcd
-    add_host:
-      name: "{{ groups[g_etcd_group][0] }}"
-      groups: oo_first_etcd
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
-
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups[g_masters_group][0] }}"
-      groups: oo_first_master
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+- include: evaluate_groups.yml
 
 
 - include: ../openshift-etcd/config.yml
 - include: ../openshift-etcd/config.yml
 
 

+ 64 - 0
playbooks/common/openshift-cluster/evaluate_groups.yml

@@ -0,0 +1,64 @@
+---
+- name: Populate config host groups
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - fail:
+      msg: This playbook requires g_etcd_group to be set
+    when: g_etcd_group is not defined
+
+  - fail:
+      msg: This playbook requires g_masters_group to be set
+    when: g_masters_group is not defined
+
+  - fail:
+      msg: This playbook requires g_nodes_group to be set
+    when: g_nodes_group is not defined
+
+  - name: Evaluate oo_etcd_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_etcd_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_etcd_group] | default([])
+
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_masters_group] | default([])
+
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_nodes_group] | default([])
+
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_masters_group] | default([])
+    when: g_nodeonmaster is defined and g_nodeonmaster == true
+
+  - name: Evaluate oo_first_etcd
+    add_host:
+      name: "{{ groups[g_etcd_group][0] }}"
+      groups: oo_first_etcd
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
+
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups[g_masters_group][0] }}"
+      groups: oo_first_master
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    when: g_masters_group in groups and (groups[g_masters_group] | length) > 0

+ 7 - 0
playbooks/common/openshift-cluster/scaleup.yml

@@ -0,0 +1,7 @@
+---
+- include: evaluate_groups.yml
+
+- include: ../openshift-node/config.yml
+  vars:
+    osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"

playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml → playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml


playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml → playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml


playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml → playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml


+ 9 - 0
playbooks/common/openshift-master/config.yml

@@ -199,9 +199,18 @@
       validate_checksum: yes
       validate_checksum: yes
     with_items: masters_needing_certs
     with_items: masters_needing_certs
 
 
+- name: Inspect named certificates
+  hosts: oo_first_master
+  tasks:
+  - name: Collect certificate names
+    set_fact:
+      parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir, openshift.common.internal_hostnames) }}"
+    when: openshift_master_named_certificates is defined
+
 - name: Configure master instances
 - name: Configure master instances
   hosts: oo_masters_to_config
   hosts: oo_masters_to_config
   vars:
   vars:
+    named_certificates: "{{ hostvars[groups['oo_first_master'][0]]['parsed_named_certificates'] | default([])}}"
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
     embedded_etcd: "{{ openshift.master.embedded_etcd }}"
     embedded_etcd: "{{ openshift.master.embedded_etcd }}"

+ 3 - 3
playbooks/gce/openshift-cluster/launch.yml

@@ -9,7 +9,7 @@
   - fail: msg="Deployment type not supported for gce provider yet"
   - fail: msg="Deployment type not supported for gce provider yet"
     when: deployment_type == 'enterprise'
     when: deployment_type == 'enterprise'
 
 
-  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
   - include: tasks/launch_instances.yml
   - include: tasks/launch_instances.yml
     vars:
     vars:
       instances: "{{ master_names }}"
       instances: "{{ master_names }}"
@@ -17,7 +17,7 @@
       type: "{{ k8s_type }}"
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
       g_sub_host_type: "default"
 
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
     vars:
       type: "compute"
       type: "compute"
       count: "{{ num_nodes }}"
       count: "{{ num_nodes }}"
@@ -28,7 +28,7 @@
       type: "{{ k8s_type }}"
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
     vars:
       type: "infra"
       type: "infra"
       count: "{{ num_infra }}"
       count: "{{ num_infra }}"

+ 4 - 4
playbooks/libvirt/openshift-cluster/launch.yml

@@ -17,7 +17,7 @@
 
 
   - include: tasks/configure_libvirt.yml
   - include: tasks/configure_libvirt.yml
 
 
-  - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
   - include: tasks/launch_instances.yml
   - include: tasks/launch_instances.yml
     vars:
     vars:
       instances: "{{ etcd_names }}"
       instances: "{{ etcd_names }}"
@@ -25,7 +25,7 @@
       type: "{{ k8s_type }}"
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
       g_sub_host_type: "default"
 
 
-  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
   - include: tasks/launch_instances.yml
   - include: tasks/launch_instances.yml
     vars:
     vars:
       instances: "{{ master_names }}"
       instances: "{{ master_names }}"
@@ -33,7 +33,7 @@
       type: "{{ k8s_type }}"
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
       g_sub_host_type: "default"
 
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
     vars:
       type: "compute"
       type: "compute"
       count: "{{ num_nodes }}"
       count: "{{ num_nodes }}"
@@ -44,7 +44,7 @@
       type: "{{ k8s_type }}"
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
     vars:
       type: "infra"
       type: "infra"
       count: "{{ num_infra }}"
       count: "{{ num_infra }}"

+ 6 - 0
playbooks/libvirt/openshift-cluster/templates/user-data

@@ -19,5 +19,11 @@ system_info:
 ssh_authorized_keys:
 ssh_authorized_keys:
   - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
   - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
 
 
+write_files:
+  - path: /etc/sudoers.d/00-openshift-no-requiretty
+    permissions: 440
+    content: |
+        Defaults:openshift !requiretty
+
 runcmd:
 runcmd:
   - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart
   - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart

+ 10 - 6
roles/openshift_ansible_inventory/tasks/main.yml

@@ -1,12 +1,16 @@
 ---
 ---
 - yum:
 - yum:
-    name: openshift-ansible-inventory
+    name: "{{ item }}"
     state: present
     state: present
+  with_items:
+  - openshift-ansible-inventory
+  - openshift-ansible-inventory-aws
+  - openshift-ansible-inventory-gce
 
 
 - name:
 - name:
   copy:
   copy:
     content: "{{ oo_inventory_accounts | to_nice_yaml }}"
     content: "{{ oo_inventory_accounts | to_nice_yaml }}"
-    dest: /etc/ansible/multi_ec2.yaml
+    dest: /etc/ansible/multi_inventory.yaml
     group: "{{ oo_inventory_group }}"
     group: "{{ oo_inventory_group }}"
     owner: "{{ oo_inventory_owner }}"
     owner: "{{ oo_inventory_owner }}"
     mode: "0640"
     mode: "0640"
@@ -20,17 +24,17 @@
 
 
 - file:
 - file:
     state: link
     state: link
-    src: /usr/share/ansible/inventory/multi_ec2.py
-    dest: /etc/ansible/inventory/multi_ec2.py
+    src: /usr/share/ansible/inventory/multi_inventory.py
+    dest: /etc/ansible/inventory/multi_inventory.py
     owner: root
     owner: root
     group: libra_ops
     group: libra_ops
 
 
 # This cron uses the above location to call its job
 # This cron uses the above location to call its job
 - name: Cron to keep cache fresh
 - name: Cron to keep cache fresh
   cron:
   cron:
-    name: 'multi_ec2_inventory'
+    name: 'multi_inventory'
     minute: '*/10'
     minute: '*/10'
-    job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null'
+    job: '/usr/share/ansible/inventory/multi_inventory.py --refresh-cache &> /dev/null'
   when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
   when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
 
 
 - name: Set cache location
 - name: Set cache location

+ 11 - 3
roles/openshift_facts/library/openshift_facts.py

@@ -484,12 +484,16 @@ def set_aggregate_facts(facts):
             dict: the facts dict updated with aggregated facts
             dict: the facts dict updated with aggregated facts
     """
     """
     all_hostnames = set()
     all_hostnames = set()
+    internal_hostnames = set()
     if 'common' in facts:
     if 'common' in facts:
         all_hostnames.add(facts['common']['hostname'])
         all_hostnames.add(facts['common']['hostname'])
         all_hostnames.add(facts['common']['public_hostname'])
         all_hostnames.add(facts['common']['public_hostname'])
         all_hostnames.add(facts['common']['ip'])
         all_hostnames.add(facts['common']['ip'])
         all_hostnames.add(facts['common']['public_ip'])
         all_hostnames.add(facts['common']['public_ip'])
 
 
+        internal_hostnames.add(facts['common']['hostname'])
+        internal_hostnames.add(facts['common']['ip'])
+
         if 'master' in facts:
         if 'master' in facts:
             # FIXME: not sure why but facts['dns']['domain'] fails
             # FIXME: not sure why but facts['dns']['domain'] fails
             cluster_domain = 'cluster.local'
             cluster_domain = 'cluster.local'
@@ -497,11 +501,14 @@ def set_aggregate_facts(facts):
                 all_hostnames.add(facts['master']['cluster_hostname'])
                 all_hostnames.add(facts['master']['cluster_hostname'])
             if 'cluster_public_hostname' in facts['master']:
             if 'cluster_public_hostname' in facts['master']:
                 all_hostnames.add(facts['master']['cluster_public_hostname'])
                 all_hostnames.add(facts['master']['cluster_public_hostname'])
-            all_hostnames.update(['openshift', 'openshift.default', 'openshift.default.svc',
-                                  'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
-                                  'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain])
+            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
+                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
+                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
+            all_hostnames.update(svc_names)
+            internal_hostnames.update(svc_names)
             first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1])
             first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1])
             all_hostnames.add(first_svc_ip)
             all_hostnames.add(first_svc_ip)
+            internal_hostnames.add(first_svc_ip)
 
 
             if facts['master']['embedded_etcd']:
             if facts['master']['embedded_etcd']:
                 facts['master']['etcd_data_dir'] = os.path.join(
                 facts['master']['etcd_data_dir'] = os.path.join(
@@ -510,6 +517,7 @@ def set_aggregate_facts(facts):
                 facts['master']['etcd_data_dir'] = '/var/lib/etcd'
                 facts['master']['etcd_data_dir'] = '/var/lib/etcd'
 
 
         facts['common']['all_hostnames'] = list(all_hostnames)
         facts['common']['all_hostnames'] = list(all_hostnames)
+        facts['common']['internal_hostnames'] = list(internal_hostnames)
 
 
     return facts
     return facts
 
 

+ 15 - 1
roles/openshift_master/templates/master.yaml.v1.j2

@@ -16,12 +16,15 @@ assetConfig:
     maxRequestsInFlight: 0
     maxRequestsInFlight: 0
     requestTimeoutSeconds: 0
     requestTimeoutSeconds: 0
 corsAllowedOrigins:
 corsAllowedOrigins:
-{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %}
+{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] | unique %}
   - {{ origin }}
   - {{ origin }}
 {% endfor %}
 {% endfor %}
 {% for custom_origin in openshift.master.custom_cors_origins | default("") %}
 {% for custom_origin in openshift.master.custom_cors_origins | default("") %}
   - {{ custom_origin }}
   - {{ custom_origin }}
 {% endfor %}
 {% endfor %}
+{% for name in (named_certificates | map(attribute='names')) | list | oo_flatten %}
+  - {{ name }}
+{% endfor %}
 {% if 'disabled_features' in openshift.master %}
 {% if 'disabled_features' in openshift.master %}
 disabledFeatures: {{ openshift.master.disabled_features | to_json }}
 disabledFeatures: {{ openshift.master.disabled_features | to_json }}
 {% endif %}
 {% endif %}
@@ -133,3 +136,14 @@ servingInfo:
   keyFile: master.server.key
   keyFile: master.server.key
   maxRequestsInFlight: 500
   maxRequestsInFlight: 500
   requestTimeoutSeconds: 3600
   requestTimeoutSeconds: 3600
+{% if named_certificates %}
+  namedCertificates:
+{% for named_certificate in named_certificates %}
+  - certFile: {{ named_certificate['certfile'] }}
+    keyFile: {{ named_certificate['keyfile'] }}
+    names:
+{% for name in named_certificate['names'] %}
+    - "{{ name }}"
+{% endfor %}
+{% endfor %}
+{% endif %}

+ 0 - 3
roles/openshift_master_certificates/tasks/main.yml

@@ -34,9 +34,6 @@
     - serviceaccounts.private.key
     - serviceaccounts.private.key
     - serviceaccounts.public.key
     - serviceaccounts.public.key
 
 
-- debug: msg="{{ item.openshift.master.all_hostnames | join (',') }}"
-  with_items: masters_needing_certs
-
 - name: Create the master certificates if they do not already exist
 - name: Create the master certificates if they do not already exist
   command: >
   command: >
     {{ openshift.common.admin_binary }} create-master-certs
     {{ openshift.common.admin_binary }} create-master-certs

+ 21 - 0
roles/os_zabbix/vars/template_os_linux.yml

@@ -194,6 +194,11 @@ g_template_os_linux:
     lifetime: 1
     lifetime: 1
     description: "Dynamically register the filesystems"
     description: "Dynamically register the filesystems"
 
 
+  - name: disc.disk
+    key: disc.disk
+    lifetime: 1
+    description: "Dynamically register disks on a node"
+
   zitemprototypes:
   zitemprototypes:
   - discoveryrule_key: disc.filesys
   - discoveryrule_key: disc.filesys
     name: "disc.filesys.full.{#OSO_FILESYS}"
     name: "disc.filesys.full.{#OSO_FILESYS}"
@@ -211,6 +216,22 @@ g_template_os_linux:
     applications:
     applications:
     - Disk
     - Disk
 
 
+  - discoveryrule_key: disc.disk
+    name: "TPS (IOPS) for disk {#OSO_DISK}"
+    key: "disc.disk.tps[{#OSO_DISK}]"
+    value_type: int
+    description: "PCP disk.dev.totals metric measured over a period of time.  This shows how many disk transactions per second the disk is using"
+    applications:
+    - Disk
+
+  - discoveryrule_key: disc.disk
+    name: "Percent Utilized for disk {#OSO_DISK}"
+    key: "disc.disk.putil[{#OSO_DISK}]"
+    value_type: float
+    description: "PCP disk.dev.avactive metric measured over a period of time.  This is the '%util' in the iostat command"
+    applications:
+    - Disk
+
   ztriggerprototypes:
   ztriggerprototypes:
   - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
   - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
     expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
     expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'

+ 1 - 1
test/units/README.md

@@ -4,4 +4,4 @@ These should be run by sourcing the env-setup:
 $ source test/env-setup
 $ source test/env-setup
 
 
 Then navigate to the test/units/ directory.
 Then navigate to the test/units/ directory.
-$ python -m unittest multi_ec2_test
+$ python -m unittest multi_inventory_test

+ 114 - 0
test/units/multi_inventory_test.py

@@ -0,0 +1,114 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for MultiInventory
+'''
+
+import unittest
+import multi_inventory
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name
+class MultiInventoryTest(unittest.TestCase):
+    '''
+     Test class for multiInventory
+    '''
+
+#    def setUp(self):
+#        '''setup method'''
+#        pass
+
+    def test_merge_simple_1(self):
+        '''Testing a simple merge of 2 dictionaries'''
+        a = {"key1" : 1}
+        b = {"key1" : 2}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"key1": [1, 2]})
+
+    def test_merge_b_empty(self):
+        '''Testing a merge of an empty dictionary'''
+        a = {"key1" : 1}
+        b = {}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"key1": 1})
+
+    def test_merge_a_empty(self):
+        '''Testing a merge of an empty dictionary'''
+        b = {"key1" : 1}
+        a = {}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"key1": 1})
+
+    def test_merge_hash_array(self):
+        '''Testing a merge of a dictionary and a dictionary with an array'''
+        a = {"key1" : {"hasha": 1}}
+        b = {"key1" : [1, 2]}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"key1": [{"hasha": 1}, 1, 2]})
+
+    def test_merge_array_hash(self):
+        '''Testing a merge of a dictionary with an array and a dictionary with a hash'''
+        a = {"key1" : [1, 2]}
+        b = {"key1" : {"hasha": 1}}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"key1": [1, 2, {"hasha": 1}]})
+
+    def test_merge_keys_1(self):
+        '''Testing a merge on a dictionary for keys'''
+        a = {"key1" : [1, 2], "key2" : {"hasha": 2}}
+        b = {"key2" : {"hashb": 1}}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"key1": [1, 2], "key2": {"hasha": 2, "hashb": 1}})
+
+    def test_merge_recursive_1(self):
+        '''Testing a recursive merge'''
+        a = {"a" : {"b": {"c": 1}}}
+        b = {"a" : {"b": {"c": 2}}}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
+
+    def test_merge_recursive_array_item(self):
+        '''Testing a recursive merge for an array'''
+        a = {"a" : {"b": {"c": [1]}}}
+        b = {"a" : {"b": {"c": 2}}}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
+
+    def test_merge_recursive_hash_item(self):
+        '''Testing a recursive merge for a hash'''
+        a = {"a" : {"b": {"c": {"d": 1}}}}
+        b = {"a" : {"b": {"c": 2}}}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
+
+    def test_merge_recursive_array_hash(self):
+        '''Testing a recursive merge for an array and a hash'''
+        a = {"a" : [{"b": {"c":  1}}]}
+        b = {"a" : {"b": {"c": 1}}}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
+
+    def test_merge_recursive_hash_array(self):
+        '''Testing a recursive merge for an array and a hash'''
+        a = {"a" : {"b": {"c": 1}}}
+        b = {"a" : [{"b": {"c":  1}}]}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
+
+#    def tearDown(self):
+#        '''TearDown method'''
+#        pass
+
+if __name__ == "__main__":
+    unittest.main()

+ 0 - 95
test/units/mutli_ec2_test.py

@@ -1,95 +0,0 @@
-#!/usr/bin/env python2
-
-import unittest
-import sys
-import os
-import sys
-import multi_ec2
-
-class MultiEc2Test(unittest.TestCase):
-
-    def setUp(self):
-        pass
-
-    def test_merge_simple_1(self):
-        a = {"key1" : 1}
-        b = {"key1" : 2}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"key1": [1,2]})
-
-    def test_merge_b_empty(self):
-        a = {"key1" : 1}
-        b = {}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"key1": 1})
-
-    def test_merge_a_empty(self):
-        b = {"key1" : 1}
-        a = {}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"key1": 1})
-
-    def test_merge_hash_array(self):
-        a = {"key1" : {"hasha": 1}}
-        b = {"key1" : [1,2]}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"key1": [{"hasha": 1}, 1,2]})
-
-    def test_merge_array_hash(self):
-        a = {"key1" : [1,2]}
-        b = {"key1" : {"hasha": 1}}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"key1": [1,2, {"hasha": 1}]})
-
-    def test_merge_keys_1(self):
-        a = {"key1" : [1,2], "key2" : {"hasha": 2}}
-        b = {"key2" : {"hashb": 1}}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"key1": [1,2], "key2": {"hasha": 2, "hashb": 1}})
-
-    def test_merge_recursive_1(self):
-        a = {"a" : {"b": {"c": 1}}}
-        b = {"a" : {"b": {"c": 2}}}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"a": {"b": {"c": [1,2]}}})
-
-    def test_merge_recursive_array_item(self):
-        a = {"a" : {"b": {"c": [1]}}}
-        b = {"a" : {"b": {"c": 2}}}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"a": {"b": {"c": [1,2]}}})
-
-    def test_merge_recursive_hash_item(self):
-        a = {"a" : {"b": {"c": {"d": 1}}}}
-        b = {"a" : {"b": {"c": 2}}}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
-
-    def test_merge_recursive_array_hash(self):
-        a = {"a" : [{"b": {"c":  1}}]}
-        b = {"a" : {"b": {"c": 1}}}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
-    def test_merge_recursive_hash_array(self):
-        a = {"a" : {"b": {"c": 1}}}
-        b = {"a" : [{"b": {"c":  1}}]}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
-    def tearDown(self):
-        pass
-
-if __name__ == "__main__":
-  unittest.main()