
Merge pull request #811 from openshift/master

master to prod
Kenny Woodson, 9 years ago
commit 7869fb8c26
63 changed files with 1028 additions and 455 deletions
  1. .tito/packages/openshift-ansible (+1 -1)
  2. README_OSE.md (+0 -3)
  3. bin/README_SHELL_COMPLETION (+1 -1)
  4. bin/openshift_ansible.conf.example (+1 -1)
  5. bin/openshift_ansible/awsutil.py (+7 -4)
  6. bin/ossh_bash_completion (+10 -10)
  7. bin/ossh_zsh_completion (+5 -5)
  8. bin/zsh_functions/_ossh (+2 -2)
  9. filter_plugins/oo_filters.py (+66 -1)
  10. inventory/byo/hosts.example (+6 -1)
  11. inventory/gce/hosts/gce.py (+21 -11)
  12. inventory/multi_ec2.yaml.example (+0 -32)
  13. inventory/multi_ec2.py (+92 -52)
  14. inventory/multi_inventory.yaml.example (+51 -0)
  15. openshift-ansible.spec (+47 -6)
  16. playbooks/aws/openshift-cluster/launch.yml (+4 -4)
  17. playbooks/common/openshift-cluster/config.yml (+1 -64)
  18. playbooks/common/openshift-cluster/evaluate_groups.yml (+64 -0)
  19. playbooks/common/openshift-cluster/scaleup.yml (+7 -0)
  20. playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml (+0 -0)
  21. playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml (+0 -0)
  22. playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml (+0 -0)
  23. playbooks/common/openshift-master/config.yml (+9 -0)
  24. playbooks/gce/openshift-cluster/launch.yml (+3 -3)
  25. playbooks/libvirt/openshift-cluster/launch.yml (+4 -4)
  26. playbooks/libvirt/openshift-cluster/templates/user-data (+6 -0)
  27. roles/etcd/README.md (+1 -1)
  28. roles/etcd/defaults/main.yaml (+0 -8)
  29. roles/etcd/handlers/main.yml (+1 -0)
  30. roles/etcd/meta/main.yml (+1 -1)
  31. roles/etcd/tasks/main.yml (+10 -2)
  32. roles/etcd/templates/etcd.conf.j2 (+2 -2)
  33. roles/etcd_ca/meta/main.yml (+1 -1)
  34. roles/etcd_ca/tasks/main.yml (+15 -15)
  35. roles/etcd_ca/templates/openssl_append.j2 (+15 -15)
  36. roles/etcd_ca/vars/main.yml (+0 -3)
  37. roles/etcd_certificates/tasks/client.yml (+1 -1)
  38. roles/etcd_certificates/tasks/main.yml (+0 -3)
  39. roles/etcd_certificates/tasks/server.yml (+4 -6)
  40. roles/etcd_certificates/vars/main.yml (+0 -11)
  41. roles/etcd_common/README.md (+34 -0)
  42. roles/etcd_common/defaults/main.yml (+30 -0)
  43. roles/etcd_common/meta/main.yml (+16 -0)
  44. roles/etcd_common/tasks/main.yml (+13 -0)
  45. roles/etcd_common/templates/host_int_map.j2 (+13 -0)
  46. roles/openshift_ansible_inventory/tasks/main.yml (+10 -6)
  47. roles/openshift_facts/library/openshift_facts.py (+32 -2)
  48. roles/openshift_facts/tasks/main.yml (+3 -0)
  49. roles/openshift_master/tasks/main.yml (+1 -0)
  50. roles/openshift_master/templates/master.yaml.v1.j2 (+18 -1)
  51. roles/openshift_master_certificates/tasks/main.yml (+2 -0)
  52. roles/os_zabbix/tasks/main.yml (+9 -0)
  53. roles/os_zabbix/vars/template_openshift_master.yml (+82 -0)
  54. roles/os_zabbix/vars/template_os_linux.yml (+21 -0)
  55. roles/os_zabbix/vars/template_performance_copilot.yml (+14 -0)
  56. test/units/README.md (+1 -1)
  57. test/units/multi_inventory_test.py (+114 -0)
  58. test/units/mutli_ec2_test.py (+0 -95)
  59. utils/setup.py (+1 -1)
  60. utils/src/ooinstall/cli_installer.py (+92 -40)
  61. utils/src/ooinstall/install_transactions.py (+25 -1)
  62. utils/src/ooinstall/variants.py (+1 -1)
  63. utils/test/cli_installer_tests.py (+37 -33)

+ 1 - 1
.tito/packages/openshift-ansible

@@ -1 +1 @@
-3.0.6-1 ./
+3.0.7-1 ./

+ 0 - 3
README_OSE.md

@@ -79,9 +79,6 @@ ansible_ssh_user=root
 # To deploy origin, change deployment_type to origin
 deployment_type=enterprise
 
-# Pre-release registry URL
-oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
-
 # Pre-release additional repo
 openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
 'baseurl':

+ 1 - 1
bin/README_SHELL_COMPLETION

@@ -14,7 +14,7 @@ will populate the cache file and the completions should
 become available.
 
 This script will look at the cached version of your
-multi_ec2 results in ~/.ansible/tmp/multi_ec2_inventory.cache.
+multi_inventory results in ~/.ansible/tmp/multi_inventory.cache.
 It will then parse a few {host}.{env} out of the json
 and return them to be completable.
 

+ 1 - 1
bin/openshift_ansible.conf.example

@@ -1,5 +1,5 @@
 #[main]
-#inventory = /usr/share/ansible/inventory/multi_ec2.py
+#inventory = /usr/share/ansible/inventory/multi_inventory.py
 
 #[host_type_aliases]
 #host-type-one = aliasa,aliasb

+ 7 - 4
bin/openshift_ansible/awsutil.py

@@ -4,7 +4,10 @@
 
 import os
 import re
-from openshift_ansible import multi_ec2
+
+# Buildbot does not have multi_inventory installed
+#pylint: disable=no-name-in-module
+from openshift_ansible import multi_inventory
 
 class ArgumentError(Exception):
     """This class is raised when improper arguments are passed."""
@@ -49,9 +52,9 @@ class AwsUtil(object):
         Keyword arguments:
         args -- optional arguments to pass to the inventory script
         """
-        mec2 = multi_ec2.MultiEc2(args)
-        mec2.run()
-        return mec2.result
+        minv = multi_inventory.MultiInventory(args)
+        minv.run()
+        return minv.result
 
     def get_environments(self):
         """Searches for env tags in the inventory and returns all of the envs found."""

+ 10 - 10
bin/ossh_bash_completion

@@ -1,12 +1,12 @@
 __ossh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      /usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+      /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
 
-    elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+    elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
 
-    elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+    elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
 
     fi
 }
@@ -26,13 +26,13 @@ complete -F _ossh ossh oscp
 
 __opssh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      /usr/bin/python -c 'from openshift_ansible.multi_ec2 import MultiEc2; m=MultiEc2(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+      /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
 
-    elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+    elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
 
-    elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+    elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
 
     fi
 }
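
For readability, the cache-reading one-liners above expand to this short Python 2 script (Python 2 to match the /usr/bin/python interpreter the completions invoke); the cache path is the one this commit introduces:

    import json
    import os

    # Load the cached multi_inventory results and print "<name>.<environment>"
    # for every host carrying both EC2 tags, exactly as the completions do.
    loc = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
    z = json.loads(open(loc).read())
    print '\n'.join('%s.%s' % (h['ec2_tag_Name'], h['ec2_tag_environment'])
                    for h in z['_meta']['hostvars'].values()
                    if all(k in h for k in ('ec2_tag_Name', 'ec2_tag_environment')))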

+ 5 - 5
bin/ossh_zsh_completion

@@ -2,13 +2,13 @@
 
 _ossh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      print $(/usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+      print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
 
-    elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+    elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
+      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
 
-    elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+    elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
 
     fi
 

+ 2 - 2
bin/zsh_functions/_ossh

@@ -1,8 +1,8 @@
 #compdef ossh oscp
 
 _ossh_known_hosts(){
-  if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
+  if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
+    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
   fi
 }
 

+ 66 - 1
filter_plugins/oo_filters.py

@@ -7,6 +7,8 @@ Custom filters for use in openshift-ansible
 
 from ansible import errors
 from operator import itemgetter
+import OpenSSL.crypto
+import os.path
 import pdb
 import re
 import json
@@ -327,6 +329,68 @@ class FilterModule(object):
 
         return revamped_outputs
 
+    @staticmethod
+    # pylint: disable=too-many-branches
+    def oo_parse_certificate_names(certificates, data_dir, internal_hostnames):
+        ''' Parses names from list of certificate hashes.
+
+            Ex: certificates = [{ "certfile": "/etc/origin/master/custom1.crt",
+                                  "keyfile": "/etc/origin/master/custom1.key" },
+                                { "certfile": "custom2.crt",
+                                  "keyfile": "custom2.key" }]
+
+                returns [{ "certfile": "/etc/origin/master/custom1.crt",
+                           "keyfile": "/etc/origin/master/custom1.key",
+                           "names": [ "public-master-host.com",
+                                      "other-master-host.com" ] },
+                         { "certfile": "/etc/origin/master/custom2.crt",
+                           "keyfile": "/etc/origin/master/custom2.key",
+                           "names": [ "some-hostname.com" ] }]
+        '''
+        if not issubclass(type(certificates), list):
+            raise errors.AnsibleFilterError("|failed expects certificates is a list")
+
+        if not issubclass(type(data_dir), unicode):
+            raise errors.AnsibleFilterError("|failed expects data_dir is unicode")
+
+        if not issubclass(type(internal_hostnames), list):
+            raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
+
+        for certificate in certificates:
+            if 'names' in certificate.keys():
+                continue
+            else:
+                certificate['names'] = []
+
+            if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']):
+                # Unable to find cert/key, try to prepend data_dir to paths
+                certificate['certfile'] = os.path.join(data_dir, certificate['certfile'])
+                certificate['keyfile'] = os.path.join(data_dir, certificate['keyfile'])
+                if not os.path.isfile(certificate['certfile']) and not os.path.isfile(certificate['keyfile']):
+                    # Unable to find cert/key in data_dir
+                    raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
+                                                    (certificate['certfile'], certificate['keyfile']))
+
+            try:
+                st_cert = open(certificate['certfile'], 'rt').read()
+                cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
+                certificate['names'].append(str(cert.get_subject().commonName.decode()))
+                for i in range(cert.get_extension_count()):
+                    if cert.get_extension(i).get_short_name() == 'subjectAltName':
+                        for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
+                            certificate['names'].append(name)
+            except:
+                raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
+                                                 "please specify certificate names in host inventory"))
+
+            certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
+            certificate['names'] = list(set(certificate['names']))
+            if not certificate['names']:
+                raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
+                                                 "detected a collision with internal hostname, please specify " +
+                                                 "certificate names in host inventory"))
+        return certificates
+
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {
@@ -342,5 +406,6 @@ class FilterModule(object):
             "oo_combine_dict": self.oo_combine_dict,
             "oo_split": self.oo_split,
             "oo_filter_list": self.oo_filter_list,
-            "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs
+            "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
+            "oo_parse_certificate_names": self.oo_parse_certificate_names
         }
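
The heart of the new oo_parse_certificate_names filter is the pyOpenSSL name extraction: load the PEM, take the subject CN, then walk the extensions for subjectAltName entries. A minimal standalone sketch of just that step (the helper name and certificate path are illustrative, not part of the patch):

    import OpenSSL.crypto

    def certificate_names(certfile):
        '''Return the subject CN plus any subjectAltName DNS entries.'''
        with open(certfile, 'rt') as f:
            cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                                   f.read())
        names = [str(cert.get_subject().commonName)]
        for i in range(cert.get_extension_count()):
            ext = cert.get_extension(i)
            if ext.get_short_name() == b'subjectAltName':
                # str() of the extension renders as "DNS:a.com, DNS:b.com"
                names.extend(str(ext).replace('DNS:', '').split(', '))
        return sorted(set(names))

    # Hypothetical usage:
    #   certificate_names('/etc/origin/master/custom1.crt')
    #   -> ['other-master-host.com', 'public-master-host.com']

The filter then drops any names that collide with the cluster's internal hostnames, so only externally meaningful names end up in the master config.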

+ 6 - 1
inventory/byo/hosts.example

@@ -24,7 +24,7 @@ deployment_type=atomic-enterprise
 #use_cluster_metrics=true
 
 # Pre-release registry URL
-#oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
+#oreg_url=example.com/openshift3/ose-${component}:${version}
 
 # Pre-release Dev puddle repo
 #openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
@@ -99,6 +99,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # set RPM version for debugging purposes
 #openshift_pkg_version=-3.0.0.0
 
+# Configure custom master certificates
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+# Detected names may be overridden by specifying the "names" key
+#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
+
 # host group for masters
 [masters]
 ose3-master[1:3]-ansible.test.example.com

+ 21 - 11
inventory/gce/hosts/gce.py

@@ -66,12 +66,22 @@ Examples:
   $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a"
 
   Use the GCE inventory script to print out instance specific information
-  $ plugins/inventory/gce.py --host my_instance
+  $ contrib/inventory/gce.py --host my_instance
 
 Author: Eric Johnson <erjohnso@google.com>
 Version: 0.0.1
 '''
 
+__requires__ = ['pycrypto>=2.6']
+try:
+    import pkg_resources
+except ImportError:
+    # Use pkg_resources to find the correct versions of libraries and set
+    # sys.path appropriately when there are multiversion installs.  We don't
+    # fail here as there is code that better expresses the errors where the
+    # library is used.
+    pass
+
 USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
 USER_AGENT_VERSION="v1"
 
@@ -102,9 +112,9 @@ class GceInventory(object):
 
         # Just display data for specific host
         if self.args.host:
-            print self.json_format_dict(self.node_to_dict(
+            print(self.json_format_dict(self.node_to_dict(
                     self.get_instance(self.args.host)),
-                    pretty=self.args.pretty)
+                    pretty=self.args.pretty))
             sys.exit(0)
 
         # Otherwise, assume user wants all instances grouped
@@ -120,7 +130,6 @@ class GceInventory(object):
             os.path.dirname(os.path.realpath(__file__)), "gce.ini")
         gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
 
-
         # Create a ConfigParser.
         # This provides empty defaults to each key, so that environment
         # variable configuration (as opposed to INI configuration) is able
@@ -174,7 +183,6 @@ class GceInventory(object):
         args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
         kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
 
-        
         # Retrieve and return the GCE driver.
         gce = get_driver(Provider.GCE)(*args, **kwargs)
         gce.connection.user_agent_append(
@@ -213,8 +221,7 @@ class GceInventory(object):
             'gce_image': inst.image,
             'gce_machine_type': inst.size,
             'gce_private_ip': inst.private_ips[0],
-            # Hosts don't always have a public IP name
-            #'gce_public_ip': inst.public_ips[0],
+            'gce_public_ip': inst.public_ips[0] if len(inst.public_ips) >= 1 else None,
             'gce_name': inst.name,
             'gce_description': inst.extra['description'],
             'gce_status': inst.extra['status'],
@@ -222,15 +229,15 @@ class GceInventory(object):
             'gce_tags': inst.extra['tags'],
             'gce_metadata': md,
             'gce_network': net,
-            # Hosts don't always have a public IP name
-            #'ansible_ssh_host': inst.public_ips[0]
+            # Hosts don't have a public name, so we add an IP
+            'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
         }
 
     def get_instance(self, instance_name):
         '''Gets details about a specific instance '''
         try:
             return self.driver.ex_get_node(instance_name)
-        except Exception, e:
+        except Exception as e:
             return None
 
     def group_instances(self):
@@ -250,7 +257,10 @@ class GceInventory(object):
 
             tags = node.extra['tags']
             for t in tags:
-                tag = 'tag_%s' % t
+                if t.startswith('group-'):
+                    tag = t[6:]
+                else:
+                    tag = 'tag_%s' % t
                 if groups.has_key(tag): groups[tag].append(name)
                 else: groups[tag] = [name]
 

+ 0 - 32
inventory/multi_ec2.yaml.example

@@ -1,32 +0,0 @@
-# multi ec2 inventory configs
-#
-cache_location: ~/.ansible/tmp/multi_ec2_inventory.cache
-
-accounts:
-  - name: aws1
-    provider: aws/hosts/ec2.py
-    provider_config:
-      ec2:
-        regions: all
-        regions_exclude:  us-gov-west-1,cn-north-1
-        destination_variable: public_dns_name
-        route53: False
-        cache_path: ~/.ansible/tmp
-        cache_max_age: 300
-        vpc_destination_variable: ip_address
-    env_vars:
-      AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
-      AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-    all_group: ec2
-    extra_vars:
-      cloud: aws
-      account: aws1
-
-- name: aws2
-    provider: aws/hosts/ec2.py
-    env_vars:
-      AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
-      AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
-      EC2_INI_PATH: /etc/ansible/ec2.ini
-
-cache_max_age: 60

+ 92 - 52
inventory/multi_ec2.py

@@ -1,6 +1,6 @@
 #!/usr/bin/env python2
 '''
-    Fetch and combine multiple ec2 account settings into a single
+    Fetch and combine multiple inventory account settings into a single
     json hash.
 '''
 # vim: expandtab:tabstop=4:shiftwidth=4
@@ -15,13 +15,19 @@ import errno
 import fcntl
 import tempfile
 import copy
+from string import Template
+import shutil
 
-CONFIG_FILE_NAME = 'multi_ec2.yaml'
-DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
+CONFIG_FILE_NAME = 'multi_inventory.yaml'
+DEFAULT_CACHE_PATH = os.path.expanduser('~/.ansible/tmp/multi_inventory.cache')
 
-class MultiEc2(object):
+class MultiInventoryException(Exception):
+    '''Exceptions for MultiInventory class'''
+    pass
+
+class MultiInventory(object):
     '''
-       MultiEc2 class:
+       MultiInventory class:
             Opens a yaml config file and reads aws credentials.
             Stores a json hash of resources in result.
     '''
@@ -35,7 +41,7 @@ class MultiEc2(object):
 
         self.cache_path = DEFAULT_CACHE_PATH
         self.config = None
-        self.all_ec2_results = {}
+        self.all_inventory_results = {}
         self.result = {}
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
@@ -56,7 +62,7 @@ class MultiEc2(object):
            cache is valid for the inventory.
 
            if the cache is valid; return cache
-           else the credentials are loaded from multi_ec2.yaml or from the env
+           else the credentials are loaded from multi_inventory.yaml or from the env
            and we attempt to get the inventory from the provider specified.
         '''
         # load yaml
@@ -111,6 +117,10 @@ class MultiEc2(object):
         with open(conf_file) as conf:
             config = yaml.safe_load(conf)
 
+        # Provide a check for unique account names
+        if len(set([acc['name'] for acc in config['accounts']])) != len(config['accounts']):
+            raise MultiInventoryException('Duplicate account names in config file')
+
         return config
 
     def get_provider_tags(self, provider, env=None):
@@ -136,23 +146,25 @@ class MultiEc2(object):
         else:
             cmds.append('--list')
 
-        cmds.append('--refresh-cache')
+        if 'aws' in provider.lower():
+            cmds.append('--refresh-cache')
 
         return subprocess.Popen(cmds, stderr=subprocess.PIPE, \
                                 stdout=subprocess.PIPE, env=env)
 
     @staticmethod
-    def generate_config(config_data):
-        """Generate the ec2.ini file in as a secure temp file.
-           Once generated, pass it to the ec2.py as an environment variable.
+    def generate_config(provider_files):
+        """Generate the provider_files in a temporary directory.
         """
-        fildes, tmp_file_path = tempfile.mkstemp(prefix='multi_ec2.ini.')
-        for section, values in config_data.items():
-            os.write(fildes, "[%s]\n" % section)
-            for option, value  in values.items():
-                os.write(fildes, "%s = %s\n" % (option, value))
-        os.close(fildes)
-        return tmp_file_path
+        prefix = 'multi_inventory.'
+        tmp_dir_path = tempfile.mkdtemp(prefix=prefix)
+        for provider_file in provider_files:
+            filedes = open(os.path.join(tmp_dir_path, provider_file['name']), 'w+')
+            content = Template(provider_file['contents']).substitute(tmpdir=tmp_dir_path)
+            filedes.write(content)
+            filedes.close()
+
+        return tmp_dir_path
 
     def run_provider(self):
         '''Setup the provider call with proper variables
@@ -160,13 +172,21 @@ class MultiEc2(object):
         '''
         try:
             all_results = []
-            tmp_file_paths = []
+            tmp_dir_paths = []
             processes = {}
             for account in self.config['accounts']:
-                env = account['env_vars']
-                if account.has_key('provider_config'):
-                    tmp_file_paths.append(MultiEc2.generate_config(account['provider_config']))
-                    env['EC2_INI_PATH'] = tmp_file_paths[-1]
+                tmp_dir = None
+                if account.has_key('provider_files'):
+                    tmp_dir = MultiInventory.generate_config(account['provider_files'])
+                    tmp_dir_paths.append(tmp_dir)
+
+                # Update env vars after creating provider_config_files
+                # so that we can grab the tmp_dir if it exists
+                env = account.get('env_vars', {})
+                if env and tmp_dir:
+                    for key, value in env.items():
+                        env[key] = Template(value).substitute(tmpdir=tmp_dir)
+
                 name = account['name']
                 provider = account['provider']
                 processes[name] = self.get_provider_tags(provider, env)
@@ -182,9 +202,9 @@ class MultiEc2(object):
                 })
 
         finally:
-            # Clean up the mkstemp file
-            for tmp_file in tmp_file_paths:
-                os.unlink(tmp_file)
+            # Clean up the mkdtemp dirs
+            for tmp_dir in tmp_dir_paths:
+                shutil.rmtree(tmp_dir)
 
         return all_results
 
@@ -223,7 +243,7 @@ class MultiEc2(object):
                               ]
                     raise RuntimeError('\n'.join(err_msg).format(**result))
                 else:
-                    self.all_ec2_results[result['name']] = json.loads(result['out'])
+                    self.all_inventory_results[result['name']] = json.loads(result['out'])
 
             # Check if user wants extra vars in yaml by
             # having hostvars and all_group defined
@@ -231,29 +251,52 @@ class MultiEc2(object):
                 self.apply_account_config(acc_config)
 
             # Build results by merging all dictionaries
-            values = self.all_ec2_results.values()
+            values = self.all_inventory_results.values()
             values.insert(0, self.result)
             for result in  values:
-                MultiEc2.merge_destructively(self.result, result)
+                MultiInventory.merge_destructively(self.result, result)
+
+    def add_entry(self, data, keys, item):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}}
+            keys = a.b
+            item = c
+        '''
+        if "." in keys:
+            key, rest = keys.split(".", 1)
+            if key not in data:
+                data[key] = {}
+            self.add_entry(data[key], rest, item)
+        else:
+            data[keys] = item
+
+    def get_entry(self, data, keys):
+        ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}}
+            keys = a.b
+            return c
+        '''
+        if keys and "." in keys:
+            key, rest = keys.split(".", 1)
+            return self.get_entry(data[key], rest)
+        else:
+            return data.get(keys, None)
 
     def apply_account_config(self, acc_config):
         ''' Apply account config settings
         '''
-        results = self.all_ec2_results[acc_config['name']]
+        results = self.all_inventory_results[acc_config['name']]
+        results['all_hosts'] = results['_meta']['hostvars'].keys()
 
         # Update each hostvar with the newly desired key: value from extra_*
-        for _extra in ['extra_groups', 'extra_vars']:
+        for _extra in ['extra_vars', 'extra_groups']:
             for new_var, value in acc_config.get(_extra, {}).items():
-                # Verify the account results look sane
-                # by checking for these keys ('_meta' and 'hostvars' exist)
-                if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
-                    for data in results['_meta']['hostvars'].values():
-                        data[str(new_var)] = str(value)
+                for data in results['_meta']['hostvars'].values():
+                    self.add_entry(data, new_var, value)
 
                 # Add this group
-                if _extra == 'extra_groups' and results.has_key(acc_config['all_group']):
-                    results["%s_%s" % (new_var, value)] = \
-                     copy.copy(results[acc_config['all_group']])
+                if _extra == 'extra_groups':
+                    results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts'])
 
         # Clone groups goes here
         for to_name, from_name in acc_config.get('clone_groups', {}).items():
@@ -262,14 +305,11 @@ class MultiEc2(object):
 
         # Clone vars goes here
         for to_name, from_name in acc_config.get('clone_vars', {}).items():
-            # Verify the account results look sane
-            # by checking for these keys ('_meta' and 'hostvars' exist)
-            if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
-                for data in results['_meta']['hostvars'].values():
-                    data[str(to_name)] = data.get(str(from_name), 'nil')
+            for data in results['_meta']['hostvars'].values():
+                self.add_entry(data, to_name, self.get_entry(data, from_name))
 
-        # store the results back into all_ec2_results
-        self.all_ec2_results[acc_config['name']] = results
+        # store the results back into all_inventory_results
+        self.all_inventory_results[acc_config['name']] = results
 
     @staticmethod
     def merge_destructively(input_a, input_b):
@@ -277,7 +317,7 @@ class MultiEc2(object):
         for key in input_b:
             if key in input_a:
                 if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
-                    MultiEc2.merge_destructively(input_a[key], input_b[key])
+                    MultiInventory.merge_destructively(input_a[key], input_b[key])
                 elif input_a[key] == input_b[key]:
                     pass # same leaf value
                 # both lists so add each element in b to a if it does ! exist
@@ -333,7 +373,7 @@ class MultiEc2(object):
                 if exc.errno != errno.EEXIST or not os.path.isdir(path):
                     raise
 
-        json_data = MultiEc2.json_format_dict(self.result, True)
+        json_data = MultiInventory.json_format_dict(self.result, True)
         with open(self.cache_path, 'w') as cache:
             try:
                 fcntl.flock(cache, fcntl.LOCK_EX)
@@ -369,7 +409,7 @@ class MultiEc2(object):
 
 
 if __name__ == "__main__":
-    MEC2 = MultiEc2()
-    MEC2.parse_cli_args()
-    MEC2.run()
-    print MEC2.result_str()
+    MI2 = MultiInventory()
+    MI2.parse_cli_args()
+    MI2.run()
+    print MI2.result_str()
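
The new add_entry/get_entry helpers let extra_vars and clone_vars address nested hostvars with dotted keys. Extracted from the diff and shown standalone, with an illustrative key:

    def add_entry(data, keys, item):
        '''Set data['a']['b'] = item for keys 'a.b', creating dicts as needed.'''
        if '.' in keys:
            key, rest = keys.split('.', 1)
            if key not in data:
                data[key] = {}
            add_entry(data[key], rest, item)
        else:
            data[keys] = item

    def get_entry(data, keys):
        '''Read a value back out using the same dotted notation.'''
        if keys and '.' in keys:
            key, rest = keys.split('.', 1)
            return get_entry(data[key], rest)
        return data.get(keys, None)

    hostvars = {}
    add_entry(hostvars, 'oo.account', 'aws1')
    assert hostvars == {'oo': {'account': 'aws1'}}
    assert get_entry(hostvars, 'oo.account') == 'aws1'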

+ 51 - 0
inventory/multi_inventory.yaml.example

@@ -0,0 +1,51 @@
+# multi ec2 inventory configs
+#
+cache_location: ~/.ansible/tmp/multi_inventory.cache
+
+accounts:
+  - name: aws1
+    provider: aws/ec2.py
+    provider_files:
+    - name: ec2.ini
+      contents: |-
+        [ec2]
+        regions = all
+        regions_exclude =  us-gov-west-1,cn-north-1
+        destination_variable = public_dns_name
+        route53 = False
+        cache_path = ~/.ansible/tmp
+        cache_max_age = 300
+        vpc_destination_variable = ip_address
+    env_vars:
+      AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
+      AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+      EC2_INI_PATH: ${tmpdir}/ec2.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+    extra_vars:
+      cloud: aws
+      account: aws1
+
+  - name: mygce
+    extra_vars:
+      cloud: gce
+      account: gce1
+    env_vars:
+      GCE_INI_PATH: ${tmpdir}/gce.ini # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+    provider: gce/gce.py
+    provider_files:
+    - name: priv_key.pem
+      contents: |-
+        -----BEGIN PRIVATE KEY-----
+        yourprivatekeydatahere
+        -----END PRIVATE KEY-----
+    - name: gce.ini
+      contents: |-
+        [gce]
+        gce_service_account_email_address = <uuid>@developer.gserviceaccount.com
+        gce_service_account_pem_file_path = ${tmpdir}/priv_key.pem # we replace ${tmpdir} with the temporary directory that we've created for the provider.
+        gce_project_id = gce-project
+        zone = us-central1-a
+        network = default
+        gce_machine_type = n1-standard-2
+        gce_machine_image = rhel7
+
+cache_max_age: 600
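
The ${tmpdir} placeholders above are not shell variables: generate_config() writes each provider_files entry into a fresh mkdtemp() directory and expands ${tmpdir} with string.Template, both in the file contents and in env_vars. A sketch with made-up values:

    from string import Template

    tmp_dir_path = '/tmp/multi_inventory.abc123'  # stands in for tempfile.mkdtemp()
    env_vars = {'GCE_INI_PATH': '${tmpdir}/gce.ini'}
    resolved = {k: Template(v).substitute(tmpdir=tmp_dir_path)
                for k, v in env_vars.items()}
    assert resolved['GCE_INI_PATH'] == '/tmp/multi_inventory.abc123/gce.ini'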

+ 47 - 6
openshift-ansible.spec

@@ -5,7 +5,7 @@
 }
 
 Name:           openshift-ansible
-Version:        3.0.6
+Version:        3.0.7
 Release:        1%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
@@ -47,9 +47,9 @@ cp -pP bin/openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
 cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d
 cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
 # Fix links
-rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
 rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws
-ln -sf %{_datadir}/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+ln -sf %{_datadir}/ansible/inventory/multi_inventory.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_inventory.py
 ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws
 
 # openshift-ansible-docs install
@@ -60,8 +60,8 @@ mkdir -p %{buildroot}/etc/ansible
 mkdir -p %{buildroot}%{_datadir}/ansible/inventory
 mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws
 mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce
-cp -p inventory/multi_ec2.py %{buildroot}%{_datadir}/ansible/inventory
-cp -p inventory/multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
+cp -p inventory/multi_inventory.py %{buildroot}%{_datadir}/ansible/inventory
+cp -p inventory/multi_inventory.yaml.example %{buildroot}/etc/ansible/multi_inventory.yaml
 cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws
 cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce
 
@@ -137,7 +137,7 @@ Ansible Inventories used with the openshift-ansible scripts and playbooks.
 %files inventory
 %config(noreplace) /etc/ansible/*
 %dir %{_datadir}/ansible/inventory
-%{_datadir}/ansible/inventory/multi_ec2.py*
+%{_datadir}/ansible/inventory/multi_inventory.py*
 
 %package inventory-aws
 Summary:       Openshift and Atomic Enterprise Ansible Inventories for AWS
@@ -230,6 +230,8 @@ BuildArch:     noarch
 %package -n atomic-openshift-utils
 Summary:       Atomic OpenShift Utilities
 BuildRequires: python-setuptools
+Requires:      openshift-ansible-playbooks
+Requires:      openshift-ansible-roles
 Requires:      ansible
 Requires:      python-click
 Requires:      python-setuptools
@@ -247,6 +249,45 @@ Atomic OpenShift Utilities includes
 
 
 %changelog
+* Wed Nov 04 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.7-1
+- added the %%util in zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct default playbook directory
+  (smunilla@redhat.com)
+- Support for gce (kwoodson@redhat.com)
+- fixed a dumb naming mistake (mwoodson@redhat.com)
+- added disk tps checks to zabbix (mwoodson@redhat.com)
+- atomic-openshift-installer: Correct inaccurate prompt (smunilla@redhat.com)
+- atomic-openshift-installer: Add default openshift-ansible-playbook
+  (smunilla@redhat.com)
+- ooinstall: Add check for nopwd sudo (smunilla@redhat.com)
+- ooinstall: Update local install check (smunilla@redhat.com)
+- oo-install: Support running on the host to be deployed (smunilla@redhat.com)
+- Moving to Openshift Etcd application (mmahut@redhat.com)
+- Add all the possible servicenames to openshift_all_hostnames for masters
+  (sdodson@redhat.com)
+- Adding openshift.node.etcd items (mmahut@redhat.com)
+- Fix etcd cert generation when etcd_interface is defined (jdetiber@redhat.com)
+- get zabbix ready to start tracking status of pcp (jdiaz@redhat.com)
+- split inventory into subpackages (tdawson@redhat.com)
+- changed the cpu alert to only alert if cpu idle more than 5x. Change alert to
+  warning (mwoodson@redhat.com)
+- Rename install_transactions module to openshift_ansible.
+  (dgoodwin@redhat.com)
+- atomic-openshift-installer: Text improvements (smunilla@redhat.com)
+- Add utils subpackage missing dep on openshift-ansible-roles.
+  (dgoodwin@redhat.com)
+- Disable requiretty for only the openshift user (error@ioerror.us)
+- Don't require tty to run sudo (error@ioerror.us)
+- Attempt to remove the various interfaces left over from an install
+  (bleanhar@redhat.com)
+- Pulling latest gce.py module from ansible (kwoodson@redhat.com)
+- Disable OpenShift features if installing Atomic Enterprise
+  (jdetiber@redhat.com)
+- Use default playbooks if available. (dgoodwin@redhat.com)
+- Add uninstall subcommand. (dgoodwin@redhat.com)
+- Add subcommands to CLI. (dgoodwin@redhat.com)
+- Remove images options in oadm command (nakayamakenjiro@gmail.com)
+
 * Fri Oct 30 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.6-1
 - Adding python-boto and python-libcloud to openshift-ansible-inventory
   dependency (kwoodson@redhat.com)

+ 4 - 4
playbooks/aws/openshift-cluster/launch.yml

@@ -11,7 +11,7 @@
       msg: Deployment type not supported for aws provider yet
     when: deployment_type == 'enterprise'
 
-  - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ etcd_names }}"
@@ -19,7 +19,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
 
-  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ master_names }}"
@@ -27,7 +27,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "compute"
       count: "{{ num_nodes }}"
@@ -38,7 +38,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "infra"
       count: "{{ num_infra }}"

+ 1 - 64
playbooks/common/openshift-cluster/config.yml

@@ -1,68 +1,5 @@
 ---
-- name: Populate config host groups
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - fail:
-      msg: This playbook requires g_etcd_group to be set
-    when: g_etcd_group is not defined
-
-  - fail:
-      msg: This playbook requires g_masters_group to be set
-    when: g_masters_group is not defined
-
-  - fail:
-      msg: This playbook requires g_nodes_group to be set
-    when: g_nodes_group is not defined
-
-  - name: Evaluate oo_etcd_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_etcd_to_config
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_etcd_group] | default([])
-
-  - name: Evaluate oo_masters_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_masters_to_config
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_masters_group] | default([])
-
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_nodes_group] | default([])
-
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_masters_group] | default([])
-    when: g_nodeonmaster is defined and g_nodeonmaster == true
-
-  - name: Evaluate oo_first_etcd
-    add_host:
-      name: "{{ groups[g_etcd_group][0] }}"
-      groups: oo_first_etcd
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
-
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups[g_masters_group][0] }}"
-      groups: oo_first_master
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+- include: evaluate_groups.yml
 
 - include: ../openshift-etcd/config.yml
 

+ 64 - 0
playbooks/common/openshift-cluster/evaluate_groups.yml

@@ -0,0 +1,64 @@
+---
+- name: Populate config host groups
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - fail:
+      msg: This playbook requires g_etcd_group to be set
+    when: g_etcd_group is not defined
+
+  - fail:
+      msg: This playbook requires g_masters_group to be set
+    when: g_masters_group is not defined
+
+  - fail:
+      msg: This playbook requires g_nodes_group to be set
+    when: g_nodes_group is not defined
+
+  - name: Evaluate oo_etcd_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_etcd_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_etcd_group] | default([])
+
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_masters_group] | default([])
+
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_nodes_group] | default([])
+
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: groups[g_masters_group] | default([])
+    when: g_nodeonmaster is defined and g_nodeonmaster == true
+
+  - name: Evaluate oo_first_etcd
+    add_host:
+      name: "{{ groups[g_etcd_group][0] }}"
+      groups: oo_first_etcd
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+    when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
+
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups[g_masters_group][0] }}"
+      groups: oo_first_master
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
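
As a rough Python analogue of what the play above does with add_host (names are illustrative): each g_* group seeds the corresponding oo_*_to_config group, masters double as nodes when g_nodeonmaster is set, and the first member of the etcd and master groups lands in a dedicated oo_first_* group:

    def evaluate_groups(groups, g_etcd, g_masters, g_nodes, node_on_master=False):
        # 'groups' maps group name -> list of hosts, like Ansible's groups dict.
        oo = {
            'oo_etcd_to_config': list(groups.get(g_etcd, [])),
            'oo_masters_to_config': list(groups.get(g_masters, [])),
            'oo_nodes_to_config': list(groups.get(g_nodes, [])),
        }
        if node_on_master:
            oo['oo_nodes_to_config'] += groups.get(g_masters, [])
        if groups.get(g_etcd):
            oo['oo_first_etcd'] = [groups[g_etcd][0]]
        if groups.get(g_masters):
            oo['oo_first_master'] = [groups[g_masters][0]]
        return oo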

+ 7 - 0
playbooks/common/openshift-cluster/scaleup.yml

@@ -0,0 +1,7 @@
+---
+- include: evaluate_groups.yml
+
+- include: ../openshift-node/config.yml
+  vars:
+    osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"

playbooks/common/openshift-cluster/set_etcd_launch_facts_tasks.yml → playbooks/common/openshift-cluster/tasks/set_etcd_launch_facts.yml


playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml → playbooks/common/openshift-cluster/tasks/set_master_launch_facts.yml


playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml → playbooks/common/openshift-cluster/tasks/set_node_launch_facts.yml


+ 9 - 0
playbooks/common/openshift-master/config.yml

@@ -199,9 +199,18 @@
       validate_checksum: yes
     with_items: masters_needing_certs
 
+- name: Inspect named certificates
+  hosts: oo_first_master
+  tasks:
+  - name: Collect certificate names
+    set_fact:
+      parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir, openshift.common.internal_hostnames) }}"
+    when: openshift_master_named_certificates is defined
+
 - name: Configure master instances
   hosts: oo_masters_to_config
   vars:
+    named_certificates: "{{ hostvars[groups['oo_first_master'][0]]['parsed_named_certificates'] | default([])}}"
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
     embedded_etcd: "{{ openshift.master.embedded_etcd }}"

+ 3 - 3
playbooks/gce/openshift-cluster/launch.yml

@@ -9,7 +9,7 @@
   - fail: msg="Deployment type not supported for gce provider yet"
     when: deployment_type == 'enterprise'
 
-  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ master_names }}"
@@ -17,7 +17,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "compute"
       count: "{{ num_nodes }}"
@@ -28,7 +28,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "infra"
       count: "{{ num_infra }}"

+ 4 - 4
playbooks/libvirt/openshift-cluster/launch.yml

@@ -17,7 +17,7 @@
 
   - include: tasks/configure_libvirt.yml
 
-  - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ etcd_names }}"
@@ -25,7 +25,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
 
-  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ master_names }}"
@@ -33,7 +33,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "default"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "compute"
       count: "{{ num_nodes }}"
@@ -44,7 +44,7 @@
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
-  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
     vars:
       type: "infra"
       count: "{{ num_infra }}"

+ 6 - 0
playbooks/libvirt/openshift-cluster/templates/user-data

@@ -19,5 +19,11 @@ system_info:
 ssh_authorized_keys:
   - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
 
+write_files:
+  - path: /etc/sudoers.d/00-openshift-no-requiretty
+    permissions: 440
+    content: |
+        Defaults:openshift !requiretty
+
 runcmd:
   - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart

+ 1 - 1
roles/etcd/README.md

@@ -17,7 +17,7 @@ TODO
 Dependencies
 ------------
 
-None
+etcd-common
 
 Example Playbook
 ----------------

+ 0 - 8
roles/etcd/defaults/main.yaml

@@ -2,16 +2,8 @@
 etcd_interface: "{{ ansible_default_ipv4.interface }}"
 etcd_client_port: 2379
 etcd_peer_port: 2380
-etcd_peers_group: etcd
 etcd_url_scheme: http
 etcd_peer_url_scheme: http
-etcd_conf_dir: /etc/etcd
-etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
-etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
-etcd_key_file: "{{ etcd_conf_dir }}/server.key"
-etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
-etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
-etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
 
 etcd_initial_cluster_state: new
 etcd_initial_cluster_token: etcd-cluster-1

+ 1 - 0
roles/etcd/handlers/main.yml

@@ -1,3 +1,4 @@
 ---
 - name: restart etcd
   service: name=etcd state=restarted
+  when: not etcd_service_status_changed | default(false)

+ 1 - 1
roles/etcd/meta/main.yml

@@ -17,4 +17,4 @@ galaxy_info:
   - system
 dependencies:
 - { role: os_firewall }
-- { role: openshift_repos }
+- { role: etcd_common }

+ 10 - 2
roles/etcd/tasks/main.yml

@@ -1,4 +1,12 @@
 ---
+- fail:
+    msg: Interface {{ etcd_interface }} not found
+  when: "'ansible_' ~ etcd_interface not in hostvars[inventory_hostname]"
+
+- fail:
+    msg: IPv4 address not found for {{ etcd_interface }}
+  when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4"
+
 - name: Install etcd
   yum: pkg=etcd-2.* state=present
 
@@ -49,5 +57,5 @@
     enabled: yes
   register: start_result
 
-- pause: seconds=30
-  when: start_result | changed
+- set_fact:
+    etcd_service_status_changed: "{{ start_result | changed }}"

+ 2 - 2
roles/etcd/templates/etcd.conf.j2

@@ -1,9 +1,9 @@
 {% macro initial_cluster() -%}
 {% for host in groups[etcd_peers_group] -%}
 {% if loop.last -%}
-{{ host }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }}
+{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }}
 {%- else -%}
-{{ host }}={{ etcd_peer_url_scheme }}://{{ hostvars[host]['ansible_' + etcd_interface]['ipv4']['address'] }}:{{ etcd_peer_port }},
+{{ host }}={{ etcd_peer_url_scheme }}://{{ etcd_host_int_map[host].interface.ipv4.address }}:{{ etcd_peer_port }},
 {%- endif -%}
 {% endfor -%}
 {% endmacro -%}

+ 1 - 1
roles/etcd_ca/meta/main.yml

@@ -13,4 +13,4 @@ galaxy_info:
   - cloud
   - system
 dependencies:
-- { role: openshift_repos }
+- { role: etcd_common }

+ 15 - 15
roles/etcd_ca/tasks/main.yml

@@ -1,14 +1,14 @@
 ---
 - file:
-    path: "{{ etcd_ca_dir }}/{{ item }}"
+    path: "{{ item }}"
     state: directory
     mode: 0700
     owner: root
     group: root
   with_items:
-  - certs
-  - crl
-  - fragments
+  - "{{ etcd_ca_new_certs_dir }}"
+  - "{{ etcd_ca_crl_dir }}"
+  - "{{ etcd_ca_dir }}/fragments"
 
 - command: cp /etc/pki/tls/openssl.cnf ./
   args:
@@ -22,25 +22,25 @@
 
 - assemble:
     src: "{{ etcd_ca_dir }}/fragments"
-    dest: "{{ etcd_ca_dir }}/openssl.cnf"
+    dest: "{{ etcd_openssl_conf }}"
 
-- command: touch index.txt
+- command: touch {{ etcd_ca_db }}
   args:
-    chdir: "{{ etcd_ca_dir }}"
-    creates: "{{ etcd_ca_dir }}/index.txt"
+    creates: "{{ etcd_ca_db }}"
 
 - copy:
-    dest: "{{ etcd_ca_dir }}/serial"
+    dest: "{{ etcd_ca_serial }}"
     content: "01"
     force: no
 
 - command: >
-    openssl req -config openssl.cnf -newkey rsa:4096
-    -keyout ca.key -new -out ca.crt -x509 -extensions etcd_v3_ca_self
-    -batch -nodes -subj /CN=etcd-signer@{{ ansible_date_time.epoch }}
-    -days 365
+    openssl req -config {{ etcd_openssl_conf }} -newkey rsa:4096
+    -keyout {{ etcd_ca_key }} -new -out {{ etcd_ca_cert }}
+    -x509 -extensions {{ etcd_ca_exts_self }} -batch -nodes
+    -days {{ etcd_ca_default_days }}
+    -subj /CN=etcd-signer@{{ ansible_date_time.epoch }}
   args:
     chdir: "{{ etcd_ca_dir }}"
-    creates: "{{ etcd_ca_dir }}/ca.crt"
+    creates: "{{ etcd_ca_cert }}"
   environment:
-    SAN: ''
+    SAN: 'etcd-signer'

+ 15 - 15
roles/etcd_ca/templates/openssl_append.j2

@@ -1,20 +1,20 @@
 
-[ etcd_v3_req ]
+[ {{ etcd_req_ext }} ]
 basicConstraints = critical,CA:FALSE
 keyUsage         = digitalSignature,keyEncipherment
 subjectAltName   = ${ENV::SAN}
 
-[ etcd_ca ]
+[ {{ etcd_ca_name }} ]
 dir             = {{ etcd_ca_dir }}
-crl_dir         = $dir/crl
-database        = $dir/index.txt
-new_certs_dir   = $dir/certs
-certificate     = $dir/ca.crt
-serial          = $dir/serial
-private_key     = $dir/ca.key
-crl_number      = $dir/crlnumber
-x509_extensions = etcd_v3_ca_client
-default_days    = 365
+crl_dir         = {{ etcd_ca_crl_dir }}
+database        = {{ etcd_ca_db }}
+new_certs_dir   = {{ etcd_ca_new_certs_dir }}
+certificate     = {{ etcd_ca_cert }}
+serial          = {{ etcd_ca_serial }}
+private_key     = {{ etcd_ca_key }}
+crl_number      = {{ etcd_ca_crl_number }}
+x509_extensions = {{ etcd_ca_exts_client }}
+default_days    = {{ etcd_ca_default_days }}
 default_md      = sha256
 preserve        = no
 name_opt        = ca_default
@@ -23,27 +23,27 @@ policy          = policy_anything
 unique_subject  = no
 copy_extensions = copy
 
-[ etcd_v3_ca_self ]
+[ {{ etcd_ca_exts_self }} ]
 authorityKeyIdentifier = keyid,issuer
 basicConstraints       = critical,CA:TRUE,pathlen:0
 keyUsage               = critical,digitalSignature,keyEncipherment,keyCertSign
 subjectKeyIdentifier   = hash
 
-[ etcd_v3_ca_peer ]
+[ {{ etcd_ca_exts_peer }} ]
 authorityKeyIdentifier = keyid,issuer:always
 basicConstraints       = critical,CA:FALSE
 extendedKeyUsage       = clientAuth,serverAuth
 keyUsage               = digitalSignature,keyEncipherment
 subjectKeyIdentifier   = hash
 
-[ etcd_v3_ca_server ]
+[ {{ etcd_ca_exts_server }} ]
 authorityKeyIdentifier = keyid,issuer:always
 basicConstraints       = critical,CA:FALSE
 extendedKeyUsage       = serverAuth
 keyUsage               = digitalSignature,keyEncipherment
 subjectKeyIdentifier   = hash
 
-[ etcd_v3_ca_client ]
+[ {{ etcd_ca_exts_client }} ]
 authorityKeyIdentifier = keyid,issuer:always
 basicConstraints       = critical,CA:FALSE
 extendedKeyUsage       = clientAuth

+ 0 - 3
roles/etcd_ca/vars/main.yml

@@ -1,3 +0,0 @@
----
-etcd_conf_dir: /etc/etcd
-etcd_ca_dir: /etc/etcd/ca

+ 1 - 1
roles/etcd_certificates/tasks/client.yml

@@ -32,7 +32,7 @@
     creates: "{{ etcd_generated_certs_dir ~ '/' ~  item.etcd_cert_subdir ~ '/'
                  ~ item.etcd_cert_prefix ~ 'client.crt' }}"
   environment:
-    SAN: ''
+    SAN: "IP:{{ item.openshift.common.ip }}"
   with_items: etcd_needing_client_certs
 
 - file:

+ 0 - 3
roles/etcd_certificates/tasks/main.yml

@@ -4,6 +4,3 @@
 
 - include: server.yml
   when: etcd_needing_server_certs is defined and etcd_needing_server_certs
-
-
-

+ 4 - 6
roles/etcd_certificates/tasks/server.yml

@@ -18,7 +18,7 @@
     creates: "{{ etcd_generated_certs_dir ~ '/' ~  item.etcd_cert_subdir ~ '/'
                  ~ item.etcd_cert_prefix ~ 'server.csr' }}"
   environment:
-    SAN: "IP:{{ item.openshift.common.ip }}"
+    SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
   with_items: etcd_needing_server_certs
 
 - name: Sign and create the server crt
@@ -32,7 +32,7 @@
     creates: "{{ etcd_generated_certs_dir ~ '/' ~  item.etcd_cert_subdir ~ '/'
                  ~ item.etcd_cert_prefix ~ 'server.crt' }}"
   environment:
-    SAN: ''
+    SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
   with_items: etcd_needing_server_certs
 
 - name: Create the peer csr
@@ -47,7 +47,7 @@
     creates: "{{ etcd_generated_certs_dir ~ '/' ~  item.etcd_cert_subdir ~ '/'
                  ~ item.etcd_cert_prefix ~ 'peer.csr' }}"
   environment:
-    SAN: "IP:{{ item.openshift.common.ip }}"
+    SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
   with_items: etcd_needing_server_certs
 
 - name: Sign and create the peer crt
@@ -61,7 +61,7 @@
     creates: "{{ etcd_generated_certs_dir ~ '/' ~  item.etcd_cert_subdir ~ '/'
                  ~ item.etcd_cert_prefix ~ 'peer.crt' }}"
   environment:
-    SAN: ''
+    SAN: "IP:{{ etcd_host_int_map[item.inventory_hostname].interface.ipv4.address }}"
   with_items: etcd_needing_server_certs
 
 - file:
@@ -69,5 +69,3 @@
     dest: "{{ etcd_generated_certs_dir}}/{{ item.etcd_cert_subdir }}/{{ item.etcd_cert_prefix }}ca.crt"
     state: hard
   with_items: etcd_needing_server_certs
-
-

+ 0 - 11
roles/etcd_certificates/vars/main.yml

@@ -1,11 +0,0 @@
----
-etcd_conf_dir: /etc/etcd
-etcd_ca_dir: /etc/etcd/ca
-etcd_generated_certs_dir: /etc/etcd/generated_certs
-etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
-etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
-etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
-etcd_ca_name: etcd_ca
-etcd_req_ext: etcd_v3_req
-etcd_ca_exts_peer: etcd_v3_ca_peer
-etcd_ca_exts_server: etcd_v3_ca_server

+ 34 - 0
roles/etcd_common/README.md

@@ -0,0 +1,34 @@
+etcd_common
+========================
+
+TODO
+
+Requirements
+------------
+
+TODO
+
+Role Variables
+--------------
+
+TODO
+
+Dependencies
+------------
+
+openshift_repos
+
+Example Playbook
+----------------
+
+TODO
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)

+ 30 - 0
roles/etcd_common/defaults/main.yml

@@ -0,0 +1,30 @@
+---
+etcd_peers_group: etcd
+
+# etcd server vars
+etcd_conf_dir: /etc/etcd
+etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
+etcd_key_file: "{{ etcd_conf_dir }}/server.key"
+etcd_peer_ca_file: "{{ etcd_conf_dir }}/ca.crt"
+etcd_peer_cert_file: "{{ etcd_conf_dir }}/peer.crt"
+etcd_peer_key_file: "{{ etcd_conf_dir }}/peer.key"
+
+# etcd ca vars
+etcd_ca_dir: "{{ etcd_conf_dir}}/ca"
+etcd_generated_certs_dir: "{{ etcd_conf_dir }}/generated_certs"
+etcd_ca_cert: "{{ etcd_ca_dir }}/ca.crt"
+etcd_ca_key: "{{ etcd_ca_dir }}/ca.key"
+etcd_openssl_conf: "{{ etcd_ca_dir }}/openssl.cnf"
+etcd_ca_name: etcd_ca
+etcd_req_ext: etcd_v3_req
+etcd_ca_exts_peer: etcd_v3_ca_peer
+etcd_ca_exts_server: etcd_v3_ca_server
+etcd_ca_exts_self: etcd_v3_ca_self
+etcd_ca_exts_client: etcd_v3_ca_client
+etcd_ca_crl_dir: "{{ etcd_ca_dir }}/crl"
+etcd_ca_new_certs_dir: "{{ etcd_ca_dir }}/certs"
+etcd_ca_db: "{{ etcd_ca_dir }}/index.txt"
+etcd_ca_serial: "{{ etcd_ca_dir }}/serial"
+etcd_ca_crl_number: "{{ etcd_ca_dir }}/crlnumber"
+etcd_ca_default_days: 365

+ 16 - 0
roles/etcd_common/meta/main.yml

@@ -0,0 +1,16 @@
+---
+galaxy_info:
+  author: Jason DeTiberus
+  description:
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.9
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+  - system
+dependencies:
+- { role: openshift_repos }

+ 13 - 0
roles/etcd_common/tasks/main.yml

@@ -0,0 +1,13 @@
+---
+- set_fact:
+    etcd_host_int_map: "{{ lookup('template', '../templates/host_int_map.j2') | from_yaml }}"
+
+- fail:
+    msg: "Interface {{ item.value.etcd_interface }} not found on host {{ item.key }}"
+  when: "'etcd_interface' in item.value and 'interface' not in item.value"
+  with_dict: etcd_host_int_map
+
+- fail:
+    msg: IPv4 address not found for {{ item.value.interface.device }} on host {{ item.key }}
+  when: "'ipv4' not in item.value.interface or 'address' not in item.value.interface.ipv4"
+  with_dict: etcd_host_int_map

+ 13 - 0
roles/etcd_common/templates/host_int_map.j2

@@ -0,0 +1,13 @@
+---
+{% for host in groups[etcd_peers_group] %}
+{% set entry=hostvars[host] %}
+{{ entry.inventory_hostname }}:
+{% if 'etcd_interface' in entry %}
+  etcd_interface: {{ entry.etcd_interface }}
+{% if entry.etcd_interface in entry.ansible_interfaces %}
+  interface: {{ entry['ansible_' ~ entry.etcd_interface] | to_json }}
+{% endif %}
+{% else %}
+  interface: {{ entry['ansible_' ~ entry.ansible_default_ipv4.interface] | to_json }}
+{% endif %}
+{% endfor %}
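
For context, the fail tasks in roles/etcd_common/tasks/main.yml above validate the structure this template emits per peer. A minimal sketch of one parsed entry (hostname, device, and addresses are illustrative), written as the Python object that from_yaml would yield:

    # Hypothetical etcd_host_int_map entry after from_yaml; 'interface' mirrors
    # the host's ansible_<device> fact and is omitted when the requested
    # etcd_interface does not exist -- the condition the first fail task reports.
    etcd_host_int_map = {
        'etcd1.example.com': {
            'etcd_interface': 'eth0',
            'interface': {
                'device': 'eth0',
                'ipv4': {'address': '192.168.1.10', 'netmask': '255.255.255.0'},
            },
        },
    }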

+ 10 - 6
roles/openshift_ansible_inventory/tasks/main.yml

@@ -1,12 +1,16 @@
 ---
 - yum:
-    name: openshift-ansible-inventory
+    name: "{{ item }}"
     state: present
+  with_items:
+  - openshift-ansible-inventory
+  - openshift-ansible-inventory-aws
+  - openshift-ansible-inventory-gce
 
 - name:
   copy:
     content: "{{ oo_inventory_accounts | to_nice_yaml }}"
-    dest: /etc/ansible/multi_ec2.yaml
+    dest: /etc/ansible/multi_inventory.yaml
     group: "{{ oo_inventory_group }}"
     owner: "{{ oo_inventory_owner }}"
     mode: "0640"
@@ -20,17 +24,17 @@
 
 - file:
     state: link
-    src: /usr/share/ansible/inventory/multi_ec2.py
-    dest: /etc/ansible/inventory/multi_ec2.py
+    src: /usr/share/ansible/inventory/multi_inventory.py
+    dest: /etc/ansible/inventory/multi_inventory.py
     owner: root
     group: libra_ops
 
 # This cron uses the above location to call its job
 - name: Cron to keep cache fresh
   cron:
-    name: 'multi_ec2_inventory'
+    name: 'multi_inventory'
     minute: '*/10'
-    job: '/usr/share/ansible/inventory/multi_ec2.py --refresh-cache &> /dev/null'
+    job: '/usr/share/ansible/inventory/multi_inventory.py --refresh-cache &> /dev/null'
   when: oo_cron_refresh_cache is defined and oo_cron_refresh_cache
 
 - name: Set cache location

+ 32 - 2
roles/openshift_facts/library/openshift_facts.py

@@ -22,6 +22,7 @@ import copy
 import os
 from distutils.util import strtobool
 from distutils.version import LooseVersion
+from netaddr import IPNetwork
 
 
 def hostname_valid(hostname):
@@ -483,17 +484,34 @@ def set_aggregate_facts(facts):
             dict: the facts dict updated with aggregated facts
     """
     all_hostnames = set()
+    internal_hostnames = set()
     if 'common' in facts:
         all_hostnames.add(facts['common']['hostname'])
         all_hostnames.add(facts['common']['public_hostname'])
+        all_hostnames.add(facts['common']['ip'])
+        all_hostnames.add(facts['common']['public_ip'])
+
+        internal_hostnames.add(facts['common']['hostname'])
+        internal_hostnames.add(facts['common']['ip'])
 
         if 'master' in facts:
+            # FIXME: not sure why but facts['dns']['domain'] fails
+            cluster_domain = 'cluster.local'
             if 'cluster_hostname' in facts['master']:
                 all_hostnames.add(facts['master']['cluster_hostname'])
             if 'cluster_public_hostname' in facts['master']:
                 all_hostnames.add(facts['master']['cluster_public_hostname'])
+            svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
+                         'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
+                         'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
+            all_hostnames.update(svc_names)
+            internal_hostnames.update(svc_names)
+            first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1])
+            all_hostnames.add(first_svc_ip)
+            internal_hostnames.add(first_svc_ip)
 
         facts['common']['all_hostnames'] = list(all_hostnames)
+        facts['common']['internal_hostnames'] = list(internal_hostnames)
 
     return facts
 
@@ -508,8 +526,9 @@ def set_deployment_facts_if_unset(facts):
             dict: the facts dict updated with the generated deployment_type
             facts
     """
-    # Perhaps re-factor this as a map?
-    # pylint: disable=too-many-branches
+    # disabled to avoid breaking up facts related to deployment type into
+    # multiple methods for now.
+    # pylint: disable=too-many-statements, too-many-branches
     if 'common' in facts:
         deployment_type = facts['common']['deployment_type']
         if 'service_type' not in facts['common']:
@@ -550,6 +569,17 @@ def set_deployment_facts_if_unset(facts):
                     registry_url = 'aep3/aep-${component}:${version}'
                 facts[role]['registry_url'] = registry_url
 
+    if 'master' in facts:
+        deployment_type = facts['common']['deployment_type']
+        openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']
+        if 'disabled_features' in facts['master']:
+            if deployment_type == 'atomic-enterprise':
+                curr_disabled_features = set(facts['master']['disabled_features'])
+                facts['master']['disabled_features'] = list(curr_disabled_features.union(openshift_features))
+        else:
+            if deployment_type == 'atomic-enterprise':
+                facts['master']['disabled_features'] = openshift_features
+
     if 'node' in facts:
         deployment_type = facts['common']['deployment_type']
         if 'storage_plugin_deps' not in facts['node']:
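
For context, the new netaddr import backs the first_svc_ip calculation above: indexing an IPNetwork yields its addresses in order, so [1] is the first host address after the network address. A minimal sketch with a hypothetical portal_net value:

    from netaddr import IPNetwork

    portal_net = '172.30.0.0/16'                 # illustrative value, not from the diff
    first_svc_ip = str(IPNetwork(portal_net)[1])
    print(first_svc_ip)                          # -> 172.30.0.1, the first service IP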

+ 3 - 0
roles/openshift_facts/tasks/main.yml

@@ -6,5 +6,8 @@
     - ansible_version | version_compare('1.9.0', 'ne')
     - ansible_version | version_compare('1.9.0.1', 'ne')
 
+- name: Ensure python-netaddr is installed
+  yum: pkg=python-netaddr state=installed
+
 - name: Gather Cluster facts
   openshift_facts:

+ 1 - 0
roles/openshift_master/tasks/main.yml

@@ -62,6 +62,7 @@
       api_server_args: "{{ osm_api_server_args | default(None) }}"
       controller_args: "{{ osm_controller_args | default(None) }}"
       infra_nodes: "{{ num_infra | default(None) }}"
+      disabled_features: "{{ osm_disabled_features | default(None) }}"
 
 - name: Install Master package
   yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version  }} state=present

+ 18 - 1
roles/openshift_master/templates/master.yaml.v1.j2

@@ -16,12 +16,18 @@ assetConfig:
     maxRequestsInFlight: 0
     requestTimeoutSeconds: 0
 corsAllowedOrigins:
-{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] %}
+{% for origin in ['127.0.0.1', 'localhost', openshift.common.hostname, openshift.common.ip, openshift.common.public_hostname, openshift.common.public_ip] | unique %}
   - {{ origin }}
 {% endfor %}
 {% for custom_origin in openshift.master.custom_cors_origins | default("") %}
   - {{ custom_origin }}
 {% endfor %}
+{% for name in (named_certificates | map(attribute='names')) | list | oo_flatten %}
+  - {{ name }}
+{% endfor %}
+{% if 'disabled_features' in openshift.master %}
+disabledFeatures: {{ openshift.master.disabled_features | to_json }}
+{% endif %}
 {% if openshift.master.embedded_dns | bool %}
 dnsConfig:
   bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}
@@ -130,3 +136,14 @@ servingInfo:
   keyFile: master.server.key
   maxRequestsInFlight: 500
   requestTimeoutSeconds: 3600
+{% if named_certificates %}
+  namedCertificates:
+{% for named_certificate in named_certificates %}
+  - certFile: {{ named_certificate['certfile'] }}
+    keyFile: {{ named_certificate['keyfile'] }}
+    names:
+{% for name in named_certificate['names'] %}
+    - "{{ name }}"
+{% endfor %}
+{% endfor %}
+{% endif %}
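
For context, the loops above assume each named_certificates entry carries 'certfile', 'keyfile', and 'names' keys; the names are also folded into corsAllowedOrigins via oo_flatten. A hypothetical value (paths and hostnames are illustrative):

    # Sketch of a named_certificates variable the template would consume:
    named_certificates = [
        {
            'certfile': '/path/to/custom.crt',
            'keyfile': '/path/to/custom.key',
            'names': ['api.example.com'],
        },
    ]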

+ 2 - 0
roles/openshift_master_certificates/tasks/main.yml

@@ -34,6 +34,8 @@
     - serviceaccounts.private.key
     - serviceaccounts.public.key
 
+- debug: msg="{{ item.openshift.master.all_hostnames | join(',') }}"
+  with_items: masters_needing_certs
 
 - name: Create the master certificates if they do not already exist
   command: >

+ 9 - 0
roles/os_zabbix/tasks/main.yml

@@ -15,6 +15,7 @@
 - include_vars: template_ops_tools.yml
 - include_vars: template_app_zabbix_server.yml
 - include_vars: template_app_zabbix_agent.yml
+- include_vars: template_performance_copilot.yml
 
 - name: Include Template Heartbeat
   include: ../../lib_zabbix/tasks/create_template.yml
@@ -79,3 +80,11 @@
     server: "{{ ozb_server }}"
     user: "{{ ozb_user }}"
     password: "{{ ozb_password }}"
+
+- name: Include Template Performance Copilot
+  include: ../../lib_zabbix/tasks/create_template.yml
+  vars:
+    template: "{{ g_template_performance_copilot }}"
+    server: "{{ ozb_server }}"
+    user: "{{ ozb_user }}"
+    password: "{{ ozb_password }}"

+ 82 - 0
roles/os_zabbix/vars/template_openshift_master.yml

@@ -31,6 +31,78 @@ g_template_openshift_master:
     applications:
     - Openshift Master
 
+  - key: openshift.master.etcd.create.success
+    description: Show number of successful create actions
+    type: int
+    applications:
+    - Openshift Etcd
+
+  - key: openshift.master.etcd.create.fail
+    description: Show number of failed create actions
+    type: int
+    applications:
+    - Openshift Etcd
+
+  - key: openshift.master.etcd.delete.success
+    description: Show number of successful delete actions
+    type: int
+    applications:
+    - Openshift Etcd
+
+  - key: openshift.master.etcd.delete.fail
+    description: Show number of failed delete actions
+    type: int
+    applications:
+    - Openshift Etcd
+
+  - key: openshift.master.etcd.get.success
+    description: Show number of successful get actions
+    type: int
+    applications:
+    - Openshift Etcd
+
+  - key: openshift.master.etcd.get.fail
+    description: Show number of failed get actions
+    type: int
+    applications:
+    - Openshift Etcd
+
+  - key: openshift.master.etcd.set.success
+    description: Show number of successful set actions
+    type: int
+    applications:
+    - Openshift Etcd
+
+  - key: openshift.master.etcd.set.fail
+    description: Show number of failed set actions
+    type: int
+    applications:
+    - Openshift Etcd
+
+  - key: openshift.master.etcd.update.success
+    description: Show number of successful update actions
+    type: int
+    applications:
+    - Openshift Etcd
+
+  - key: openshift.master.etcd.update.fail
+    description: Show number of failed update actions
+    type: int
+    applications:
+    - Openshift Etcd
+
+  - key: openshift.master.etcd.watchers
+    description: Show number of etcd watchers
+    type: int
+    applications:
+    - Openshift Etcd
+
+  - key: openshift.master.etcd.ping
+    description: etcd ping
+    type: int
+    applications:
+    - Openshift Etcd
+
   ztriggers:
   - name: 'Application creation has failed on {HOST.NAME}'
     expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
@@ -56,3 +128,13 @@ g_template_openshift_master:
     expression: '{Template Openshift Master:openshift.project.counter.last()}=0'
     url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
     priority: info
+
+  - name: 'Low number of etcd watchers on {HOST.NAME}'
+    expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+    priority: avg
+
+  - name: 'Etcd ping failed on {HOST.NAME}'
+    expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+    priority: high

+ 21 - 0
roles/os_zabbix/vars/template_os_linux.yml

@@ -194,6 +194,11 @@ g_template_os_linux:
     lifetime: 1
     description: "Dynamically register the filesystems"
 
+  - name: disc.disk
+    key: disc.disk
+    lifetime: 1
+    description: "Dynamically register disks on a node"
+
   zitemprototypes:
   - discoveryrule_key: disc.filesys
     name: "disc.filesys.full.{#OSO_FILESYS}"
@@ -211,6 +216,22 @@ g_template_os_linux:
     applications:
     - Disk
 
+  - discoveryrule_key: disc.disk
+    name: "TPS (IOPS) for disk {#OSO_DISK}"
+    key: "disc.disk.tps[{#OSO_DISK}]"
+    value_type: int
+    description: "PCP disk.dev.totals metric measured over a period of time.  This shows how many disk transactions per second the disk is using"
+    applications:
+    - Disk
+
+  - discoveryrule_key: disc.disk
+    name: "Percent Utilized for disk {#OSO_DISK}"
+    key: "disc.disk.putil[{#OSO_DISK}]"
+    value_type: float
+    description: "PCP disk.dev.avactive metric measured over a period of time.  This is the '%util' in the iostat command"
+    applications:
+    - Disk
+
   ztriggerprototypes:
   - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
     expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'

+ 14 - 0
roles/os_zabbix/vars/template_performance_copilot.yml

@@ -0,0 +1,14 @@
+---
+g_template_performance_copilot:
+  name: Template Performance Copilot
+  zitems:
+  - key: pcp.ping
+    applications:
+    - Performance Copilot
+    value_type: int
+
+  ztriggers:
+  - name: 'pcp.ping failed on {HOST.NAME}'
+    expression: '{Template Performance Copilot:pcp.ping.max(#3)}<1'
+    url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_pcp_ping.asciidoc'
+    priority: average

+ 1 - 1
test/units/README.md

@@ -4,4 +4,4 @@ These should be run by sourcing the env-setup:
 $ source test/env-setup
 
 Then navigate to the test/units/ directory.
-$ python -m unittest multi_ec2_test
+$ python -m unittest multi_inventory_test

+ 114 - 0
test/units/multi_inventory_test.py

@@ -0,0 +1,114 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for MultiInventory
+'''
+
+import unittest
+import multi_inventory
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name
+class MultiInventoryTest(unittest.TestCase):
+    '''
+     Test class for multiInventory
+    '''
+
+#    def setUp(self):
+#        '''setup method'''
+#        pass
+
+    def test_merge_simple_1(self):
+        '''Testing a simple merge of 2 dictionaries'''
+        a = {"key1" : 1}
+        b = {"key1" : 2}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"key1": [1, 2]})
+
+    def test_merge_b_empty(self):
+        '''Testing a merge of an empty dictionary'''
+        a = {"key1" : 1}
+        b = {}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"key1": 1})
+
+    def test_merge_a_empty(self):
+        '''Testing a merge of an empty dictionary'''
+        b = {"key1" : 1}
+        a = {}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"key1": 1})
+
+    def test_merge_hash_array(self):
+        '''Testing a merge of a dictionary and a dictionary with an array'''
+        a = {"key1" : {"hasha": 1}}
+        b = {"key1" : [1, 2]}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"key1": [{"hasha": 1}, 1, 2]})
+
+    def test_merge_array_hash(self):
+        '''Testing a merge of a dictionary with an array and a dictionary with a hash'''
+        a = {"key1" : [1, 2]}
+        b = {"key1" : {"hasha": 1}}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"key1": [1, 2, {"hasha": 1}]})
+
+    def test_merge_keys_1(self):
+        '''Testing a merge on a dictionary for keys'''
+        a = {"key1" : [1, 2], "key2" : {"hasha": 2}}
+        b = {"key2" : {"hashb": 1}}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"key1": [1, 2], "key2": {"hasha": 2, "hashb": 1}})
+
+    def test_merge_recursive_1(self):
+        '''Testing a recursive merge'''
+        a = {"a" : {"b": {"c": 1}}}
+        b = {"a" : {"b": {"c": 2}}}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
+
+    def test_merge_recursive_array_item(self):
+        '''Testing a recursive merge for an array'''
+        a = {"a" : {"b": {"c": [1]}}}
+        b = {"a" : {"b": {"c": 2}}}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"a": {"b": {"c": [1, 2]}}})
+
+    def test_merge_recursive_hash_item(self):
+        '''Testing a recursive merge for a hash'''
+        a = {"a" : {"b": {"c": {"d": 1}}}}
+        b = {"a" : {"b": {"c": 2}}}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
+
+    def test_merge_recursive_array_hash(self):
+        '''Testing a recursive merge for an array and a hash'''
+        a = {"a" : [{"b": {"c":  1}}]}
+        b = {"a" : {"b": {"c": 1}}}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
+
+    def test_merge_recursive_hash_array(self):
+        '''Testing a recursive merge for an array and a hash'''
+        a = {"a" : {"b": {"c": 1}}}
+        b = {"a" : [{"b": {"c":  1}}]}
+        result = {}
+        _ = [multi_inventory.MultiInventory.merge_destructively(result, x) for x in [a, b]]
+        self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
+
+#    def tearDown(self):
+#        '''TearDown method'''
+#        pass
+
+if __name__ == "__main__":
+    unittest.main()
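
For context, each test above folds partial inventory dicts into one result via merge_destructively. The same call outside unittest, as a minimal standalone sketch (assuming multi_inventory is importable, as in the tests):

    from multi_inventory import MultiInventory

    result = {}
    for partial in [{"key1": 1}, {"key1": 2}]:
        MultiInventory.merge_destructively(result, partial)
    print(result)  # colliding scalar values merge into a list: {'key1': [1, 2]}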

+ 0 - 95
test/units/mutli_ec2_test.py

@@ -1,95 +0,0 @@
-#!/usr/bin/env python2
-
-import unittest
-import sys
-import os
-import sys
-import multi_ec2
-
-class MultiEc2Test(unittest.TestCase):
-
-    def setUp(self):
-        pass
-
-    def test_merge_simple_1(self):
-        a = {"key1" : 1}
-        b = {"key1" : 2}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"key1": [1,2]})
-
-    def test_merge_b_empty(self):
-        a = {"key1" : 1}
-        b = {}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"key1": 1})
-
-    def test_merge_a_empty(self):
-        b = {"key1" : 1}
-        a = {}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"key1": 1})
-
-    def test_merge_hash_array(self):
-        a = {"key1" : {"hasha": 1}}
-        b = {"key1" : [1,2]}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"key1": [{"hasha": 1}, 1,2]})
-
-    def test_merge_array_hash(self):
-        a = {"key1" : [1,2]}
-        b = {"key1" : {"hasha": 1}}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"key1": [1,2, {"hasha": 1}]})
-
-    def test_merge_keys_1(self):
-        a = {"key1" : [1,2], "key2" : {"hasha": 2}}
-        b = {"key2" : {"hashb": 1}}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"key1": [1,2], "key2": {"hasha": 2, "hashb": 1}})
-
-    def test_merge_recursive_1(self):
-        a = {"a" : {"b": {"c": 1}}}
-        b = {"a" : {"b": {"c": 2}}}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"a": {"b": {"c": [1,2]}}})
-
-    def test_merge_recursive_array_item(self):
-        a = {"a" : {"b": {"c": [1]}}}
-        b = {"a" : {"b": {"c": 2}}}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"a": {"b": {"c": [1,2]}}})
-
-    def test_merge_recursive_hash_item(self):
-        a = {"a" : {"b": {"c": {"d": 1}}}}
-        b = {"a" : {"b": {"c": 2}}}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"a": {"b": {"c": [{"d": 1}, 2]}}})
-
-    def test_merge_recursive_array_hash(self):
-        a = {"a" : [{"b": {"c":  1}}]}
-        b = {"a" : {"b": {"c": 1}}}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
-    def test_merge_recursive_hash_array(self):
-        a = {"a" : {"b": {"c": 1}}}
-        b = {"a" : [{"b": {"c":  1}}]}
-        result = {}
-        [multi_ec2.MultiEc2.merge_destructively(result, x) for x in [a,b]]
-        self.assertEqual(result, {"a": [{"b": {"c": 1}}]})
-
-    def tearDown(self):
-        pass
-
-if __name__ == "__main__":
-  unittest.main()

+ 1 - 1
utils/setup.py

@@ -79,7 +79,7 @@ setup(
     # pip to create the appropriate form of executable for the target platform.
     entry_points={
         'console_scripts': [
-            'oo-install=ooinstall.cli_installer:main',
+            'oo-install=ooinstall.cli_installer:cli',
         ],
     },
 )

+ 92 - 40
utils/src/ooinstall/cli_installer.py

@@ -6,12 +6,13 @@ import click
 import os
 import re
 import sys
-from ooinstall import install_transactions
+from ooinstall import openshift_ansible
 from ooinstall import OOConfig
 from ooinstall.oo_config import Host
 from ooinstall.variants import find_variant, get_variant_version_combos
 
 DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-util/ansible.cfg'
+DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
 
 def validate_ansible_dir(path):
     if not path:
@@ -94,7 +95,7 @@ The OpenShift Node provides the runtime environments for containers.  It will
 host the required services to be managed by the Master.
 
 http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
-http://docs.openshift.com/enterprise/3.0/architecture/infrastructure_components/kubernetes_infrastructure.html#node
+http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#node
     """
     click.echo(message)
 
@@ -190,7 +191,7 @@ Notes:
     facts_confirmed = click.confirm("Do the above facts look correct?")
     if not facts_confirmed:
         message = """
-Edit %s with the desired values and rerun oo-install with --unattended .
+Edit %s with the desired values and rerun atomic-openshift-installer with --unattended .
 """ % oo_cfg.config_path
         click.echo(message)
         # Make sure we actually write out the config file.
@@ -356,8 +357,8 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
                 hosts_to_run_on.extend(new_nodes)
                 oo_cfg.hosts.extend(new_nodes)
 
-                install_transactions.set_config(oo_cfg)
-                callback_facts, error = install_transactions.default_facts(oo_cfg.hosts)
+                openshift_ansible.set_config(oo_cfg)
+                callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts)
                 if error:
                     click.echo("There was a problem fetching the required information. " \
                                "See {} for details.".format(oo_cfg.settings['ansible_log_path']))
@@ -367,71 +368,117 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force):
 
     return hosts_to_run_on, callback_facts
 
-@click.command()
+
+@click.group()
+@click.pass_context
+@click.option('--unattended', '-u', is_flag=True, default=False)
 @click.option('--configuration', '-c',
-              type=click.Path(file_okay=True,
-                              dir_okay=False,
-                              writable=True,
-                              readable=True),
-              default=None)
+    type=click.Path(file_okay=True,
+        dir_okay=False,
+        writable=True,
+        readable=True),
+    default=None)
 @click.option('--ansible-playbook-directory',
               '-a',
               type=click.Path(exists=True,
                               file_okay=False,
                               dir_okay=True,
-                              writable=True,
                               readable=True),
               # callback=validate_ansible_dir,
+              default='/usr/share/ansible/openshift-ansible/',
               envvar='OO_ANSIBLE_PLAYBOOK_DIRECTORY')
 @click.option('--ansible-config',
-              type=click.Path(file_okay=True,
-                              dir_okay=False,
-                              writable=True,
-                              readable=True),
-              default=None)
+    type=click.Path(file_okay=True,
+        dir_okay=False,
+        writable=True,
+        readable=True),
+    default=None)
 @click.option('--ansible-log-path',
-              type=click.Path(file_okay=True,
-                              dir_okay=False,
-                              writable=True,
-                              readable=True),
-              default="/tmp/ansible.log")
-@click.option('--unattended', '-u', is_flag=True, default=False)
-@click.option('--force', '-f', is_flag=True, default=False)
+    type=click.Path(file_okay=True,
+        dir_okay=False,
+        writable=True,
+        readable=True),
+    default="/tmp/ansible.log")
 #pylint: disable=too-many-arguments
 # Main CLI entrypoint, not much we can do about too many arguments.
-def main(configuration, ansible_playbook_directory, ansible_config, ansible_log_path, unattended, force):
-    oo_cfg = OOConfig(configuration)
+def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path):
+    """
+    The main click CLI module. Responsible for handling most common CLI options,
+    assigning any defaults and adding to the context for the sub-commands.
+    """
+    ctx.obj = {}
+    ctx.obj['unattended'] = unattended
+    ctx.obj['configuration'] = configuration
+    ctx.obj['ansible_config'] = ansible_config
+    ctx.obj['ansible_log_path'] = ansible_log_path
 
+    oo_cfg = OOConfig(ctx.obj['configuration'])
+
+    # If no playbook dir on the CLI, check the config:
     if not ansible_playbook_directory:
         ansible_playbook_directory = oo_cfg.settings.get('ansible_playbook_directory', '')
+    # If still no playbook dir, check for the default location:
+    if not ansible_playbook_directory and os.path.exists(DEFAULT_PLAYBOOK_DIR):
+        ansible_playbook_directory = DEFAULT_PLAYBOOK_DIR
+    validate_ansible_dir(ansible_playbook_directory)
+    oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
+    oo_cfg.ansible_playbook_directory = ansible_playbook_directory
+    ctx.obj['ansible_playbook_directory'] = ansible_playbook_directory
 
-    if ansible_config:
-        oo_cfg.settings['ansible_config'] = ansible_config
+    if ctx.obj['ansible_config']:
+        oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
     elif os.path.exists(DEFAULT_ANSIBLE_CONFIG):
         # If we're installed by RPM this file should exist and we can use it as our default:
         oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
 
-    validate_ansible_dir(ansible_playbook_directory)
-    oo_cfg.settings['ansible_playbook_directory'] = ansible_playbook_directory
-    oo_cfg.ansible_playbook_directory = ansible_playbook_directory
+    oo_cfg.settings['ansible_log_path'] = ctx.obj['ansible_log_path']
+
+    ctx.obj['oo_cfg'] = oo_cfg
+    openshift_ansible.set_config(oo_cfg)
+
+
+@click.command()
+@click.pass_context
+def uninstall(ctx):
+    oo_cfg = ctx.obj['oo_cfg']
+
+    if len(oo_cfg.hosts) == 0:
+        click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
+        sys.exit(1)
+
+    click.echo("OpenShift will be uninstalled from the following hosts:\n")
+    if not ctx.obj['unattended']:
+        # Prompt interactively to confirm:
+        for host in oo_cfg.hosts:
+            click.echo("  * %s" % host.name)
+        proceed = click.confirm("\nDo you wish to proceed?")
+        if not proceed:
+            click.echo("Uninstall cancelled.")
+            sys.exit(0)
 
-    oo_cfg.settings['ansible_log_path'] = ansible_log_path
-    install_transactions.set_config(oo_cfg)
+    openshift_ansible.run_uninstall_playbook()
 
-    if unattended:
+
+
+@click.command()
+@click.option('--force', '-f', is_flag=True, default=False)
+@click.pass_context
+def install(ctx, force):
+    oo_cfg = ctx.obj['oo_cfg']
+
+    if ctx.obj['unattended']:
         error_if_missing_info(oo_cfg)
     else:
         oo_cfg = get_missing_info_from_user(oo_cfg)
 
     click.echo('Gathering information from hosts...')
-    callback_facts, error = install_transactions.default_facts(oo_cfg.hosts)
+    callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts)
     if error:
         click.echo("There was a problem fetching the required information. " \
                    "Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
         sys.exit(1)
 
-    hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force)
-
+    hosts_to_run_on, callback_facts = get_hosts_to_run_on(oo_cfg, callback_facts, ctx.obj['unattended'], force)
 
     click.echo('Writing config to: %s' % oo_cfg.config_path)
 
@@ -449,10 +496,10 @@ def main(configuration, ansible_playbook_directory, ansible_config, ansible_log_path, unattended, force):
     message = """
 If changes are needed to the values recorded by the installer please update {}.
 """.format(oo_cfg.config_path)
-    if not unattended:
+    if not ctx.obj['unattended']:
         confirm_continue(message)
 
-    error = install_transactions.run_main_playbook(oo_cfg.hosts,
+    error = openshift_ansible.run_main_playbook(oo_cfg.hosts,
                                                    hosts_to_run_on)
     if error:
         # The bootstrap script will print out the log location.
@@ -475,5 +522,10 @@ http://docs.openshift.com/enterprise/latest/admin_guide/overview.html
         click.echo(message)
         click.pause()
 
+cli.add_command(install)
+cli.add_command(uninstall)
+
 if __name__ == '__main__':
-    main()
+    # This is expected behaviour for context passing with click library:
+    # pylint: disable=unexpected-keyword-arg
+    cli(obj={})
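
For context, main() is now the click group cli with install and uninstall sub-commands, so the shared options (-u, -c, -a, --ansible-config, --ansible-log-path) must precede the sub-command name. A minimal invocation sketch using click's test runner, mirroring the updated tests (paths are illustrative):

    from click.testing import CliRunner
    from ooinstall import cli_installer

    runner = CliRunner()
    # Group-level options come before the sub-command:
    result = runner.invoke(cli_installer.cli,
                           ['-u', '-c', 'ooinstall.conf',
                            '-a', '/path/to/openshift-ansible', 'install'])
    print(result.exit_code)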

+ 25 - 1
utils/src/ooinstall/install_transactions.py

@@ -2,7 +2,9 @@
 # repo. We will work on these over time.
 # pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned
 
+import socket
 import subprocess
+import sys
 import os
 import yaml
 from ooinstall.variants import find_variant
@@ -16,13 +18,15 @@ def set_config(cfg):
 def generate_inventory(hosts):
     print hosts
     global CFG
+
+    installer_host = socket.gethostname()
     base_inventory_path = CFG.settings['ansible_inventory_path']
     base_inventory = open(base_inventory_path, 'w')
     base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n')
     base_inventory.write('\n[OSEv3:vars]\n')
     base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
     if CFG.settings['ansible_ssh_user'] != 'root':
-        base_inventory.write('ansible_sudo=true\n')
+        base_inventory.write('ansible_become=true\n')
 
     # Find the correct deployment type for ansible:
     ver = find_variant(CFG.settings['variant'],
@@ -41,6 +45,14 @@ def generate_inventory(hosts):
     if 'OO_INSTALL_STAGE_REGISTRY' in os.environ:
         base_inventory.write('oreg_url=registry.access.stage.redhat.com/openshift3/ose-${component}:${version}\n')
 
+    if any(host.hostname == installer_host or host.public_hostname == installer_host
+            for host in hosts):
+        no_pwd_sudo = subprocess.call(['sudo', '-v', '--non-interactive'])
+        if no_pwd_sudo == 1:
+            print 'The atomic-openshift-installer requires sudo access without a password.'
+            sys.exit(1)
+        base_inventory.write("ansible_connection=local\n")
+
     base_inventory.write('\n[masters]\n')
     masters = (host for host in hosts if host.master)
     for master in masters:
@@ -126,8 +138,20 @@ def run_main_playbook(hosts, hosts_to_run_on):
         facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
     return run_ansible(main_playbook_path, inventory_file, facts_env)
 
+
 def run_ansible(playbook, inventory, env_vars):
     return subprocess.call(['ansible-playbook',
                              '--inventory-file={}'.format(inventory),
                              playbook],
                              env=env_vars)
+
+def run_uninstall_playbook():
+    playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
+        'playbooks/adhoc/uninstall.yml')
+    inventory_file = generate_inventory(CFG.hosts)
+    facts_env = os.environ.copy()
+    if 'ansible_log_path' in CFG.settings:
+        facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
+    if 'ansible_config' in CFG.settings:
+        facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
+    return run_ansible(playbook, inventory_file, facts_env)

+ 1 - 1
utils/src/ooinstall/variants.py

@@ -38,7 +38,7 @@ OSE = Variant('openshift-enterprise', 'OpenShift Enterprise',
     ]
 )
 
-AEP = Variant('atomic-enterprise', 'Atomic OpenShift Enterprise',
+AEP = Variant('atomic-enterprise', 'Atomic Enterprise Platform',
     [
         Version('3.1', 'atomic-enterprise')
     ]

+ 37 - 33
utils/test/cli_installer_tests.py

@@ -76,7 +76,7 @@ class OOCliFixture(OOInstallFixture):
         self.cli_args = ["-a", self.work_dir]
 
     def run_cli(self):
-        return self.runner.invoke(cli.main, self.cli_args)
+        return self.runner.invoke(cli.cli, self.cli_args)
 
     def assert_result(self, result, exit_code):
         if result.exception is not None or result.exit_code != exit_code:
@@ -102,8 +102,8 @@ class UnattendedCliTests(OOCliFixture):
         OOCliFixture.setUp(self)
         self.cli_args.append("-u")
 
-    @patch('ooinstall.install_transactions.run_main_playbook')
-    @patch('ooinstall.install_transactions.load_system_facts')
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_cfg_full_run(self, load_facts_mock, run_playbook_mock):
         load_facts_mock.return_value = (MOCK_FACTS, 0)
         run_playbook_mock.return_value = 0
@@ -111,8 +111,8 @@ class UnattendedCliTests(OOCliFixture):
         config_file = self.write_config(os.path.join(self.work_dir,
             'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
 
-        self.cli_args.extend(["-c", config_file])
-        result = self.runner.invoke(cli.main, self.cli_args)
+        self.cli_args.extend(["-c", config_file, "install"])
+        result = self.runner.invoke(cli.cli, self.cli_args)
         self.assert_result(result, 0)
 
         load_facts_args = load_facts_mock.call_args[0]
@@ -133,8 +133,8 @@ class UnattendedCliTests(OOCliFixture):
         self.assertEquals(3, len(hosts))
         self.assertEquals(3, len(hosts_to_run_on))
 
-    @patch('ooinstall.install_transactions.run_main_playbook')
-    @patch('ooinstall.install_transactions.load_system_facts')
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_inventory_write(self, load_facts_mock, run_playbook_mock):
 
         # Add an ssh user so we can verify it makes it to the inventory file:
@@ -146,8 +146,8 @@ class UnattendedCliTests(OOCliFixture):
         config_file = self.write_config(os.path.join(self.work_dir,
             'ooinstall.conf'), merged_config)
 
-        self.cli_args.extend(["-c", config_file])
-        result = self.runner.invoke(cli.main, self.cli_args)
+        self.cli_args.extend(["-c", config_file, "install"])
+        result = self.runner.invoke(cli.cli, self.cli_args)
         self.assert_result(result, 0)
 
         # Check the inventory file looks as we would expect:
@@ -172,8 +172,8 @@ class UnattendedCliTests(OOCliFixture):
             self.assertTrue('openshift_hostname' in master_line)
             self.assertTrue('openshift_public_hostname' in master_line)
 
-    @patch('ooinstall.install_transactions.run_main_playbook')
-    @patch('ooinstall.install_transactions.load_system_facts')
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_variant_version_latest_assumed(self, load_facts_mock,
         run_playbook_mock):
         load_facts_mock.return_value = (MOCK_FACTS, 0)
@@ -182,8 +182,8 @@ class UnattendedCliTests(OOCliFixture):
         config_file = self.write_config(os.path.join(self.work_dir,
             'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
 
-        self.cli_args.extend(["-c", config_file])
-        result = self.runner.invoke(cli.main, self.cli_args)
+        self.cli_args.extend(["-c", config_file, "install"])
+        result = self.runner.invoke(cli.cli, self.cli_args)
         self.assert_result(result, 0)
 
         written_config = self._read_yaml(config_file)
@@ -199,8 +199,8 @@ class UnattendedCliTests(OOCliFixture):
         self.assertEquals('openshift-enterprise',
             inventory.get('OSEv3:vars', 'deployment_type'))
 
-    @patch('ooinstall.install_transactions.run_main_playbook')
-    @patch('ooinstall.install_transactions.load_system_facts')
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_variant_version_preserved(self, load_facts_mock,
         run_playbook_mock):
         load_facts_mock.return_value = (MOCK_FACTS, 0)
@@ -211,8 +211,8 @@ class UnattendedCliTests(OOCliFixture):
         config_file = self.write_config(os.path.join(self.work_dir,
             'ooinstall.conf'), config)
 
-        self.cli_args.extend(["-c", config_file])
-        result = self.runner.invoke(cli.main, self.cli_args)
+        self.cli_args.extend(["-c", config_file, "install"])
+        result = self.runner.invoke(cli.cli, self.cli_args)
         self.assert_result(result, 0)
 
         written_config = self._read_yaml(config_file)
@@ -227,8 +227,8 @@ class UnattendedCliTests(OOCliFixture):
         self.assertEquals('enterprise',
             inventory.get('OSEv3:vars', 'deployment_type'))
 
-    @patch('ooinstall.install_transactions.run_ansible')
-    @patch('ooinstall.install_transactions.load_system_facts')
+    @patch('ooinstall.openshift_ansible.run_ansible')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_no_ansible_config_specified(self, load_facts_mock, run_ansible_mock):
         load_facts_mock.return_value = (MOCK_FACTS, 0)
         run_ansible_mock.return_value = 0
@@ -238,8 +238,8 @@ class UnattendedCliTests(OOCliFixture):
         self._ansible_config_test(load_facts_mock, run_ansible_mock,
             config, None, None)
 
-    @patch('ooinstall.install_transactions.run_ansible')
-    @patch('ooinstall.install_transactions.load_system_facts')
+    @patch('ooinstall.openshift_ansible.run_ansible')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_ansible_config_specified_cli(self, load_facts_mock, run_ansible_mock):
         load_facts_mock.return_value = (MOCK_FACTS, 0)
         run_ansible_mock.return_value = 0
@@ -250,8 +250,8 @@ class UnattendedCliTests(OOCliFixture):
         self._ansible_config_test(load_facts_mock, run_ansible_mock,
             config, ansible_config, ansible_config)
 
-    @patch('ooinstall.install_transactions.run_ansible')
-    @patch('ooinstall.install_transactions.load_system_facts')
+    @patch('ooinstall.openshift_ansible.run_ansible')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_ansible_config_specified_in_installer_config(self,
         load_facts_mock, run_ansible_mock):
 
@@ -282,7 +282,8 @@ class UnattendedCliTests(OOCliFixture):
         self.cli_args.extend(["-c", config_file])
         if ansible_config_cli:
             self.cli_args.extend(["--ansible-config", ansible_config_cli])
-        result = self.runner.invoke(cli.main, self.cli_args)
+        self.cli_args.append("install")
+        result = self.runner.invoke(cli.cli, self.cli_args)
         self.assert_result(result, 0)
 
         # Test the env vars for facts playbook:
@@ -388,8 +389,8 @@ class AttendedCliTests(OOCliFixture):
             self.assertTrue('public_ip' in h)
             self.assertTrue('public_hostname' in h)
 
-    @patch('ooinstall.install_transactions.run_main_playbook')
-    @patch('ooinstall.install_transactions.load_system_facts')
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_full_run(self, load_facts_mock, run_playbook_mock):
         load_facts_mock.return_value = (MOCK_FACTS, 0)
         run_playbook_mock.return_value = 0
@@ -401,7 +402,8 @@ class AttendedCliTests(OOCliFixture):
                                       ssh_user='root',
                                       variant_num=1,
                                       confirm_facts='y')
-        result = self.runner.invoke(cli.main, self.cli_args,
+        self.cli_args.append("install")
+        result = self.runner.invoke(cli.cli, self.cli_args,
             input=cli_input)
         self.assert_result(result, 0)
 
@@ -411,8 +413,8 @@ class AttendedCliTests(OOCliFixture):
         written_config = self._read_yaml(self.config_file)
         self._verify_config_hosts(written_config, 3)
 
-    @patch('ooinstall.install_transactions.run_main_playbook')
-    @patch('ooinstall.install_transactions.load_system_facts')
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_add_nodes(self, load_facts_mock, run_playbook_mock):
 
         # Modify the mock facts to return a version indicating OpenShift
@@ -432,7 +434,8 @@ class AttendedCliTests(OOCliFixture):
                                       ssh_user='root',
                                       variant_num=1,
                                       confirm_facts='y')
-        result = self.runner.invoke(cli.main,
+        self.cli_args.append("install")
+        result = self.runner.invoke(cli.cli,
                                     self.cli_args,
                                     input=cli_input)
         self.assert_result(result, 0)
@@ -443,8 +446,8 @@ class AttendedCliTests(OOCliFixture):
         written_config = self._read_yaml(self.config_file)
         self._verify_config_hosts(written_config, 3)
 
-    @patch('ooinstall.install_transactions.run_main_playbook')
-    @patch('ooinstall.install_transactions.load_system_facts')
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_fresh_install_with_config(self, load_facts_mock, run_playbook_mock):
         load_facts_mock.return_value = (MOCK_FACTS, 0)
         run_playbook_mock.return_value = 0
@@ -454,7 +457,8 @@ class AttendedCliTests(OOCliFixture):
                                         SAMPLE_CONFIG % 'openshift-enterprise')
         cli_input = self._build_input(confirm_facts='y')
         self.cli_args.extend(["-c", config_file])
-        result = self.runner.invoke(cli.main,
+        self.cli_args.append("install")
+        result = self.runner.invoke(cli.cli,
                                     self.cli_args,
                                     input=cli_input)
         self.assert_result(result, 0)