
Merge pull request #1155 from mwoodson/host_monitoring

merging master into prod
Matt Woodson 9 years ago
parent
commit
0bbfef4e19
100 changed files with 1219 additions and 505 deletions
  1. .tito/packages/openshift-ansible (+1 -1)
  2. BUILD.md (+1 -1)
  3. README_AEP.md (+4 -2)
  4. README_CONTAINERIZED_INSTALLATION.md (+101 -0)
  5. README_OSE.md (+3 -1)
  6. README_origin.md (+4 -2)
  7. bin/cluster (+62 -49)
  8. bin/ohi (+22 -25)
  9. bin/openshift_ansible/awsutil.py (+62 -39)
  10. bin/opssh (+34 -15)
  11. bin/oscp (+8 -11)
  12. bin/ossh (+13 -17)
  13. filter_plugins/oo_filters.py (+13 -6)
  14. filter_plugins/openshift_master.py (+0 -1)
  15. inventory/byo/hosts.aep.example (+20 -0)
  16. inventory/byo/hosts.aep_quickstart (+20 -0)
  17. inventory/byo/hosts.openstack (+37 -0)
  18. inventory/byo/hosts.origin.example (+26 -0)
  19. inventory/byo/hosts.ose.example (+20 -0)
  20. openshift-ansible.spec (+120 -1)
  21. playbooks/adhoc/create_pv/create_pv.yaml (+4 -5)
  22. playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml (+1 -1)
  23. playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml (+1 -1)
  24. playbooks/adhoc/s3_registry/s3_registry.yml (+1 -1)
  25. playbooks/adhoc/sdn_restart/oo-sdn-restart.yml (+53 -0)
  26. playbooks/adhoc/setupnfs.yml (+21 -0)
  27. playbooks/adhoc/uninstall.yml (+31 -3)
  28. playbooks/aws/openshift-cluster/cluster_hosts.yml (+22 -0)
  29. playbooks/aws/openshift-cluster/config.yml (+6 -18)
  30. playbooks/aws/openshift-cluster/list.yml (+1 -1)
  31. playbooks/aws/openshift-cluster/scaleup.yml (+6 -9)
  32. playbooks/aws/openshift-cluster/service.yml (+3 -2)
  33. playbooks/aws/openshift-cluster/tasks/launch_instances.yml (+6 -3)
  34. playbooks/aws/openshift-cluster/terminate.yml (+17 -17)
  35. playbooks/aws/openshift-cluster/update.yml (+2 -6)
  36. playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml (+6 -22)
  37. playbooks/aws/openshift-cluster/vars.yml (+19 -13)
  38. playbooks/byo/openshift-cluster/cluster_hosts.yml (+13 -0)
  39. playbooks/byo/openshift-cluster/config.yml (+3 -5)
  40. playbooks/byo/openshift-cluster/scaleup.yml (+3 -5)
  41. playbooks/byo/openshift-cluster/upgrades/README.md (+1 -1)
  42. playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml (+3 -0)
  43. playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml (+3 -0)
  44. playbooks/common/openshift-cluster/config.yml (+4 -0)
  45. playbooks/common/openshift-cluster/evaluate_groups.yml (+16 -0)
  46. playbooks/common/openshift-cluster/update_repos_and_packages.yml (+2 -1)
  47. playbooks/common/openshift-cluster/upgrades/files/versions.sh (+2 -3)
  48. playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml (+8 -4)
  49. playbooks/common/openshift-docker/config.yml (+9 -0)
  50. playbooks/common/openshift-docker/filter_plugins (+1 -0)
  51. playbooks/common/openshift-docker/lookup_plugins (+1 -0)
  52. playbooks/common/openshift-docker/roles (+1 -0)
  53. playbooks/common/openshift-etcd/config.yml (+3 -1)
  54. playbooks/common/openshift-master/config.yml (+38 -24)
  55. playbooks/common/openshift-nfs/config.yml (+5 -0)
  56. playbooks/common/openshift-nfs/filter_plugins (+1 -0)
  57. playbooks/common/openshift-nfs/lookup_plugins (+1 -0)
  58. playbooks/common/openshift-nfs/roles (+1 -0)
  59. playbooks/common/openshift-nfs/service.yml (+18 -0)
  60. playbooks/common/openshift-node/config.yml (+2 -0)
  61. playbooks/gce/openshift-cluster/cluster_hosts.yml (+22 -0)
  62. playbooks/gce/openshift-cluster/config.yml (+7 -23)
  63. playbooks/gce/openshift-cluster/join_node.yml (+4 -7)
  64. playbooks/gce/openshift-cluster/list.yml (+1 -1)
  65. playbooks/gce/openshift-cluster/service.yml (+3 -2)
  66. playbooks/gce/openshift-cluster/tasks/launch_instances.yml (+2 -1)
  67. playbooks/gce/openshift-cluster/terminate.yml (+1 -2)
  68. playbooks/gce/openshift-cluster/update.yml (+3 -7)
  69. playbooks/gce/openshift-cluster/vars.yml (+12 -6)
  70. playbooks/libvirt/openshift-cluster/cluster_hosts.yml (+22 -0)
  71. playbooks/libvirt/openshift-cluster/config.yml (+7 -19)
  72. playbooks/libvirt/openshift-cluster/list.yml (+1 -1)
  73. playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml (+5 -1)
  74. playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml (+1 -1)
  75. playbooks/libvirt/openshift-cluster/templates/domain.xml (+2 -1)
  76. playbooks/libvirt/openshift-cluster/terminate.yml (+1 -1)
  77. playbooks/libvirt/openshift-cluster/update.yml (+3 -8)
  78. playbooks/libvirt/openshift-cluster/vars.yml (+17 -15)
  79. playbooks/openstack/openshift-cluster/cluster_hosts.yml (+22 -0)
  80. playbooks/openstack/openshift-cluster/config.yml (+8 -18)
  81. playbooks/openstack/openshift-cluster/files/heat_stack.yaml (+39 -30)
  82. playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml (+7 -1)
  83. playbooks/openstack/openshift-cluster/launch.yml (+5 -4)
  84. playbooks/openstack/openshift-cluster/list.yml (+1 -1)
  85. playbooks/openstack/openshift-cluster/terminate.yml (+1 -2)
  86. playbooks/openstack/openshift-cluster/update.yml (+3 -8)
  87. playbooks/openstack/openshift-cluster/vars.yml (+9 -4)
  88. roles/ansible/tasks/main.yml (+1 -0)
  89. roles/cockpit/tasks/main.yml (+1 -0)
  90. roles/copr_cli/tasks/main.yml (+1 -0)
  91. roles/docker/README.md (+1 -1)
  92. roles/docker/handlers/main.yml (+4 -1)
  93. roles/docker/tasks/main.yml (+9 -1)
  94. roles/etcd/defaults/main.yaml (+1 -0)
  95. roles/etcd/handlers/main.yml (+3 -2)
  96. roles/etcd/tasks/main.yml (+54 -8)
  97. roles/etcd/templates/etcd.conf.j2 (+11 -11)
  98. roles/etcd/templates/etcd.docker.service (+13 -0)
  99. roles/flannel/tasks/main.yml (+1 -0)
  100. roles/fluentd_master/tasks/main.yml (+0 -0)

+ 1 - 1
.tito/packages/openshift-ansible

@@ -1 +1 @@
-3.0.20-1 ./
+3.0.27-1 ./

+ 1 - 1
BUILD.md

@@ -1,7 +1,7 @@
 # openshift-ansible RPM Build instructions
 We use tito to make building and tracking revisions easy.
 
-For more information on tito, please see the [Tito home page](http://rm-rf.ca/tito "Tito home page").
+For more information on tito, please see the [Tito home page](https://github.com/dgoodwin/tito "Tito home page").
 
 
 ## Build openshift-ansible-bin

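The quoted BUILD.md hunk stops just short of the build steps. As a rough sketch of the usual tito flow (assuming a tito-enabled checkout; both commands are standard tito subcommands):

```sh
# Build a scratch RPM from the latest commit, without tagging a release
tito build --rpm --test

# Tag a new release, then build the official RPM from that tag
tito tag
tito build --rpm
```
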
+ 4 - 2
README_AEP.md

@@ -81,10 +81,10 @@ deployment_type=atomic-enterprise
 
 # Pre-release registry URL; note that in the future these images 
 # may have an atomicenterprise/aep- prefix or so.
-oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
+oreg_url=rcm-img-docker:5001/openshift3/ose-${component}:${version}
 
 # Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/AtomicOpenShift/3.1/2015-10-27.1', 'enabled': 1, 'gpgcheck': 0}]
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm/puddle/build/AtomicOpenShift/3.1/2015-10-27.1', 'enabled': 1, 'gpgcheck': 0}]
 
 # host group for masters
 [masters]
@@ -98,6 +98,8 @@ aep3-node[1:2].example.com
 The hostnames above should resolve both from the hosts themselves and
 the host where ansible is running (if different).
 
+A more complete example inventory file ([hosts.aep.example](https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.aep.example)) is available under the [`/inventory/byo`](https://github.com/openshift/openshift-ansible/tree/master/inventory/byo) directory.
+
 ## Running the ansible playbooks
 From the openshift-ansible checkout run:
 ```sh

+ 101 - 0
README_CONTAINERIZED_INSTALLATION.md

@@ -0,0 +1,101 @@
+# Overview
+
+Users may now deploy containerized versions of OpenShift Origin, OpenShift
+Enterprise, or Atomic Enterprise Platform on [Atomic
+Host](https://projectatomic.io) or RHEL, Centos, and Fedora. This includes
+OpenvSwitch based SDN.
+
+
+## Installing on Atomic Host
+
+When installing on Atomic Host you will automatically have containerized
+installation methods selected for you based on detection of _/run/ostree-booted_
+
+## Installing on RHEL, Centos, or Fedora
+
+Currently the default installation method for traditional operating systems is
+via RPMs. If you wish to deploy using containerized installation you may set the
+ansible variable 'containerized=true' on a per host basis. This means that you
+may easily deploy environments mixing containerized and RPM based installs. At
+this point we suggest deploying heterogeneous environments.
+
+## CLI Wrappers
+
+When using containerized installations openshift-ansible will deploy a wrapper
+script on each master located in _/usr/local/bin/openshift_ and a set of
+symbolic links _/usr/local/bin/oc_, _/usr/local/bin/oadm_, and
+_/usr/local/bin/kubectl_ to ease administrative tasks. The wrapper script spawns
+a new container on each invocation so you may notice it's slightly slower than
+native clients.
+
+The wrapper scripts mount a limited subset of paths, _~/.kube_, _/etc/origin/_,
+and _/tmp_. Be mindful of this when passing in files to be processed by `oc` or
+ `oadm`. You may find it easier to redirect input like this:
+ 
+ `oc create -f - < my_file.json`
+
+## Technical Notes
+
+### Requisite Images
+
+Based on your deployment_type the installer will make use of the following
+images. Because you may make use of a private repository we've moved the
+configuration of docker additional, insecure, and blocked registries to the
+beginning of the installation process ensuring that these settings are applied
+before attempting to pull any of the following images.
+
+    Origin
+        openshift/origin
+        openshift/node (node + openshift-sdn + openvswitch rpm for client tools)
+        openshift/openvswitch (centos7 + openvswitch rpm, runs ovsdb ovsctl processes)
+        registry.access.redhat.com/rhel7/etcd
+    OpenShift Enterprise
+        openshift3/ose
+        openshift3/node
+        openshift3/openvswitch
+        registry.access.redhat.com/rhel7/etcd
+    Atomic Enterprise Platform
+        aep3/aep
+        aep3/node
+        aep3/openvswitch
+        registry.access.redhat.com/rhel7/etcd
+        
+  * note openshift3/* and aep3/* images come from registry.access.redhat.com and
+rely on the --additional-repository flag being set appropriately.
+
+### Starting and Stopping Containers
+
+The installer will create relevant systemd units which can be used to start,
+stop, and poll services via normal systemctl commands. These unit names match
+those of an RPM installation with the exception of the etcd service which will
+be named 'etcd_container'. This change is necessary as currently Atomic Host
+ships with etcd package installed as part of Atomic Host and we will instead use
+a containerized version. The installer will disable the built in etcd service.
+etcd is slated to be removed from os-tree in the future.
+
+### File Paths
+
+All configuration files are placed in the same locations as RPM based
+installations and will survive os-tree upgrades.
+
+The examples are installed into _/etc/origin/examples_ rather than
+_/usr/share/openshift/examples_ because that is read-only on Atomic Host.
+
+
+### Storage Requirements
+
+Atomic Host installs normally have a very small root filesystem. However the
+etcd, master, and node containers will persist data in /var/lib. Please ensure
+that you have enough space on the root filesystem.
+
+### OpenvSwitch SDN Initialization
+
+OpenShift SDN initialization requires that the docker bridge be reconfigured and
+docker is restarted. This complicates the situation when the node is running
+within a container. When using the OVS SDN you'll see the node start,
+reconfigure docker, restart docker which will restart all containers, and
+finally start successfully.
+
+The node service may fail to start and be restarted a few times because the
+master services are also restarted along with docker. We currently work around
+this by relying on Restart=always in the docker based systemd units.

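To make the unit-naming and wrapper notes above concrete, a short sketch (`etcd_container` and the `oc` redirection come straight from the text above; the exact names of the other units follow the RPM install and vary with deployment_type):

```sh
# Poll the containerized services; only etcd's unit name differs from RPM installs
systemctl status etcd_container

# The CLI wrapper only mounts ~/.kube, /etc/origin/, and /tmp,
# so feed other files to oc/oadm via stdin redirection:
oc create -f - < my_file.json
```
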
+ 3 - 1
README_OSE.md

@@ -82,7 +82,7 @@ deployment_type=enterprise
 # Pre-release additional repo
 openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
 'baseurl':
-'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
+'http://buildvm/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
 'enabled': 1, 'gpgcheck': 0}]
 
 # Origin copr repo
@@ -105,6 +105,8 @@ ose3-node[1:2].example.com
 The hostnames above should resolve both from the hosts themselves and
 the host where ansible is running (if different).
 
+A more complete example inventory file ([hosts.ose.example](https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.ose.example)) is available under the [`/inventory/byo`](https://github.com/openshift/openshift-ansible/tree/master/inventory/byo) directory.
+
 ## Running the ansible playbooks
 From the openshift-ansible checkout run:
 ```sh

+ 4 - 2
README_origin.md

@@ -59,12 +59,12 @@ option to ansible-playbook.
 # This is an example of a bring your own (byo) host inventory
 
 # Create an OSEv3 group that contains the masters and nodes groups
-[OSv3:children]
+[OSEv3:children]
 masters
 nodes
 
 # Set variables common for all OSEv3 hosts
-[OSv3:vars]
+[OSEv3:vars]
 
 # SSH user, this user should allow ssh based auth without requiring a password
 ansible_ssh_user=root
@@ -95,6 +95,8 @@ osv3-lb.example.com
 The hostnames above should resolve both from the hosts themselves and
 the host where ansible is running (if different).
 
+A more complete example inventory file ([hosts.origin.example](https://github.com/openshift/openshift-ansible/blob/master/inventory/byo/hosts.origin.example)) is available under the [`/inventory/byo`](https://github.com/openshift/openshift-ansible/tree/master/inventory/byo) directory.
+
 ## Running the ansible playbooks
 From the openshift-ansible checkout run:
 ```sh

+ 62 - 49
bin/cluster

@@ -55,94 +55,108 @@ class Cluster(object):
         Create an OpenShift cluster for given provider
         :param args: command line arguments provided by user
         """
-        env = {'cluster_id': args.cluster_id,
+        cluster = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        env['num_masters'] = args.masters
-        env['num_nodes'] = args.nodes
-        env['num_infra'] = args.infra
-        env['num_etcd'] = args.etcd
+        cluster['num_masters'] = args.masters
+        cluster['num_nodes'] = args.nodes
+        cluster['num_infra'] = args.infra
+        cluster['num_etcd'] = args.etcd
+        cluster['cluster_env'] = args.env
 
-        self.action(args, inventory, env, playbook)
+        self.action(args, inventory, cluster, playbook)
 
     def addNodes(self, args):
         """
         Add nodes to an existing cluster for given provider
         :param args: command line arguments provided by user
         """
-        env = {'cluster_id': args.cluster_id,
-               'deployment_type': self.get_deployment_type(args)}
+        cluster = {'cluster_id': args.cluster_id,
+                   'deployment_type': self.get_deployment_type(args),
+                  }
         playbook = "playbooks/{0}/openshift-cluster/addNodes.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        env['num_nodes'] = args.nodes
-        env['num_infra'] = args.infra
+        cluster['num_nodes'] = args.nodes
+        cluster['num_infra'] = args.infra
+        cluster['cluster_env'] = args.env
 
-        self.action(args, inventory, env, playbook)
+        self.action(args, inventory, cluster, playbook)
 
     def terminate(self, args):
         """
         Destroy OpenShift cluster
         :param args: command line arguments provided by user
         """
-        env = {'cluster_id': args.cluster_id,
-               'deployment_type': self.get_deployment_type(args)}
+        cluster = {'cluster_id': args.cluster_id,
+                   'deployment_type': self.get_deployment_type(args),
+                   'cluster_env': args.env,
+                  }
         playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        self.action(args, inventory, env, playbook)
+        self.action(args, inventory, cluster, playbook)
 
     def list(self, args):
         """
         List VMs in cluster
         :param args: command line arguments provided by user
         """
-        env = {'cluster_id': args.cluster_id,
-               'deployment_type': self.get_deployment_type(args)}
+        cluster = {'cluster_id': args.cluster_id,
+                   'deployment_type': self.get_deployment_type(args),
+                   'cluster_env': args.env,
+                  }
         playbook = "playbooks/{0}/openshift-cluster/list.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        self.action(args, inventory, env, playbook)
+        self.action(args, inventory, cluster, playbook)
 
     def config(self, args):
         """
         Configure or reconfigure OpenShift across clustered VMs
         :param args: command line arguments provided by user
         """
-        env = {'cluster_id': args.cluster_id,
-               'deployment_type': self.get_deployment_type(args)}
+        cluster = {'cluster_id': args.cluster_id,
+                   'deployment_type': self.get_deployment_type(args),
+                   'cluster_env': args.env,
+                  }
         playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        self.action(args, inventory, env, playbook)
+        self.action(args, inventory, cluster, playbook)
 
     def update(self, args):
         """
         Update to latest OpenShift across clustered VMs
         :param args: command line arguments provided by user
         """
-        env = {'cluster_id': args.cluster_id,
-               'deployment_type': self.get_deployment_type(args)}
+        cluster = {'cluster_id': args.cluster_id,
+                   'deployment_type': self.get_deployment_type(args),
+                   'cluster_env': args.env,
+                  }
+
         playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        self.action(args, inventory, env, playbook)
+        self.action(args, inventory, cluster, playbook)
 
     def service(self, args):
         """
         Make the same service call across all nodes in the cluster
         :param args: command line arguments provided by user
         """
-        env = {'cluster_id': args.cluster_id,
-               'deployment_type': self.get_deployment_type(args),
-               'new_cluster_state': args.state}
+        cluster = {'cluster_id': args.cluster_id,
+                   'deployment_type': self.get_deployment_type(args),
+                   'new_cluster_state': args.state,
+                   'cluster_env': args.env,
+                  }
 
         playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        self.action(args, inventory, env, playbook)
+        self.action(args, inventory, cluster, playbook)
 
     def setup_provider(self, provider):
         """
@@ -152,10 +166,9 @@ class Cluster(object):
         """
         config = ConfigParser.ConfigParser()
         if 'gce' == provider:
-            gce_ini_default_path = os.path.join(
-                'inventory/gce/hosts/gce.ini')
+            gce_ini_default_path = os.path.join('inventory/gce/hosts/gce.ini')
             gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
-            if os.path.exists(gce_ini_path): 
+            if os.path.exists(gce_ini_path):
                 config.readfp(open(gce_ini_path))
 
                 for key in config.options('gce'):
@@ -190,12 +203,12 @@ class Cluster(object):
 
         return inventory
 
-    def action(self, args, inventory, env, playbook):
+    def action(self, args, inventory, cluster, playbook):
         """
         Build ansible-playbook command line and execute
         :param args: command line arguments provided by user
         :param inventory: derived provider library
-        :param env: environment variables for kubernetes
+        :param cluster: cluster variables for kubernetes
         :param playbook: ansible playbook to execute
         """
 
@@ -206,14 +219,14 @@ class Cluster(object):
         if args.option:
             for opt in args.option:
                 k, v = opt.split('=', 1)
-                env['cli_' + k] = v
+                cluster['cli_' + k] = v
 
-        ansible_env = '-e \'{0}\''.format(
-            ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()])
+        ansible_extra_vars = '-e \'{0}\''.format(
+            ' '.join(['%s=%s' % (key, value) for (key, value) in cluster.items()])
         )
 
         command = 'ansible-playbook {0} {1} {2} {3}'.format(
-            verbose, inventory, ansible_env, playbook
+            verbose, inventory, ansible_extra_vars, playbook
         )
 
         if args.profile:
@@ -242,7 +255,7 @@ class ActionFailed(Exception):
 
 if __name__ == '__main__':
     """
-    User command to invoke ansible playbooks in a "known" environment
+    User command to invoke ansible playbooks in a "known" configuration
 
     Reads ~/.openshift-ansible for default configuration items
       [DEFAULT]
@@ -251,7 +264,7 @@ if __name__ == '__main__':
       providers = gce,aws,libvirt,openstack
     """
 
-    environment = ConfigParser.SafeConfigParser({
+    cluster_config = ConfigParser.SafeConfigParser({
         'cluster_ids': 'marketing,sales',
         'validate_cluster_ids': 'False',
         'providers': 'gce,aws,libvirt,openstack',
@@ -259,36 +272,36 @@ if __name__ == '__main__':
 
     path = os.path.expanduser("~/.openshift-ansible")
     if os.path.isfile(path):
-        environment.read(path)
+        cluster_config.read(path)
 
     cluster = Cluster()
 
     parser = argparse.ArgumentParser(
-        description='Python wrapper to ensure proper environment for OpenShift ansible playbooks',
+        description='Python wrapper to ensure proper configuration for OpenShift ansible playbooks',
     )
     parser.add_argument('-v', '--verbose', action='count',
                         help='Multiple -v options increase the verbosity')
     parser.add_argument('--version', action='version', version='%(prog)s 0.3')
 
     meta_parser = argparse.ArgumentParser(add_help=False)
-    providers = environment.get('DEFAULT', 'providers').split(',')
+    providers = cluster_config.get('DEFAULT', 'providers').split(',')
     meta_parser.add_argument('provider', choices=providers, help='provider')
 
-    if environment.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"):
-        meta_parser.add_argument('cluster_id', choices=environment.get('DEFAULT', 'cluster_ids').split(','),
+    if cluster_config.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"):
+        meta_parser.add_argument('cluster_id', choices=cluster_config.get('DEFAULT', 'cluster_ids').split(','),
                                  help='prefix for cluster VM names')
     else:
         meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
 
     meta_parser.add_argument('-t', '--deployment-type',
-                             choices=['origin', 'online', 'enterprise'],
+                             choices=['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise'],
                              help='Deployment type. (default: origin)')
-    meta_parser.add_argument('-T', '--product-type',
-                             choices=['openshift', 'atomic-enterprise'],
-                             help='Product type. (default: openshift)')
     meta_parser.add_argument('-o', '--option', action='append',
                              help='options')
 
+    meta_parser.add_argument('--env', default='dev', type=str,
+                               help='environment for the cluster.  Defaults to \'dev\'.')
+
     meta_parser.add_argument('-p', '--profile', action='store_true',
                              help='Enable playbook profiling')
 
@@ -350,14 +363,14 @@ if __name__ == '__main__':
     args = parser.parse_args()
 
     if 'terminate' == args.action and not args.force:
-        answer = raw_input("This will destroy the ENTIRE {0} environment. Are you sure? [y/N] ".format(args.cluster_id))
+        answer = raw_input("This will destroy the ENTIRE {0} cluster. Are you sure? [y/N] ".format(args.cluster_id))
         if answer not in ['y', 'Y']:
             sys.stderr.write('\nACTION [terminate] aborted by user!\n')
             exit(1)
 
     if 'update' == args.action and not args.force:
         answer = raw_input(
-            "This is destructive and could corrupt {0} environment. Continue? [y/N] ".format(args.cluster_id))
+            "This is destructive and could corrupt {0} cluster. Continue? [y/N] ".format(args.cluster_id))
         if answer not in ['y', 'Y']:
             sys.stderr.write('\nACTION [update] aborted by user!\n')
             exit(1)

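The net effect of the bin/cluster changes: the old `env` dict is renamed to `cluster`, every action now carries a `cluster_env` value taken from the new `--env` flag, and `-T/--product-type` is folded into an expanded `-t/--deployment-type`. A hypothetical invocation (the `create` subcommand name is inferred from the method name, and the argument order is assumed):

```sh
# Launch a 'mycluster' cluster on AWS, tagged with cluster_env=prod
bin/cluster create aws mycluster -t openshift-enterprise --env prod
```
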
+ 22 - 25
bin/ohi

@@ -48,28 +48,18 @@ class Ohi(object):
             self.aws.print_host_types()
             return 0
 
-        hosts = None
-        if self.args.host_type is not None and \
-           self.args.env is not None:
-            # Both env and host-type specified
-            hosts = self.aws.get_host_list(host_type=self.args.host_type,
-                                           envs=self.args.env,
-                                           version=self.args.openshift_version,
-                                           cached=self.args.cache_only)
-
-        if self.args.host_type is None and \
-           self.args.env is not None:
-            # Only env specified
-            hosts = self.aws.get_host_list(envs=self.args.env,
-                                           version=self.args.openshift_version,
-                                           cached=self.args.cache_only)
-
-        if self.args.host_type is not None and \
-           self.args.env is None:
-            # Only host-type specified
-            hosts = self.aws.get_host_list(host_type=self.args.host_type,
-                                           version=self.args.openshift_version,
-                                           cached=self.args.cache_only)
+        if self.args.v3:
+            version = '3'
+        elif self.args.all_versions:
+            version = 'all'
+        else:
+            version = '2'
+
+        hosts = self.aws.get_host_list(clusters=self.args.cluster,
+                                       host_type=self.args.host_type,
+                                       envs=self.args.env,
+                                       version=version,
+                                       cached=self.args.cache_only)
 
         if hosts is None:
             # We weren't able to determine what they wanted to do
@@ -104,19 +94,26 @@ class Ohi(object):
         parser = argparse.ArgumentParser(description='OpenShift Host Inventory')
 
         parser.add_argument('--list-host-types', default=False, action='store_true', help='List all of the host types')
+        parser.add_argument('--list', default=False, action='store_true', help='List all hosts')
 
-        parser.add_argument('-e', '--env', action="store", help="Which environment to use")
+        parser.add_argument('-c', '--cluster', action="append", help="Which clusterid to use")
+        parser.add_argument('-e', '--env', action="append", help="Which environment to use")
 
         parser.add_argument('-t', '--host-type', action="store", help="Which host type to use")
 
         parser.add_argument('-l', '--user', action='store', default=None, help='username')
 
-        parser.add_argument('-c', '--cache-only', action='store_true', default=False,
+        parser.add_argument('--cache-only', action='store_true', default=False,
                             help='Retrieve the host inventory by cache only. Default is false.')
 
-        parser.add_argument('-o', '--openshift-version', action='store', default='2',
+        parser.add_argument('--v2', action='store_true', default=True,
                             help='Specify the openshift version. Default is 2')
 
+        parser.add_argument('--v3', action='store_true', default=False,
+                            help='Specify the openshift version.')
+
+        parser.add_argument('--all-versions', action='store_true', default=False,
+                            help='Specify the openshift version. Return all versions')
 
         self.args = parser.parse_args()
 

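With this rewrite, ohi no longer branches on which filters were given: it always makes one `get_host_list` call and lets awsutil intersect the cluster, environment, host-type, and version tag groups (see the awsutil.py diff below). Example invocations using only the flags defined in the parser above (`mycluster` and `prod` are placeholders):

```sh
# v3 masters in the 'prod' environment of one cluster;
# -c and -e use action="append", so they may be repeated
ohi -c mycluster -e prod -t master --v3

# Every host regardless of version, served from the local cache
ohi --list --all-versions --cache-only
```
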
+ 62 - 39
bin/openshift_ansible/awsutil.py

@@ -59,9 +59,23 @@ class AwsUtil(object):
             minv.run()
         return minv.result
 
+    def get_clusters(self):
+        """Searches for cluster tags in the inventory and returns all of the clusters found."""
+        pattern = re.compile(r'^oo_clusterid_(.*)')
+
+        clusters = []
+        inv = self.get_inventory()
+        for key in inv.keys():
+            matched = pattern.match(key)
+            if matched:
+                clusters.append(matched.group(1))
+
+        clusters.sort()
+        return clusters
+
     def get_environments(self):
         """Searches for env tags in the inventory and returns all of the envs found."""
-        pattern = re.compile(r'^tag_env_(.*)')
+        pattern = re.compile(r'^oo_environment_(.*)')
 
         envs = []
         inv = self.get_inventory()
@@ -75,7 +89,7 @@ class AwsUtil(object):
 
     def get_host_types(self):
         """Searches for host-type tags in the inventory and returns all host-types found."""
-        pattern = re.compile(r'^tag_host-type_(.*)')
+        pattern = re.compile(r'^oo_host-type_(.*)')
 
         host_types = []
         inv = self.get_inventory()
@@ -109,13 +123,13 @@ class AwsUtil(object):
         inst_by_env = {}
         for _, host in inv['_meta']['hostvars'].items():
             # If you don't have an environment tag, we're going to ignore you
-            if 'ec2_tag_env' not in host:
+            if 'ec2_tag_environment' not in host:
                 continue
 
-            if host['ec2_tag_env'] not in inst_by_env:
-                inst_by_env[host['ec2_tag_env']] = {}
+            if host['ec2_tag_environment'] not in inst_by_env:
+                inst_by_env[host['ec2_tag_environment']] = {}
             host_id = "%s:%s" % (host['ec2_tag_Name'], host['ec2_id'])
-            inst_by_env[host['ec2_tag_env']][host_id] = host
+            inst_by_env[host['ec2_tag_environment']][host_id] = host
 
         return inst_by_env
 
@@ -154,10 +168,22 @@ class AwsUtil(object):
         return host_type
 
     @staticmethod
+    def gen_version_tag(ver):
+        """Generate the version tag
+        """
+        return "oo_version_%s" % ver
+
+    @staticmethod
+    def gen_clusterid_tag(clu):
+        """Generate the clusterid tag
+        """
+        return "tag_clusterid_%s" % clu
+
+    @staticmethod
     def gen_env_tag(env):
         """Generate the environment tag
         """
-        return "tag_env_%s" % env
+        return "tag_environment_%s" % env
 
     def gen_host_type_tag(self, host_type):
         """Generate the host type tag
@@ -165,47 +191,44 @@ class AwsUtil(object):
         host_type = self.resolve_host_type(host_type)
         return "tag_host-type_%s" % host_type
 
-    def gen_env_host_type_tag(self, host_type, env):
-        """Generate the environment host type tag
-        """
-        host_type = self.resolve_host_type(host_type)
-        return "tag_env-host-type_%s-%s" % (env, host_type)
-
-    def get_host_list(self, host_type=None, envs=None, version=None, cached=False):
+    # This function uses all of these params to perform a filters on our host inventory.
+    # pylint: disable=too-many-arguments
+    def get_host_list(self, clusters=None, host_type=None, envs=None, version=None, cached=False):
         """Get the list of hosts from the inventory using host-type and environment
         """
         retval = set([])
         envs = envs or []
+
         inv = self.get_inventory(cached=cached)
 
-        # We prefer to deal with a list of environments
-        if issubclass(type(envs), basestring):
-            if envs == 'all':
-                envs = self.get_environments()
+        retval.update(inv.get('all_hosts', []))
+
+        if clusters:
+            cluster_hosts = set([])
+            if len(clusters) > 1:
+                for cluster in clusters:
+                    clu_tag = AwsUtil.gen_clusterid_tag(cluster)
+                    cluster_hosts.update(inv.get(clu_tag, []))
             else:
-                envs = [envs]
+                cluster_hosts.update(inv.get(AwsUtil.gen_clusterid_tag(clusters[0]), []))
+
+            retval.intersection_update(cluster_hosts)
+
+        if envs:
+            env_hosts = set([])
+            if len(envs) > 1:
+                for env in envs:
+                    env_tag = AwsUtil.gen_env_tag(env)
+                    env_hosts.update(inv.get(env_tag, []))
+            else:
+                env_hosts.update(inv.get(AwsUtil.gen_env_tag(envs[0]), []))
+
+            retval.intersection_update(env_hosts)
 
-        if host_type and envs:
-            # Both host type and environment were specified
-            for env in envs:
-                retval.update(inv.get('tag_environment_%s' % env, []))
+        if host_type:
             retval.intersection_update(inv.get(self.gen_host_type_tag(host_type), []))
 
-        elif envs and not host_type:
-            # Just environment was specified
-            for env in envs:
-                env_tag = AwsUtil.gen_env_tag(env)
-                if env_tag in inv.keys():
-                    retval.update(inv.get(env_tag, []))
-
-        elif host_type and not envs:
-            # Just host-type was specified
-            host_type_tag = self.gen_host_type_tag(host_type)
-            if host_type_tag in inv.keys():
-                retval.update(inv.get(host_type_tag, []))
-
-        # If version is specified then return only hosts in that version
-        if version:
-            retval.intersection_update(inv.get('oo_version_%s' % version, []))
+        if version != 'all':
+            retval.intersection_update(inv.get(AwsUtil.gen_version_tag(version), []))
 
         return retval

+ 34 - 15
bin/opssh

@@ -13,6 +13,8 @@ Options:
   -p PAR, --par=PAR     max number of parallel threads (OPTIONAL)
   --outdir=OUTDIR       output directory for stdout files (OPTIONAL)
   --errdir=ERRDIR       output directory for stderr files (OPTIONAL)
+  -c CLUSTER, --cluster CLUSTER
+                        which cluster to use
   -e ENV, --env ENV     which environment to use
   -t HOST_TYPE, --host-type HOST_TYPE
                         which host type to use
@@ -45,9 +47,9 @@ fi
 
 # See if ohi is installed
 if ! which ohi &>/dev/null ; then
-    echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
+  echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
 
-    exit 10
+  exit 10
 fi
 
 PAR=200
@@ -64,12 +66,23 @@ while [ $# -gt 0 ] ; do
             shift # get past the value of the option
             ;;
 
+        -c)
+            shift # get past the option
+            CLUSTER=$1
+            shift # get past the value of the option
+            ;;
+
         -e)
             shift # get past the option
             ENV=$1
             shift # get past the value of the option
             ;;
 
+        --v3)
+            OPENSHIFT_VERSION="--v3"
+            shift # get past the value of the option
+            ;;
+
         --timeout)
             shift # get past the option
             TIMEOUT=$1
@@ -106,20 +119,26 @@ while [ $# -gt 0 ] ; do
 done
 
 # Get host list from ohi
-if [ -n "$ENV" -a -n "$HOST_TYPE" ] ; then
-    HOSTS="$(ohi -t "$HOST_TYPE" -e "$ENV" 2>/dev/null)"
-    OHI_ECODE=$?
-elif [ -n "$ENV" ] ; then
-    HOSTS="$(ohi -e "$ENV" 2>/dev/null)"
-    OHI_ECODE=$?
-elif [ -n "$HOST_TYPE" ] ; then
-    HOSTS="$(ohi -t "$HOST_TYPE" 2>/dev/null)"
+CMD=""
+if [ -n "$CLUSTER" ] ; then
+  CMD="$CMD -c $CLUSTER"
+fi
+
+if [ -n "$ENV" ] ; then
+  CMD="$CMD -e $ENV"
+fi
+
+if [ -n "$HOST_TYPE" ] ; then
+  CMD="$CMD -t $HOST_TYPE"
+fi
+
+if [ -n "$OPENSHIFT_VERSION" ] ; then
+  CMD="$CMD $OPENSHIFT_VERSION"
+fi
+
+if [ -n "$CMD" ] ; then
+    HOSTS="$(ohi $CMD 2>/dev/null)"
     OHI_ECODE=$?
-else
-    echo
-    echo "Error: either -e or -t must be specified"
-    echo
-    exit 10
 fi
 
 if [ $OHI_ECODE -ne 0 ] ; then

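opssh now accumulates its -c/-e/-t/--v3 selections into a single ohi command line instead of branching per filter combination, so any subset of filters works. A hedged example (the trailing remote command is assumed to pass through to pssh, as in the original wrapper):

```sh
# Run 'uptime' on every v3 node in one cluster, 50 hosts at a time
opssh -c mycluster -t node --v3 -p 50 'uptime'
```
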
+ 8 - 11
bin/oscp

@@ -138,7 +138,7 @@ class Oscp(object):
 
         # attempt to select the correct environment if specified
         if self.env:
-            results = filter(lambda result: result[1]['ec2_tag_env'] == self.env, results)
+            results = filter(lambda result: result[1]['oo_environment'] == self.env, results)
 
         if results:
             return results
@@ -164,10 +164,8 @@ class Oscp(object):
                     print '{0:<35} {1}'.format(key, server_info[key])
             else:
                 for host_id, server_info in results[:limit]:
-                    name = server_info['ec2_tag_Name']
-                    ec2_id = server_info['ec2_id']
-                    ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+                    print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+                          '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
 
                 if limit:
                     print
@@ -177,10 +175,9 @@ class Oscp(object):
         else:
             for env, host_ids in self.host_inventory.items():
                 for host_id, server_info in host_ids.items():
-                    name = server_info['ec2_tag_Name']
-                    ec2_id = server_info['ec2_id']
-                    ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+                    print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+                          '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
+
 
     def scp(self):
         '''scp files to or from a specified host
@@ -209,12 +206,12 @@ class Oscp(object):
             if len(results) > 1:
                 print "Multiple results found for %s." % self.host
                 for result in results:
-                    print "{ec2_tag_Name:<35} {ec2_tag_env:<5} {ec2_id:<10}".format(**result[1])
+                    print "{oo_name:<35} {oo_clusterid:<5} {oo_environment:<5} {oo_id:<10}".format(**result[1])
                 return # early exit, too many results
 
             # Assume we have one and only one.
             hostname, server_info = results[0]
-            dns = server_info['ec2_public_dns_name']
+            dns = server_info['oo_public_ip']
 
             host_str = "%s%s%s" % (self.user, dns, self.path)
 

+ 13 - 17
bin/ossh

@@ -55,15 +55,15 @@ class Ossh(object):
     def parse_cli_args(self):
         parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
         parser.add_argument('-e', '--env', action="store",
-                          help="Which environment to search for the host ")
+                            help="Which environment to search for the host ")
         parser.add_argument('-d', '--debug', default=False,
-                          action="store_true", help="debug mode")
+                            action="store_true", help="debug mode")
         parser.add_argument('-v', '--verbose', default=False,
-                          action="store_true", help="Verbose?")
+                            action="store_true", help="Verbose?")
         parser.add_argument('--refresh-cache', default=False,
-                          action="store_true", help="Force a refresh on the host cache.")
+                            action="store_true", help="Force a refresh on the host cache.")
         parser.add_argument('--list', default=False,
-                          action="store_true", help="list out hosts")
+                            action="store_true", help="list out hosts")
         parser.add_argument('-c', '--command', action='store',
                             help='Command to run on remote host')
         parser.add_argument('-l', '--login_name', action='store',
@@ -127,7 +127,7 @@ class Ossh(object):
 
         # attempt to select the correct environment if specified
         if self.env:
-            results = filter(lambda result: result[1]['ec2_tag_env'] == self.env, results)
+            results = filter(lambda result: result[1]['oo_environment'] == self.env, results)
 
         if results:
             return results
@@ -153,10 +153,8 @@ class Ossh(object):
                     print '{0:<35} {1}'.format(key, server_info[key])
             else:
                 for host_id, server_info in results[:limit]:
-                    name = server_info['ec2_tag_Name']
-                    ec2_id = server_info['ec2_id']
-                    ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+                    print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+                          '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
 
                 if limit:
                     print
@@ -166,10 +164,8 @@ class Ossh(object):
         else:
             for env, host_ids in self.host_inventory.items():
                 for host_id, server_info in host_ids.items():
-                    name = server_info['ec2_tag_Name']
-                    ec2_id = server_info['ec2_id']
-                    ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+                    print '{oo_name:<35} {oo_clusterid:<10} {oo_environment:<8} ' \
+                          '{oo_id:<15} {oo_public_ip:<18} {oo_private_ip:<18}'.format(**server_info)
 
     def ssh(self):
         '''SSH to a specified host
@@ -195,12 +191,12 @@ class Ossh(object):
             if len(results) > 1:
                 print "Multiple results found for %s." % self.host
                 for result in results:
-                    print "{ec2_tag_Name:<35} {ec2_tag_env:<5} {ec2_id:<10}".format(**result[1])
+                    print "{oo_name:<35} {oo_clusterid:<5} {oo_environment:<5} {oo_id:<10}".format(**result[1])
                 return # early exit, too many results
 
             # Assume we have one and only one.
-            hostname, server_info = results[0]
-            dns = server_info['ec2_public_dns_name']
+            _, server_info = results[0]
+            dns = server_info['oo_public_ip']
 
             ssh_args.append(dns)
 

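ossh applies the same ec2_* to oo_* key migration as oscp, filtering on `oo_environment` and connecting via `oo_public_ip`. A sketch using the flags defined in `parse_cli_args` above (the positional host argument is an assumption):

```sh
# List known hosts, then run a one-off command on one of them as root
ossh --list
ossh -e prod -l root -c 'uptime' somehost
```
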
+ 13 - 6
filter_plugins/oo_filters.py

@@ -8,12 +8,11 @@ Custom filters for use in openshift-ansible
 from ansible import errors
 from operator import itemgetter
 import OpenSSL.crypto
-import os.path
+import os
 import pdb
 import re
 import json
 
-
 class FilterModule(object):
     ''' Custom ansible filters '''
 
@@ -366,9 +365,6 @@ class FilterModule(object):
                            "keyfile": "/etc/origin/master/named_certificates/custom2.key",
                            "names": [ "some-hostname.com" ] }]
         '''
-        if not issubclass(type(certificates), list):
-            raise errors.AnsibleFilterError("|failed expects certificates is a list")
-
         if not issubclass(type(named_certs_dir), unicode):
             raise errors.AnsibleFilterError("|failed expects named_certs_dir is unicode")
 
@@ -468,6 +464,16 @@ class FilterModule(object):
                 pass
         return clusters
 
+    @staticmethod
+    def oo_generate_secret(num_bytes):
+        ''' generate a session secret '''
+
+        if not issubclass(type(num_bytes), int):
+            raise errors.AnsibleFilterError("|failed expects num_bytes is int")
+
+        secret = os.urandom(num_bytes)
+        return secret.encode('base-64').strip()
+
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {
@@ -486,5 +492,6 @@ class FilterModule(object):
             "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
             "oo_parse_named_certificates": self.oo_parse_named_certificates,
             "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
-            "oo_pretty_print_cluster": self.oo_pretty_print_cluster
+            "oo_pretty_print_cluster": self.oo_pretty_print_cluster,
+            "oo_generate_secret": self.oo_generate_secret
         }

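The new `oo_generate_secret` filter simply base64-encodes `num_bytes` of `os.urandom` output, using the Python 2 'base-64' codec shown in the diff; presumably it is invoked from templates as `{{ 24 | oo_generate_secret }}`. The same value can be produced outside Ansible for comparison:

```sh
# Python 2 one-liner equivalent to oo_generate_secret(24)
python -c "import os; print os.urandom(24).encode('base-64').strip()"
```
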
+ 0 - 1
filter_plugins/openshift_master.py

@@ -463,7 +463,6 @@ class FilterModule(object):
         IdentityProviderBase.validate_idp_list(idp_list)
         return yaml.safe_dump([idp.to_dict() for idp in idp_list], default_flow_style=False)
 
-
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {"translate_idps": self.translate_idps}

+ 20 - 0
inventory/byo/hosts.aep.example

@@ -18,6 +18,9 @@ ansible_ssh_user=root
 # user must be configured for passwordless sudo
 #ansible_sudo=true
 
+# Debug level for all Atomic Enterprise components (Defaults to 2)
+debug_level=2
+
 # deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise
 deployment_type=atomic-enterprise
 
@@ -27,6 +30,14 @@ deployment_type=atomic-enterprise
 # Enable cluster metrics
 #use_cluster_metrics=true
 
+# Configure metricsPublicURL in the master config for cluster metrics
+# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
+#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+
+# Configure loggingPublicURL in the master config for aggregate logging
+# See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html
+#openshift_master_logging_public_url=https://kibana.example.com
+
 # Add additional, insecure, and blocked registries to global docker configuration
 # For enterprise deployment types we ensure that registry.access.redhat.com is
 # included if you do not include it
@@ -91,6 +102,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Override the default controller lease ttl
 #osm_controller_lease_ttl=30
 
+# Configure controller arguments
+#osm_controller_args={'resource-quota-sync-period': ['10s']}
+
+# Configure api server arguments
+#osm_api_server_args={'max-requests-inflight': ['400']}
+
 # default subdomain to use for exposed routes
 #osm_default_subdomain=apps.test.example.com
 
@@ -163,6 +180,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Configure dnsIP in the node config
 #openshift_dns_ip=172.30.0.1
 
+# Configure node kubelet arguments
+#openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+
 # host group for masters
 [masters]
 aep3-master[1:3]-ansible.test.example.com

+ 20 - 0
inventory/byo/hosts.aep_quickstart

@@ -0,0 +1,20 @@
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=atomic-enterprise
+osm_use_cockpit=true
+
+[masters]
+ose3-master.example.com
+
+[nodes]
+ose3-master.example.com openshift_schedulable=True
+
+[etcd]
+
+[lb]

+ 37 - 0
inventory/byo/hosts.openstack

@@ -0,0 +1,37 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+lb
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+ansible_ssh_user=cloud-user
+ansible_sudo=true
+
+# Debug level for all OpenShift components (Defaults to 2)
+debug_level=2
+
+deployment_type=openshift-enterprise
+
+openshift_additional_repos=[{'id': 'ose-3.1', 'name': 'ose-3.1', 'baseurl': 'http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/x86_64/ose/3.1/os', 'enabled': 1, 'gpgcheck': 0}]
+
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift.common.config_base }}/htpasswd'}]
+
+#openshift_pkg_version=-3.0.0.0
+
+[masters]
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
+
+[etcd]
+jdetiber-etcd.usersys.redhat.com
+
+[lb]
+#ose3-lb-ansible.test.example.com
+
+[nodes]
+jdetiber-master.usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}"
+jdetiber-node[1:2].usersys.redhat.com openshift_public_hostname="{{ inventory_hostname }}" openshift_hostname="{{ ansible_default_ipv4.address }}" openshift_node_labels="{'region': 'primary', 'zone': 'default'}"

+ 26 - 0
inventory/byo/hosts.origin.example

@@ -6,6 +6,7 @@ masters
 nodes
 etcd
 lb
+nfs
 
 # Set variables common for all OSEv3 hosts
 [OSEv3:vars]
@@ -18,6 +19,9 @@ ansible_ssh_user=root
 # user must be configured for passwordless sudo
 #ansible_sudo=true
 
+# Debug level for all OpenShift components (Defaults to 2)
+debug_level=2
+
 # deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise
 deployment_type=origin
 
@@ -27,6 +31,14 @@ deployment_type=origin
 # Enable cluster metrics
 #use_cluster_metrics=true
 
+# Configure metricsPublicURL in the master config for cluster metrics
+# See: https://docs.openshift.org/latest/install_config/cluster_metrics.html
+#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+
+# Configure loggingPublicURL in the master config for aggregate logging
+# See: https://docs.openshift.org/latest/install_config/aggregate_logging.html
+#openshift_master_logging_public_url=https://kibana.example.com
+
 # Add additional, insecure, and blocked registries to global docker configuration
 # For enterprise deployment types we ensure that registry.access.redhat.com is
 # included if you do not include it
@@ -95,6 +107,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Override the default controller lease ttl
 #osm_controller_lease_ttl=30
 
+# Configure controller arguments
+#osm_controller_args={'resource-quota-sync-period': ['10s']}
+
+# Configure api server arguments
+#osm_api_server_args={'max-requests-inflight': ['400']}
+
 # default subdomain to use for exposed routes
 #osm_default_subdomain=apps.test.example.com
 
@@ -167,6 +185,14 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Configure dnsIP in the node config
 #openshift_dns_ip=172.30.0.1
 
+# NFS Options
+#openshift_nfs_exports_dir=/var/export
+#openshift_nfs_registry_volume=regvol
+#openshift_nfs_export_options='*(rw,sync,all_squash)'
+
+# Configure node kubelet arguments
+#openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+
 # host group for masters
 [masters]
 ose3-master[1:3]-ansible.test.example.com

+ 20 - 0
inventory/byo/hosts.ose.example

@@ -18,6 +18,9 @@ ansible_ssh_user=root
 # user must be configured for passwordless sudo
 #ansible_sudo=true
 
+# Debug level for all OpenShift components (Defaults to 2)
+debug_level=2
+
 # deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise
 deployment_type=openshift-enterprise
 
@@ -27,6 +30,14 @@ deployment_type=openshift-enterprise
 # Enable cluster metrics
 #use_cluster_metrics=true
 
+# Configure metricsPublicURL in the master config for cluster metrics
+# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
+#openshift_master_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+
+# Configure loggingPublicURL in the master config for aggregate logging
+# See: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html
+#openshift_master_logging_public_url=https://kibana.example.com
+
 # Add additional, insecure, and blocked registries to global docker configuration
 # For enterprise deployment types we ensure that registry.access.redhat.com is
 # included if you do not include it
@@ -91,6 +102,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Override the default controller lease ttl
 #osm_controller_lease_ttl=30
 
+# Configure controller arguments
+#osm_controller_args={'resource-quota-sync-period': ['10s']}
+
+# Configure api server arguments
+#osm_api_server_args={'max-requests-inflight': ['400']}
+
 # default subdomain to use for exposed routes
 #osm_default_subdomain=apps.test.example.com
 
@@ -163,6 +180,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # Configure dnsIP in the node config
 #openshift_dns_ip=172.30.0.1
 
+# Configure node kubelet arguments
+#openshift_node_kubelet_args={'max-pods': ['40'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
+
 # host group for masters
 [masters]
 ose3-master[1:3]-ansible.test.example.com

+ 120 - 1
openshift-ansible.spec

@@ -5,7 +5,7 @@
 }
 
 Name:           openshift-ansible
-Version:        3.0.20
+Version:        3.0.27
 Release:        1%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
@@ -259,6 +259,125 @@ Atomic OpenShift Utilities includes
 
 
 %changelog
+* Fri Jan 08 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.27-1
+- Update to metadata tooling. (kwoodson@redhat.com)
+- Fix VM drive cleanup during terminate on libvirt (lhuard@amadeus.com)
+
+* Fri Jan 08 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.26-1
+- Bug 1296388 - fixing typo (bleanhar@redhat.com)
+
+* Thu Jan 07 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.25-1
+- Bug 1296388 - The playbook still configure ManageIQ when
+  openshift_use_manageiq is false (bleanhar@redhat.com)
+- Add a banner to CLI wrapper instructing users that it's only for
+  bootstrapping (sdodson@redhat.com)
+- Rename env into clusterid and add environment in the OpenStack VMs tags
+  (lhuard@amadeus.com)
+- Fix terminate.yml on OpenStack (lhuard@amadeus.com)
+- Install gluster and ceph packages when containerized but not atomic
+  (sdodson@redhat.com)
+- Update openshift_facts config_base for Online deployments (whearn@redhat.com)
+- Fix multi-word arguments & cli wrapper stdin plumbing (sdodson@redhat.com)
+- Improve 3.1/1.1 upgrade check (jdetiber@redhat.com)
+
+* Thu Jan 07 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.24-1
+- Setting relative paths in the upgrade playbooks wasn't working
+  (bleanhar@redhat.com)
+
+* Wed Jan 06 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.23-1
+- Move extra secret validations into openshift_facts. (abutcher@redhat.com)
+- Remove not is_containerized restriction on storage plugin includes.
+  (abutcher@redhat.com)
+- We can't enable manageiq for installations less than OSE 3.1 or Origin 1.1
+  (bleanhar@redhat.com)
+- Fix RHN subscription by explicitly attaching to the right pool
+  (lhuard@amadeus.com)
+- openshift_facts validation (abutcher@redhat.com)
+- Secrets validation. (abutcher@redhat.com)
+- Clean up idempotency issues with session secrets. (abutcher@redhat.com)
+
+* Wed Jan 06 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.22-1
+- playbook for restarting SDN (jdiaz@redhat.com)
+- Stop haproxy and remove package during uninstall. (abutcher@redhat.com)
+- Group name as per hosts.origin.example (donovan.muller@gmail.com)
+- I believe the ami id changed since the initial documentation was created for
+  AWS deployment (rcook@redhat.com)
+
+* Tue Jan 05 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.21-1
+- Fix osm_controller_args and osm_api_server_args settings.
+  (abutcher@redhat.com)
+- Fix error in byo cluster_hosts.yml (jdetiber@redhat.com)
+- Cleanup and fixes for cluster_id change (jdetiber@redhat.com)
+- Fix typo in etcd service status fact. (abutcher@redhat.com)
+- Removing environment and env tags. (kwoodson@redhat.com)
+- Add node kubelet args to inventory examples. (abutcher@redhat.com)
+- Adding ManageIQ service account by default (efreiber@redhat.com)
+- Fixes typo assigning docker_service_status_changed which leads to
+  misinterpretation in handler. (eric.mountain@amadeus.com)
+- Fix restart handlers. (abutcher@redhat.com)
+- Remove lb from docker hosts. (abutcher@redhat.com)
+- Install iptables, iptables-services when not is_atomic (sdodson@redhat.com)
+- Install all xpaas streams when enabled (sdodson@redhat.com)
+- add the necessary URLs for logging and metrics
+  (git001@users.noreply.github.com)
+- Link to Tito Home Page is Broken (lloy0076@adam.com.au)
+- Conditionalize for 3.1.1/1.1.1 (abutcher@redhat.com)
+- Use notify for workaround controllers unit. (abutcher@redhat.com)
+- change dns triggers to average (jdiaz@redhat.com)
+- add item/trigger for dns tests on all currently running containers
+  (jdiaz@redhat.com)
+- Add jboss-fuse/application-templates/fis-image-streams.json
+  (sdodson@redhat.com)
+- atomic-openshift-installer: Fix broken nosetest (smunilla@redhat.com)
+- Update from jboss-openshift/application-templates ose-v1.2.0-1
+  (sdodson@redhat.com)
+- fix logic to tolerate occasional failures (jdiaz@redhat.com)
+- Clean up versions.sh (sdodson@redhat.com)
+- change ovs mount to /var/run/openvswitch will not require a container restart
+  if openvswitch service is restarted (jdiaz@redhat.com)
+- split zagg.server.processor.errors into separate heartbeat and metrics error
+  items (needed since the scripts are split now). (twiest@redhat.com)
+- quick installer tests (smunilla@redhat.com)
+- atomic-openshift-installer: Remove HA hint for 3.0 install
+  (smunilla@redhat.com)
+- Add some guards to wait for images to be pulled before moving on
+  (sdodson@redhat.com)
+- Install httpd-tools when not is_atomic (sdodson@redhat.com)
+- Properly set use_flannel fact (sbaubeau@redhat.com)
+- Fix containerized variable (sdodson@redhat.com)
+- Skip yum/dnf ops when is_containerized (sdodson@redhat.com)
+- Move all docker config into openshift_docker to minimize docker restarts
+  (sdodson@redhat.com)
+- Create nfs host group with registry volume attachment. (abutcher@redhat.com)
+- Add openshift_cli role (sdodson@redhat.com)
+- pull docker images only if not already present (jdetiber@redhat.com)
+- fixes (jdetiber@redhat.com)
+- Containerization work by @sdodson (sdodson@redhat.com)
+- Initial containerization work from @ibotty (tob@butter.sh)
+- Add zabbix values to track docker container DNS results (jdiaz@redhat.com)
+- Fix registry modification for new deployment types. (dgoodwin@redhat.com)
+- Updates to ohi to pull cache if specified.  Also require version
+  (kwoodson@redhat.com)
+- Zabbix: added trigger to monitor app create over the last hour
+  (mwoodson@redhat.com)
+- added 'Template Zagg Server' (twiest@redhat.com)
+- Fixes typo when setting facts to record whether master/node has been
+  restarted already, to decide whether notify handler should do so or not.
+  Currently, this causes random SDN network setup failures as openshift-node
+  gets restarted while the setup script is running, and the subsequent start
+  fails to configure the SDN because it thinks it's already done.
+  (eric.mountain@amadeus.com)
+- Change controllers service type to simple. (abutcher@redhat.com)
+- Updating env-host-type to host patterns (kwoodson@redhat.com)
+- Add note that Fedora 23+ is acceptable deployment target for origin
+  (admiller@redhat.com)
+- Enforce connection: local and become: no on all localhost plays
+  (jdetiber@redhat.com)
+- Use join for the uncompress command. (jsteffan@fedoraproject.org)
+- Update for latest CentOS-7-x86_64-GenericCloud.  - Use xz compressed image  -
+  Update sha256 for new image  - Update docs to reflect new settings
+  (jsteffan@fedoraproject.org)
+
 * Thu Dec 10 2015 Thomas Wiest <twiest@redhat.com> 3.0.20-1
 - Revert "Automatic commit of package [openshift-ansible] release [3.0.20-1]."
   (twiest@redhat.com)

+ 4 - 5
playbooks/adhoc/create_pv/create_pv.yaml

@@ -3,9 +3,8 @@
 # ansible-playbook -e "cli_volume_size=1" \
 #                  -e "cli_device_name=/dev/xvdf" \
 #                  -e "cli_hosttype=master" \
-#                  -e "cli_env=ops" \
+#                  -e "cli_clusterid=ops" \
 #                  create_pv.yaml
-# FIXME: we need to change "env" to "clusterid" as that's what it really is now.
 #
 - name: Create a volume and attach it to master
   hosts: localhost
@@ -16,7 +15,7 @@
     cli_volume_type: gp2
     cli_volume_iops: ''
     oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] |
-                 intersect(groups['tag_env_' ~ cli_env]) |
+                 intersect(groups['oo_clusterid_' ~ cli_clusterid]) |
                  first }}"
   pre_tasks:
   - fail:
@@ -26,7 +25,7 @@
     - cli_volume_size
     - cli_device_name
     - cli_hosttype
-    - cli_env
+    - cli_clusterid
 
   - name: set oo_name fact
     set_fact:
@@ -57,7 +56,7 @@
     args:
       tags:
         Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}"
-        env: "{{cli_env}}"
+        clusterid: "{{cli_clusterid}}"
     register: voltags
 
   - debug: var=voltags

+ 1 - 1
playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml

@@ -113,7 +113,7 @@
     args:
       tags:
         Name: "{{ ec2_tag_Name }}"
-        env: "{{ ec2_tag_env}}"
+        clusterid: "{{ ec2_tag_clusterid }}"
     register: voltags
 
   - name: Wait for volume to attach

+ 1 - 1
playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml

@@ -151,7 +151,7 @@
     args:
       tags:
         Name: "{{ ec2_tag_Name }}"
-        env: "{{ ec2_tag_env }}"
+        clusterid: "{{ ec2_tag_clusterid }}"
     register: voltags
 
   - name: check for attached drive

+ 1 - 1
playbooks/adhoc/s3_registry/s3_registry.yml

@@ -6,7 +6,7 @@
 # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
 # The 'clusterid' is the short name of your cluster.
 
-- hosts: tag_env_{{ clusterid }}:&tag_host-type_openshift-master
+- hosts: tag_clusterid_{{ clusterid }}:&tag_host-type_openshift-master
   remote_user: root
   gather_facts: False
 

+ 53 - 0
playbooks/adhoc/sdn_restart/oo-sdn-restart.yml

@@ -0,0 +1,53 @@
+#!/usr/bin/ansible-playbook
+---
+#example run:
+# ansible-playbook -e "host=ops-node-compute-abcde" oo-sdn-restart.yml
+#
+
+- name: Check vars
+  hosts: localhost
+  gather_facts: false
+
+  pre_tasks:
+  - fail:
+      msg: "Playbook requires host to be set"
+    when: host is not defined or host == ''
+
+- name: Restart openshift/docker (and monitoring containers)
+  hosts: oo_version_3:&oo_name_{{ host }}
+  gather_facts: false
+  user: root
+
+  tasks:
+  - name: stop openshift/docker
+    service:
+      name: "{{ item }}"
+      state: stopped
+    with_items:
+    - atomic-openshift-node
+    - docker
+
+  - name: restart openvswitch
+    service:
+      name: openvswitch
+      state: restarted
+
+  - name: wait 5 sec
+    pause:
+      seconds: 5
+
+  - name: start openshift/docker
+    service:
+      name: "{{ item }}"
+      state: started
+    with_items:
+    - atomic-openshift-node
+    - docker
+
+  - name: start monitoring containers
+    service:
+      name: "{{ item }}"
+      state: restarted
+    with_items:
+    - oso-f22-host-monitoring
+    - oso-rhel7-zagg-client

+ 21 - 0
playbooks/adhoc/setupnfs.yml

@@ -0,0 +1,21 @@
+---
+### This playbook is old and we are currently not using NFS.
+- hosts: tag_Name_nfs-v3-stg
+  sudo: no
+  remote_user: root
+  gather_facts: no
+  roles:
+  - role: openshift_storage_nfs_lvm
+    mount_dir: /exports/stg-black
+    volume_prefix: "kwoodsontest"
+    volume_size: 5
+    volume_num_start: 222
+    number_of_volumes: 3
+  tasks:
+  - fetch:
+      dest: json/
+      src: "/root/{{ item }}"
+    with_items:
+  - persistent-volume.kwoodsontest5g0222.json
+  - persistent-volume.kwoodsontest5g0223.json
+  - persistent-volume.kwoodsontest5g0224.json

+ 31 - 3
playbooks/adhoc/uninstall.yml

@@ -19,15 +19,19 @@
       failed_when: false
       register: ostree_output
 
+      # Since we're not calling openshift_facts we'll do this for now
     - set_fact:
         is_atomic: "{{ ostree_output.rc == 0 }}"
+    - set_fact:
+        is_containerized: "{{ is_atomic or containerized | default(false) | bool }}"
 
     - name: Remove br0 interface
       shell: ovs-vsctl del-br br0
       changed_when: False
       failed_when: False
 
-    - service: name={{ item }} state=stopped
+    - name: Stop services
+      service: name={{ item }} state=stopped
       with_items:
         - atomic-enterprise-master
         - atomic-enterprise-node
@@ -36,6 +40,7 @@
         - atomic-openshift-master-controllers
         - atomic-openshift-node
         - etcd
+        - haproxy
         - openshift-master
         - openshift-master-api
         - openshift-master-controllers
@@ -46,8 +51,10 @@
         - origin-master-controllers
         - origin-node
         - pcsd
+      failed_when: false
 
-    - action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+    - name: Remove packages
+      action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
       when: not is_atomic | bool
       with_items:
         - atomic-enterprise
@@ -61,6 +68,7 @@
         - atomic-openshift-sdn-ovs
         - corosync
         - etcd
+        - haproxy
         - openshift
         - openshift-master
         - openshift-node
@@ -132,14 +140,26 @@
       with_items:
         - registry\.access\..*redhat\.com/openshift3
         - registry\.access\..*redhat\.com/aep3
+        - registry\.access\..*redhat\.com/rhel7/etcd
         - docker.io/openshift
 
     - shell:  "docker rmi -f {{ item.stdout_lines | join(' ') }}"
       changed_when: False
       failed_when: False
       with_items: "{{ images_to_delete.results }}"
+
+    - name: Remove sdn drop files
+      file:
+        path: /run/openshift-sdn
+        state: absent
+
+    - name: restart docker
+      service:
+        name: docker
+        state: restarted
 
-    - file: path={{ item }} state=absent
+    - name: Remove remaining files
+      file: path={{ item }} state=absent
       with_items:
         - "~{{ ansible_ssh_user }}/.kube"
         - /etc/ansible/facts.d/openshift.fact
@@ -149,7 +169,15 @@
         - /etc/openshift
         - /etc/openshift-sdn
         - /etc/origin
+        - /etc/systemd/system/atomic-openshift-master.service
+        - /etc/systemd/system/atomic-openshift-master-api.service
+        - /etc/systemd/system/atomic-openshift-master-controllers.service
+        - /etc/systemd/system/atomic-openshift-node.service
+        - /etc/systemd/system/etcd_container.service
+        - /etc/systemd/system/openvswitch.service
         - /etc/sysconfig/atomic-enterprise-master
+        - /etc/sysconfig/atomic-enterprise-master-api
+        - /etc/sysconfig/atomic-enterprise-master-controllers
         - /etc/sysconfig/atomic-enterprise-node
         - /etc/sysconfig/atomic-openshift-master
         - /etc/sysconfig/atomic-openshift-master-api

+ 22 - 0
playbooks/aws/openshift-cluster/cluster_hosts.yml

@@ -0,0 +1,22 @@
+---
+g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([]))
+                     | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+
+g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([]))
+                     | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+
+g_master_hosts: "{{ (groups['tag_host-type_master']|default([]))
+                     | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+
+g_node_hosts:   "{{ (groups['tag_host-type_node']|default([]))
+                     | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+
+g_nfs_hosts:    "{{ (groups['tag_host-type_nfs']|default([]))
+                   | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+
+g_all_hosts:    "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
+                    | union(g_lb_hosts) | default([]) }}"
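
These composed groups can be sanity-checked with a throwaway localhost play; a minimal sketch, assuming the EC2 dynamic inventory has already produced the tag_* groups and that cluster_id and cluster_env are supplied with -e:

- hosts: localhost
  connection: local
  gather_facts: no
  vars_files:
  - cluster_hosts.yml
  tasks:
  - debug:
      var: g_master_hosts  # e.g. run with -e cluster_id=ops -e cluster_env=stg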

+ 6 - 18
playbooks/aws/openshift-cluster/config.yml

@@ -1,26 +1,14 @@
 ---
-- hosts: localhost
-  gather_facts: no
-  connection: local
-  become: no
-  vars_files:
-  - vars.yml
-  tasks:
-  - set_fact:
-      g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
-      g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
-
 - include: ../../common/openshift-cluster/config.yml
+  vars_files:
+  - ../../aws/openshift-cluster/vars.yml
+  - ../../aws/openshift-cluster/cluster_hosts.yml
   vars:
-    g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([]))     | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_node_hosts:   "{{ (groups['tag_host-type_node']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
-    g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+    g_ssh_user:     "{{ deployment_vars[deployment_type].ssh_user }}"
+    g_sudo:         "{{ deployment_vars[deployment_type].sudo }}"
     g_nodeonmaster: true
     openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 2
+    openshift_debug_level: "{{ debug_level }}"
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ ec2_private_ip_address }}"
     openshift_public_hostname: "{{ ec2_ip_address }}"

+ 1 - 1
playbooks/aws/openshift-cluster/list.yml

@@ -7,7 +7,7 @@
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env_{{ cluster_id }}
+  - set_fact: scratch_group=tag_clusterid_{{ cluster_id }}
     when: cluster_id != ''
   - set_fact: scratch_group=all
     when: cluster_id == ''

+ 6 - 9
playbooks/aws/openshift-cluster/scaleup.yml

@@ -7,9 +7,6 @@
   vars_files:
   - vars.yml
   tasks:
-  - set_fact:
-      g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
-      g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
   - name: Evaluate oo_hosts_to_update
     add_host:
       name: "{{ item }}"
@@ -21,16 +18,16 @@
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 
 - include: ../../common/openshift-cluster/scaleup.yml
+  vars_files:
+  - ../../aws/openshift-cluster/vars.yml
+  - ../../aws/openshift-cluster/cluster_hosts.yml
   vars:
-    g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([]))     | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
     g_new_node_hosts: "{{ groups.nodes_to_add }}"
-    g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
-    g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
     g_nodeonmaster: true
     openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 2
+    openshift_debug_level: "{{ debug_level }}"
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ ec2_private_ip_address }}"
     openshift_public_hostname: "{{ ec2_ip_address }}"

+ 3 - 2
playbooks/aws/openshift-cluster/service.yml

@@ -6,6 +6,7 @@
   gather_facts: no
   vars_files:
   - vars.yml
+  - cluster_hosts.yml
   tasks:
   - fail: msg="cluster_id is required to be injected in this playbook"
     when: cluster_id is not defined
@@ -16,7 +17,7 @@
       groups: g_service_masters
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: "{{ g_master_hosts | default([]) }}"
+    with_items: "{{ master_hosts | default([]) }}"
 
   - name: Evaluate g_service_nodes
     add_host:
@@ -24,7 +25,7 @@
       groups: g_service_nodes
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: "{{ g_node_hosts | default([]) }}"
+    with_items: "{{ node_hosts | default([]) }}"
 
 - include: ../../common/openshift-node/service.yml
 - include: ../../common/openshift-master/service.yml

+ 6 - 3
playbooks/aws/openshift-cluster/tasks/launch_instances.yml

@@ -2,7 +2,8 @@
 - set_fact:
     created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
     docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
-    env: "{{ cluster }}"
+    cluster: "{{ cluster_id }}"
+    env: "{{ cluster_env }}"
     host_type: "{{ type }}"
     sub_host_type: "{{ g_sub_host_type }}"
 
@@ -123,7 +124,8 @@
     wait: yes
     instance_tags:
       created-by: "{{ created_by }}"
-      env: "{{ env }}"
+      clusterid: "{{ cluster }}"
+      environment: "{{ cluster_env }}"
       host-type: "{{ host_type }}"
       sub-host-type: "{{ sub_host_type }}"
     volumes: "{{ volumes }}"
@@ -139,7 +141,8 @@
       Name: "{{ item.0 }}"
 
 - set_fact:
-    instance_groups: "tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_sub-host-type_{{ sub_host_type }}"
+    instance_groups: "tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }}, tag_environment_{{ cluster_env }},
+                    tag_host-type_{{ host_type }}, tag_sub-host-type_{{ sub_host_type }}"
 
 - set_fact:
     node_label:
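
For concreteness, with created_by=jdoe, cluster_id=ops, cluster_env=stg, and a node host of sub-host-type compute, the instance_groups fact above evaluates to (values invented for illustration):

# tag_created-by_jdoe, tag_clusterid_ops, tag_environment_stg,
# tag_host-type_node, tag_sub-host-type_compute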

+ 17 - 17
playbooks/aws/openshift-cluster/terminate.yml

@@ -7,13 +7,12 @@
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env_{{ cluster_id }}
   - add_host:
       name: "{{ item }}"
       groups: oo_hosts_to_terminate
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+    with_items: (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost'])
 
 - name: Unsubscribe VMs
   hosts: oo_hosts_to_terminate
@@ -29,34 +28,35 @@
   connection: local
   become: no
   gather_facts: no
-  vars:
-    host_vars: "{{ hostvars
-                   | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
   tasks:
     - name: Remove tags from instances
-      ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
-      args:
+      ec2_tag:
+        resource: "{{ hostvars[item]['ec2_id'] }}"
+        region: "{{ hostvars[item]['ec2_region'] }}"
+        state: absent
         tags:
-          env: "{{ item['ec2_tag_env'] }}"
-          host-type: "{{ item['ec2_tag_host-type'] }}"
-          sub_host_type: "{{ item['ec2_tag_sub-host-type'] }}"
-      with_items: host_vars
+          environment:   "{{ hostvars[item]['ec2_tag_environment'] }}"
+          clusterid:     "{{ hostvars[item]['ec2_tag_clusterid'] }}"
+          host-type:     "{{ hostvars[item]['ec2_tag_host-type'] }}"
+          sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
+      with_items: groups.oo_hosts_to_terminate
       when: "'oo_hosts_to_terminate' in groups"
 
     - name: Terminate instances
       ec2:
         state: absent
-        instance_ids: ["{{ item.ec2_id }}"]
-        region: "{{ item.ec2_region }}"
+        instance_ids: ["{{ hostvars[item].ec2_id }}"]
+        region: "{{ hostvars[item].ec2_region }}"
       ignore_errors: yes
       register: ec2_term
-      with_items: host_vars
+      with_items: groups.oo_hosts_to_terminate
       when: "'oo_hosts_to_terminate' in groups"
 
     # Fail if any of the instances failed to terminate with an error other
     # than 403 Forbidden
-    - fail: msg=Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}
-      when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+    - fail:
+        msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
+      when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
       with_items: ec2_term.results
 
     - name: Stop instance if termination failed
@@ -65,7 +65,7 @@
         instance_ids: ["{{ item.item.ec2_id }}"]
         region: "{{ item.item.ec2_region }}"
       register: ec2_stop
-      when: "'oo_hosts_to_terminate' in groups and item.failed"
+      when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
       with_items: ec2_term.results
 
     - name: Rename stopped instances

+ 2 - 6
playbooks/aws/openshift-cluster/update.yml

@@ -4,13 +4,9 @@
   connection: local
   become: no
   gather_facts: no
-  vars:
-    g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}"
-    g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}"
-    g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}"
-    g_node_hosts:   "{{ (groups['tag_host-type_node']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
   vars_files:
   - vars.yml
+  - cluster_hosts.yml
   tasks:
   - name: Update - Evaluate oo_hosts_to_update
     add_host:
@@ -18,7 +14,7 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}"
+    with_items: "{{ g_all_hosts | default([]) }}"
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 

+ 6 - 22
playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -2,32 +2,16 @@
 # This playbook upgrades an existing AWS cluster, leaving nodes untouched if used with an 'online' deployment type.
 # Usage:
 #  ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=online -e cluster_id=<cluster_id>
-- hosts: localhost
-  gather_facts: no
-  vars_files:
-  - ../../vars.yml
-  - "../../vars.{{ deployment_type }}.{{ cluster_id }}.yml"
-
-  tasks:
-  - set_fact:
-      g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
-      g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
-
-  - set_fact:
-      tmp_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
-    when: deployment_type != 'online'
-
 - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+  vars_files:
+  - "{{lookup('file', '../../../../aws/openshift-cluster/vars.yml')}}"
+  - "{{lookup('file', '../../../../aws/openshift-cluster/cluster_hosts.yml')}}"
   vars:
-    g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([]))     | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_node_hosts:   "{{ (groups['tag_host-type_node']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
-    g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
     g_nodeonmaster: true
     openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 2
+    openshift_debug_level: "{{ debug_level }}"
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ ec2_private_ip_address }}"
     openshift_public_hostname: "{{ ec2_ip_address }}"

+ 19 - 13
playbooks/aws/openshift-cluster/vars.yml

@@ -1,8 +1,23 @@
 ---
+debug_level: 2
+
+deployment_rhel7_ent_base:
+  # rhel-7.1, requires cloud access subscription
+  image: ami-10663b78
+  image_name:
+  region: us-east-1
+  ssh_user: ec2-user
+  sudo: yes
+  keypair: libra
+  type: m4.large
+  security_groups: [ 'public' ]
+  vpc_subnet:
+  assign_public_ip:
+
 deployment_vars:
   origin:
     # centos-7, requires marketplace
-    image: ami-96a818fe
+    image: ami-61bbf104
     image_name:
     region: us-east-1
     ssh_user: centos
@@ -24,15 +39,6 @@ deployment_vars:
     security_groups: [ 'public' ]
     vpc_subnet:
     assign_public_ip:
-  enterprise:
-    # rhel-7.1, requires cloud access subscription
-    image: ami-10663b78
-    image_name:
-    region: us-east-1
-    ssh_user: ec2-user
-    sudo: yes
-    keypair: libra
-    type: m4.large
-    security_groups: [ 'public' ]
-    vpc_subnet:
-    assign_public_ip:
+  enterprise: "{{ deployment_rhel7_ent_base }}"
+  openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+  atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
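
The enterprise flavors now share one base dictionary instead of three near-identical copies. As a sketch of how these vars are consumed (runs are normally driven through bin/cluster; the direct invocation and its flags below are assumptions for illustration):

# ansible-playbook playbooks/aws/openshift-cluster/launch.yml \
#   -e deployment_type=openshift-enterprise -e cluster_id=ops -e cluster_env=dev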

+ 13 - 0
playbooks/byo/openshift-cluster/cluster_hosts.yml

@@ -0,0 +1,13 @@
+---
+g_etcd_hosts:   "{{ groups.etcd | default([]) }}"
+
+g_lb_hosts:     "{{ groups.lb | default([]) }}"
+
+g_master_hosts: "{{ groups.masters | default([]) }}"
+
+g_node_hosts:   "{{ groups.nodes | default([]) }}"
+
+g_nfs_hosts:   "{{ groups.nfs | default([]) }}"
+
+g_all_hosts:    "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
+                    | union(g_lb_hosts) | default([]) }}"

+ 3 - 5
playbooks/byo/openshift-cluster/config.yml

@@ -1,10 +1,8 @@
 ---
 - include: ../../common/openshift-cluster/config.yml
+  vars_files:
+  - ../../byo/openshift-cluster/cluster_hosts.yml
   vars:
-    g_etcd_hosts: "{{ groups.etcd | default([]) }}"
-    g_master_hosts: "{{ groups.masters | default([]) }}"
-    g_node_hosts: "{{ groups.nodes | default([]) }}"
-    g_lb_hosts: "{{ groups.lb | default([]) }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
-    openshift_debug_level: 2
+    openshift_debug_level: "{{ debug_level | default(2) }}"
     openshift_deployment_type: "{{ deployment_type }}"

+ 3 - 5
playbooks/byo/openshift-cluster/scaleup.yml

@@ -1,10 +1,8 @@
 ---
 - include: ../../common/openshift-cluster/scaleup.yml
+  vars_files:
+  - ../../byo/openshift-cluster/cluster_hosts.yml
   vars:
-    g_etcd_hosts: "{{ groups.etcd | default([]) }}"
-    g_master_hosts: "{{ groups.masters | default([]) }}"
-    g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
-    g_lb_hosts: "{{ groups.lb | default([]) }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
-    openshift_debug_level: 2
+    openshift_debug_level: "{{ debug_level | default(2) }}"
     openshift_deployment_type: "{{ deployment_type }}"

+ 1 - 1
playbooks/byo/openshift-cluster/upgrades/README.md

@@ -1,6 +1,6 @@
 # Upgrade playbooks
 The playbooks provided in this directory can be used for upgrading an existing
-environment. Additional notes for the associated upgrade playbooks are
+cluster. Additional notes for the associated upgrade playbooks are
 provided in their respective directories.
 
 # Upgrades available

+ 3 - 0
playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml

@@ -1,8 +1,11 @@
 ---
 - include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
+  vars_files:
+  - "{{lookup('file', '../../../../byo/openshift-cluster/cluster_hosts.yml')}}"
   vars:
     g_etcd_hosts: "{{ groups.etcd | default([]) }}"
     g_master_hosts: "{{ groups.masters | default([]) }}"
+    g_nfs_hosts: "{{ groups.nfs | default([]) }}"
     g_node_hosts: "{{ groups.nodes | default([]) }}"
     g_lb_hosts: "{{ groups.lb | default([]) }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"

+ 3 - 0
playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -1,8 +1,11 @@
 ---
 - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+  vars_files:
+  - "{{lookup('file', '../../../../byo/openshift-cluster/cluster_hosts.yml')}}"
   vars:
     g_etcd_hosts: "{{ groups.etcd | default([]) }}"
     g_master_hosts: "{{ groups.masters | default([]) }}"
+    g_nfs_hosts: "{{ groups.nfs | default([]) }}"
     g_node_hosts: "{{ groups.nodes | default([]) }}"
     g_lb_hosts: "{{ groups.lb | default([]) }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"

+ 4 - 0
playbooks/common/openshift-cluster/config.yml

@@ -1,8 +1,12 @@
 ---
 - include: evaluate_groups.yml
 
+- include: ../openshift-docker/config.yml
+
 - include: ../openshift-etcd/config.yml
 
+- include: ../openshift-nfs/config.yml
+
 - include: ../openshift-master/config.yml
 
 - include: ../openshift-node/config.yml

+ 16 - 0
playbooks/common/openshift-cluster/evaluate_groups.yml

@@ -21,6 +21,14 @@
       msg: This playbook requires g_lb_hosts to be set
     when: g_lb_hosts is not defined
 
+  - fail:
+      msg: This playbook requires g_nfs_hosts to be set
+    when: g_nfs_hosts is not defined
+
+  - fail:
+      msg: The nfs group must be limited to one host
+    when: (g_nfs_hosts | default([])) | length > 1
+
   - name: Evaluate oo_etcd_to_config
     add_host:
       name: "{{ item }}"
@@ -81,3 +89,11 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_sudo: "{{ g_sudo | default(omit) }}"
     with_items: "{{ g_lb_hosts | default([]) }}"
+
+  - name: Evaluate oo_nfs_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nfs_to_config
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ g_nfs_hosts | default([]) }}"

+ 2 - 1
playbooks/common/openshift-cluster/update_repos_and_packages.yml

@@ -4,9 +4,10 @@
     openshift_deployment_type: "{{ deployment_type }}"
   roles:
   - role: rhel_subscribe
-    when: deployment_type == "enterprise" and
+    when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and
           ansible_distribution == "RedHat" and
           lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
             default('no', True) | lower in ['no', 'false']
+          and not openshift.common.is_atomic | bool
   - openshift_repos
   - os_update_latest

+ 2 - 3
playbooks/common/openshift-cluster/upgrades/files/versions.sh

@@ -1,9 +1,8 @@
 #!/bin/bash
 
-yum_installed=$(yum list installed "$@" 2>&1 | tail -n +2 | grep -v 'Installed Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
-
-yum_available=$(yum list available -q "$@" 2>&1 | tail -n +2 | grep -v 'Available Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'el7ose' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
+yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | tr '\n' ' ')
 
+yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | tr '\n' ' ')
 
 echo "---"
 echo "curr_version: ${yum_installed}"

+ 8 - 4
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -29,6 +29,7 @@
   hosts: oo_first_master
   vars:
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+    target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
   gather_facts: no
   tasks:
   # Pacemaker is currently the only supported upgrade path for multiple masters
@@ -45,8 +46,8 @@
   - fail:
       msg: >
         openshift_pkg_version is {{ openshift_pkg_version }} which is not a
-        valid version for a 3.1 upgrade
-    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare('3.0.2.900','<')
+        valid version for a {{ target_version }} upgrade
+    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version, '<')
 
   # If this script errors out ansible will show the default stdout/stderr
   # which contains details for the user:
@@ -55,6 +56,8 @@
 
 - name: Verify upgrade can proceed
   hosts: oo_masters_to_config:oo_nodes_to_config
+  vars:
+    target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
   tasks:
   - name: Clean package cache
     command: "{{ ansible_pkg_mgr }} clean all"
@@ -77,8 +80,8 @@
     when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<')
 
   - fail:
-      msg: Atomic OpenShift 3.1 packages not found
-    when: g_aos_versions.curr_version | version_compare('3.0.2.900','<') and (g_aos_versions.avail_version is none or g_aos_versions.avail_version | version_compare('3.0.2.900','<'))
+      msg: Upgrade packages not found
+    when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
 
   - set_fact:
       pre_upgrade_complete: True
@@ -235,6 +238,7 @@
 
   - name: Ensure python-yaml present for config upgrade
     action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+    when: not openshift.common.is_atomic | bool
 
   - name: Upgrade master configuration
     openshift_upgrade_config:

+ 9 - 0
playbooks/common/openshift-docker/config.yml

@@ -0,0 +1,9 @@
+- name: Configure docker hosts
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+  vars:
+    docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}"
+    docker_insecure_registries: "{{ lookup('oo_option',  'docker_insecure_registries') | oo_split }}"
+    docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}"
+  roles:
+  - openshift_facts
+  - openshift_docker
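
The three docker_* vars feed comma-separated option strings through the repo's oo_split filter (defined in filter_plugins/oo_filters.py), which splits on ',' by default; illustratively:

# 'r1.example.com,r2.example.com' | oo_split
#   -> ['r1.example.com', 'r2.example.com']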

+ 1 - 0
playbooks/common/openshift-docker/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/common/openshift-docker/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 1 - 0
playbooks/common/openshift-docker/roles

@@ -0,0 +1 @@
+../../../roles

+ 3 - 1
playbooks/common/openshift-etcd/config.yml

@@ -14,7 +14,8 @@
           public_hostname: "{{ openshift_public_hostname | default(None) }}"
           deployment_type: "{{ openshift_deployment_type }}"
       - role: etcd
-        local_facts: {}
+        local_facts:
+          etcd_image: "{{ osm_etcd_image | default(None) }}"
   - name: Check status of etcd certificates
     stat:
       path: "{{ item }}"
@@ -88,6 +89,7 @@
   roles:
   - etcd
   - role: nickhammond.logrotate
+    when: not openshift.common.is_containerized | bool
 
 - name: Delete temporary directory on localhost
   hosts: localhost

+ 38 - 24
playbooks/common/openshift-master/config.yml

@@ -232,31 +232,36 @@
       balance: source
       servers: "{{ hostvars.localhost.haproxy_backend_servers }}"
   roles:
+  - role: openshift_facts
   - role: haproxy
     when: groups.oo_masters_to_config | length > 1
 
-- name: Generate master session keys
+- name: Check for cached session secrets
+  hosts: oo_first_master
+  roles:
+  - role: openshift_facts
+  post_tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+          session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
+          session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+
+- name: Generate master session secrets
   hosts: oo_first_master
+  vars:
+    g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([]) and openshift.master.session_encryption_secrets | default([])) | length > 0 }}"
+    g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}"
+    g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}"
+  roles:
+  - role: openshift_facts
   tasks:
-  - fail:
-      msg: "Both openshift_master_session_auth_secrets and openshift_master_session_encryption_secrets must be provided if either variable is set"
-    when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is not defined) or (openshift_master_session_encryption_secrets is defined and openshift_master_session_auth_secrets is not defined)
-  - fail:
-      msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length"
-    when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length)
-  - name: Install OpenSSL package
-    action: "{{ ansible_pkg_mgr }} name=openssl state=present"
-  - name: Generate session authentication key
-    command: /usr/bin/openssl rand -base64 24
-    register: session_auth_output
-    when: openshift_master_session_auth_secrets is undefined
-  - name: Generate session encryption key
-    command: /usr/bin/openssl rand -base64 24
-    register: session_encryption_output
-    when: openshift_master_session_encryption_secrets is undefined
-  - set_fact:
-      session_auth_secret: "{{ openshift_master_session_auth_secrets | default([session_auth_output.stdout]) }}"
-      session_encryption_secret: "{{ openshift_master_session_encryption_secrets | default([session_encryption_output.stdout]) }}"
+  - openshift_facts:
+      role: master
+      local_facts:
+        session_auth_secrets: "{{ g_session_auth_secrets }}"
+        session_encryption_secrets: "{{ g_session_encryption_secrets }}"
+    when: not g_session_secrets_present | bool
 
 - name: Parse named certificates
   hosts: localhost
@@ -312,8 +317,8 @@
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
     openshift_master_count: "{{ groups.oo_masters_to_config | length }}"
-    openshift_master_session_auth_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_auth_secret'] }}"
-    openshift_master_session_encryption_secrets: "{{ hostvars[groups['oo_first_master'][0]]['session_encryption_secret'] }}"
+    openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
+    openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
   pre_tasks:
   - name: Ensure certificate directory exists
     file:
@@ -328,6 +333,7 @@
   roles:
   - openshift_master
   - role: nickhammond.logrotate
+    when: not openshift.common.is_containerized | bool
   - role: fluentd_master
     when: openshift.common.use_fluentd | bool
   post_tasks:
@@ -356,7 +362,7 @@
     cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
   roles:
   - role: cockpit
-    when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+    when: not openshift.common.is_containerized | bool and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
       (osm_use_cockpit | bool or osm_use_cockpit is undefined )
 
 - name: Configure flannel
@@ -394,7 +400,15 @@
 
 - name: Create services
   hosts: oo_first_master
+  vars:
+    attach_registry_volume: "{{ groups.oo_nfs_to_config | length > 0 }}"
+  pre_tasks:
+  - set_fact:
+      nfs_host: "{{ groups.oo_nfs_to_config.0 }}"
+      registry_volume_path: "{{ hostvars[groups.oo_nfs_to_config.0].openshift.nfs.exports_dir + '/' + hostvars[groups.oo_nfs_to_config.0].openshift.nfs.registry_volume }}"
+    when: attach_registry_volume | bool
   roles:
   - role: openshift_router
     when: openshift.master.infra_nodes is defined
-  #- role: openshift_registry
+  - role: openshift_registry
+    when: openshift.master.infra_nodes is defined and attach_registry_volume | bool
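
One note on the session-secret plays above: the generated auth/encryption secrets ultimately land in the master's session secrets file, which in the OpenShift v1 format has roughly this shape (a sketch; values invented):

apiVersion: v1
kind: SessionSecrets
secrets:
- authentication: "Vp5kKDiu..."
  encryption: "B2w8nn4W..."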

+ 5 - 0
playbooks/common/openshift-nfs/config.yml

@@ -0,0 +1,5 @@
+---
+- name: Configure nfs hosts
+  hosts: oo_nfs_to_config
+  roles:
+  - role: openshift_storage_nfs

+ 1 - 0
playbooks/common/openshift-nfs/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/common/openshift-nfs/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 1 - 0
playbooks/common/openshift-nfs/roles

@@ -0,0 +1 @@
+../../../roles/

+ 18 - 0
playbooks/common/openshift-nfs/service.yml

@@ -0,0 +1,18 @@
+---
+- name: Populate g_service_nfs host group if needed
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - fail: msg="new_cluster_state is required to be injected in this playbook"
+    when: new_cluster_state is not defined
+
+  - name: Evaluate g_service_nfs
+    add_host: name={{ item }} groups=g_service_nfs
+    with_items: oo_host_group_exp | default([])
+
+- name: Change state on nfs instance(s)
+  hosts: g_service_nfs
+  connection: ssh
+  gather_facts: no
+  tasks:
+    - service: name=nfs-server state="{{ new_cluster_state }}"
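
Like the other openshift-*/service.yml playbooks, this one expects its inputs injected at run time; a hypothetical invocation (host name and state assumed):

# ansible-playbook playbooks/common/openshift-nfs/service.yml \
#   -e new_cluster_state=restarted -e '{"oo_host_group_exp": ["nfs1.example.com"]}'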

+ 2 - 0
playbooks/common/openshift-node/config.yml

@@ -16,6 +16,7 @@
           hostname: "{{ openshift_hostname | default(None) }}"
           public_hostname: "{{ openshift_public_hostname | default(None) }}"
           deployment_type: "{{ openshift_deployment_type }}"
+          use_flannel: "{{ openshift_use_flannel | default(None) }}"
       - role: node
         local_facts:
           labels: "{{ openshift_node_labels | default(None) }}"
@@ -181,6 +182,7 @@
   - role: flannel
     when: openshift.common.use_flannel | bool
   - role: nickhammond.logrotate
+    when: not openshift.common.is_containerized | bool
   - role: fluentd_node
     when: openshift.common.use_fluentd | bool
   tasks:

+ 22 - 0
playbooks/gce/openshift-cluster/cluster_hosts.yml

@@ -0,0 +1,22 @@
+---
+g_etcd_hosts:   "{{ (groups['tag_host-type-etcd']|default([]))
+                     | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+
+g_lb_hosts:     "{{ (groups['tag_host-type-lb']|default([]))
+                     | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+
+g_master_hosts: "{{ (groups['tag_host-type-master']|default([]))
+                     | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+
+g_node_hosts:   "{{ (groups['tag_host-type-node']|default([]))
+                     | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+
+g_nfs_hosts:    "{{ (groups['tag_host-type-nfs']|default([]))
+                   | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+
+g_all_hosts:    "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
+                    | union(g_lb_hosts) | default([]) }}"

+ 7 - 23
playbooks/gce/openshift-cluster/config.yml

@@ -1,32 +1,16 @@
 ---
 # TODO: fix firewall related bug with GCE and origin, since GCE is overriding
 # /etc/sysconfig/iptables
-
-- hosts: localhost
-  gather_facts: no
-  connection: local
-  become: no
-  vars_files:
-  - vars.yml
-  tasks:
-  - set_fact:
-      g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
-      g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
-      use_sdn: "{{ do_we_use_openshift_sdn }}"
-      sdn_plugin: "{{ sdn_network_plugin }}"
-
 - include: ../../common/openshift-cluster/config.yml
+  vars_files:
+  - ../../gce/openshift-cluster/vars.yml
+  - ../../gce/openshift-cluster/cluster_hosts.yml
   vars:
-    g_etcd_hosts:   "{{ (groups['tag_host-type-etcd']|default([]))   | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
-    g_lb_hosts:     "{{ (groups['tag_host-type-lb']|default([]))     | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
-    g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
-    g_node_hosts:   "{{ (groups['tag_host-type-node']|default([]))   | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
-    g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
-    g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
     g_nodeonmaster: true
     openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 2
+    openshift_debug_level: "{{ debug_level }}"
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ gce_private_ip }}"
-    openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn  }}"
-    os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}"
+    openshift_use_openshift_sdn: "{{ do_we_use_openshift_sdn }}"

+ 4 - 7
playbooks/gce/openshift-cluster/join_node.yml

@@ -4,13 +4,9 @@
   connection: local
   become: no
   gather_facts: no
-  vars:
-    g_etcd_hosts:   "{{ (groups['tag_host-type-etcd']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
-    g_lb_hosts:     "{{ (groups['tag_host-type-lb']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
-    g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
-    g_node_hosts:   "{{ (groups['tag_host-type-node']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
   vars_files:
   - vars.yml
+  - cluster_hosts.yml
   tasks:
   - name: Evaluate oo_hosts_to_update
     add_host:
@@ -28,6 +24,7 @@
   gather_facts: no
   vars_files:
   - vars.yml
+  - cluster_hosts.yml
   tasks:
   - name: Evaluate oo_nodes_to_config
     add_host:
@@ -38,11 +35,11 @@
 
   - name: Evaluate oo_first_master
     add_host:
-      name: "{{ g_master_hosts | first }}"
+      name: "{{ master_hosts | first }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       groups: oo_first_master
-    when: g_master_hosts is defined and g_master_hosts|length > 0
+    when: master_hosts is defined and master_hosts|length > 0
 
 #- include: config.yml
 - include: ../../common/openshift-node/config.yml

+ 1 - 1
playbooks/gce/openshift-cluster/list.yml

@@ -7,7 +7,7 @@
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env-{{ cluster_id }}
+  - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
     when: cluster_id != ''
   - set_fact: scratch_group=all
     when: cluster_id == ''

+ 3 - 2
playbooks/gce/openshift-cluster/service.yml

@@ -6,6 +6,7 @@
   gather_facts: no
   vars_files:
   - vars.yml
+  - cluster_hosts.yml
   tasks:
   - fail: msg="cluster_id is required to be injected in this playbook"
     when: cluster_id is not defined
@@ -15,14 +16,14 @@
       groups: g_service_nodes
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: "{{ g_node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
+    with_items: "{{ node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
 
   - add_host:
       name: "{{ item }}"
       groups: g_service_masters
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: "{{ g_master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
+    with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
 
 - include: ../../common/openshift-node/service.yml
 - include: ../../common/openshift-master/service.yml

+ 2 - 1
playbooks/gce/openshift-cluster/tasks/launch_instances.yml

@@ -16,7 +16,8 @@
     #service_account_permissions: "datastore,logging-write"
     tags:
       - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
-      - env-{{ cluster }}
+      - environment-{{ cluster_env }}
+      - clusterid-{{ cluster_id }}
       - host-type-{{ type }}
       - sub-host-type-{{ g_sub_host_type }}
   when: instances |length > 0

+ 1 - 2
playbooks/gce/openshift-cluster/terminate.yml

@@ -7,13 +7,12 @@
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env-{{ cluster_id }}
   - add_host:
       name: "{{ item }}"
       groups: oo_hosts_to_terminate
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
+    with_items: (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost'])
 
 - name: Unsubscribe VMs
   hosts: oo_hosts_to_terminate

+ 3 - 7
playbooks/gce/openshift-cluster/update.yml

@@ -1,16 +1,12 @@
 ---
 - name: Populate oo_hosts_to_update group
   hosts: localhost
-  become: no
   connection: local
+  become: no
   gather_facts: no
-  vars:
-    g_etcd_hosts:   "{{ (groups['tag_host-type-etcd']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
-    g_lb_hosts:     "{{ (groups['tag_host-type-lb']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
-    g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
-    g_node_hosts:   "{{ (groups['tag_host-type-node']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
   vars_files:
   - vars.yml
+  - cluster_hosts.yml
   tasks:
   - name: Evaluate oo_hosts_to_update
     add_host:
@@ -18,7 +14,7 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}"
+    with_items: "{{ g_all_hosts | default([]) }}"
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 

+ 12 - 6
playbooks/gce/openshift-cluster/vars.yml

@@ -1,7 +1,15 @@
 ---
 do_we_use_openshift_sdn: true
-sdn_network_plugin: redhat/openshift-ovs-subnet 
+sdn_network_plugin: redhat/openshift-ovs-subnet
+debug_level: 2
 # os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation
+
+deployment_rhel7_ent_base:
+  image: rhel-7
+  machine_type: n1-standard-1
+  ssh_user:
+  sudo: yes
+
 deployment_vars:
   origin:
     image: preinstalled-slave-50g-v5
@@ -13,8 +21,6 @@ deployment_vars:
     machine_type: n1-standard-1
     ssh_user: root
     sudo: no
-  enterprise:
-    image: rhel-7
-    machine_type: n1-standard-1
-    ssh_user:
-    sudo: yes
+  enterprise: "{{ deployment_rhel7_ent_base }}"
+  openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+  atomic-enterprise: "{{ deployment_rhel7_ent_base }}"

+ 22 - 0
playbooks/libvirt/openshift-cluster/cluster_hosts.yml

@@ -0,0 +1,22 @@
+---
+g_etcd_hosts:   "{{ (groups['tag_host-type-etcd']|default([]))
+                     | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+
+g_lb_hosts:     "{{ (groups['tag_host-type-lb']|default([]))
+                     | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+
+g_master_hosts: "{{ (groups['tag_host-type-master']|default([]))
+                     | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+
+g_node_hosts:   "{{ (groups['tag_host-type-node']|default([]))
+                   | intersect((groups['tag_clusterid-' ~ cluster_id]|default([])))
+                   | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+
+g_nfs_hosts:    "{{ (groups['tag_host-type-nfs']|default([]))
+                   | intersect((groups['tag_environment-' ~ cluster_env]|default([]))) }}"
+
+g_all_hosts:    "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
+                    | union(g_lb_hosts) | default([]) }}"

+ 7 - 19
playbooks/libvirt/openshift-cluster/config.yml

@@ -2,26 +2,14 @@
 # TODO: need to figure out a plan for setting hostname, currently the default
 # is localhost, so no hostname value (or public_hostname) value is getting
 # assigned
-
-- hosts: localhost
-  gather_facts: no
-  become: no
-  connection: local
-  vars_files:
-  - vars.yml
-  tasks:
-  - set_fact:
-      g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
-      g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
-
 - include: ../../common/openshift-cluster/config.yml
+  vars_files:
+  - ../../libvirt/openshift-cluster/vars.yml
+  - ../../libvirt/openshift-cluster/cluster_hosts.yml
   vars:
-    g_etcd_hosts:   "{{ (groups['tag_host-type-etcd']|default([]))   | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
-    g_lb_hosts:     "{{ (groups['tag_host-type-lb']|default([]))     | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
-    g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
-    g_node_hosts:   "{{ (groups['tag_host-type-node']|default([]))   | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
-    g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
-    g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    g_nodeonmaster: true
     openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 2
+    openshift_debug_level: "{{ debug_level }}"
     openshift_deployment_type: "{{ deployment_type }}"

+ 1 - 1
playbooks/libvirt/openshift-cluster/list.yml

@@ -7,7 +7,7 @@
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env-{{ cluster_id }}
+  - set_fact: scratch_group=tag_clusterid-{{ cluster_id }}
     when: cluster_id != ''
   - set_fact: scratch_group=all
     when: cluster_id == ''

+ 5 - 1
playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml

@@ -4,13 +4,17 @@
     dest: "{{ libvirt_storage_pool_path }}"
     state: directory
 
+# Permissions are needed both on the directory itself and on anything created under it, so the acl module is called twice: once without and once with the default option set.
 - acl:
-    default: yes
+    default: "{{ item }}"
     entity: kvm
     etype: group
     name: "{{ libvirt_storage_pool_path }}"
     permissions: rwx
     state: present
+  with_items:
+    - no
+    - yes
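
Running the acl task twice, first with default: no and then default: yes, grants the kvm group access to the directory itself and sets a default ACL inherited by newly created items; getfacl on the pool path would then show roughly (illustrative):

# getfacl <libvirt_storage_pool_path>
#   group:kvm:rwx
#   default:group:kvm:rwx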
 
 - name: Test if libvirt storage pool for openshift already exists
   command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"

+ 1 - 1
playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml

@@ -88,7 +88,7 @@
     ansible_ssh_host: '{{ item.1 }}'
     ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
     ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}'
+    groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"
   with_together:
     - instances
     - ips

+ 2 - 1
playbooks/libvirt/openshift-cluster/templates/domain.xml

@@ -3,7 +3,8 @@
   <memory unit='GiB'>1</memory>
   <metadata xmlns:ansible="https://github.com/ansible/ansible">
     <ansible:tags>
-      <ansible:tag>env-{{ cluster }}</ansible:tag>
+      <ansible:tag>environment-{{ cluster_env }}</ansible:tag>
+      <ansible:tag>clusterid-{{ cluster }}</ansible:tag>
       <ansible:tag>host-type-{{ type }}</ansible:tag>
       <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
     </ansible:tags>

+ 1 - 1
playbooks/libvirt/openshift-cluster/terminate.yml

@@ -9,7 +9,7 @@
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: cluster_group=tag_env-{{ cluster_id }}
+  - set_fact: cluster_group=tag_clusterid-{{ cluster_id }}
   - add_host:
       name: "{{ item }}"
       groups: oo_hosts_to_terminate

+ 3 - 8
playbooks/libvirt/openshift-cluster/update.yml

@@ -1,17 +1,12 @@
 ---
 - name: Populate oo_hosts_to_update group
   hosts: localhost
-  become: no
   connection: local
+  become: no
   gather_facts: no
-  vars:
-    g_etcd_hosts:   "{{ (groups['tag_host-type-etcd']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
-    g_lb_hosts:     "{{ (groups['tag_host-type-lb']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
-    g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
-    g_node_hosts:   "{{ (groups['tag_host-type-node']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
-
   vars_files:
   - vars.yml
+  - cluster_hosts.yml
   tasks:
   - name: Evaluate oo_hosts_to_update
     add_host:
@@ -19,7 +14,7 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: "{{ g_master_hosts  | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}"
+    with_items: "{{ g_all_hosts | default([]) }}"
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 

+ 17 - 15
playbooks/libvirt/openshift-cluster/vars.yml

@@ -3,6 +3,20 @@ libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-open
 libvirt_storage_pool: 'openshift-ansible'
 libvirt_network: openshift-ansible
 libvirt_uri: 'qemu:///system'
+debug_level: 2
+
+# The RHEL qcow2 guest image cannot be downloaded automatically from the Red Hat portal because the download requires authentication.
+# As a result, the default image_url below will not work for the enterprise deployment types.
+deployment_rhel7_ent_base:
+  image:
+    url:    "{{ lookup('oo_option', 'image_url') |
+                default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+    name:   "{{ lookup('oo_option', 'image_name') |
+                default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+    sha256: "{{ lookup('oo_option', 'image_sha256') |
+                default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
+  ssh_user: openshift
+  sudo: yes
 
 deployment_vars:
   origin:
@@ -24,18 +38,6 @@ deployment_vars:
       sha256:
     ssh_user: root
     sudo: no
-  enterprise:
-    image:
-      url:    "{{ lookup('oo_option', 'image_url') |
-                  default('https://access.cdn.redhat.com//content/origin/files/sha256/ff/ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3/rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}"
-      name:   "{{ lookup('oo_option', 'image_name') |
-                  default('rhel-guest-image-7.1-20150224.0.x86_64.qcow2', True) }}"
-      sha256: "{{ lookup('oo_option', 'image_sha256') |
-                  default('ff8198653cfd9c39411fc57077451ac291b3a605d305e905932fd6d5b1890bf3', True) }}"
-    ssh_user: openshift
-    sudo: yes
-#  origin:
-#    fedora:
-#      url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2"
-#      name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-#      sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
+  enterprise: "{{ deployment_rhel7_ent_base }}"
+  openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+  atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
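
Pointing all three enterprise deployment types at one shared dict removes the duplicated image block; Ansible resolves the "{{ deployment_rhel7_ent_base }}" reference when the value is used. Note also the second argument to default(..., True), which makes the fallback apply to empty strings as well as undefined variables. The reuse pattern in isolation (names hypothetical):

    - hosts: localhost
      gather_facts: no
      vars:
        base_profile:                      # shared settings, illustrative only
          ssh_user: openshift
          sudo: yes
        profiles:
          enterprise: "{{ base_profile }}"
          openshift-enterprise: "{{ base_profile }}"
      tasks:
      - debug:
          msg: "{{ profiles['openshift-enterprise'].ssh_user }}"   # -> openshift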

+ 22 - 0
playbooks/openstack/openshift-cluster/cluster_hosts.yml

@@ -0,0 +1,22 @@
+---
+g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([]))
+                     | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+
+g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([]))
+                     | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+
+g_master_hosts: "{{ (groups['tag_host-type_master']|default([]))
+                     | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
+                     | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+
+g_node_hosts:   "{{ (groups['tag_host-type_node']|default([]))
+                   | intersect((groups['tag_clusterid_' ~ cluster_id]|default([])))
+                   | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+
+g_nfs_hosts:    "{{ (groups['tag_host-type_nfs']|default([]))
+                   | intersect((groups['tag_environment_' ~ cluster_env]|default([]))) }}"
+
+g_all_hosts:    "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
+                    | union(g_lb_hosts) | default([]) }}"

+ 8 - 18
playbooks/openstack/openshift-cluster/config.yml

@@ -1,23 +1,13 @@
-- hosts: localhost
-  gather_facts: no
-  become: no
-  connection: local
-  vars_files:
-  - vars.yml
-  tasks:
-  - set_fact:
-      g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
-      g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
-
+---
 - include: ../../common/openshift-cluster/config.yml
+  vars_files:
+  - ../../openstack/openshift-cluster/vars.yml
+  - ../../openstack/openshift-cluster/cluster_hosts.yml
   vars:
-    g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([]))     | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_node_hosts:   "{{ (groups['tag_host-type_node']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-    g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
-    g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+    g_nodeonmaster: true
+    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    g_sudo: "{{ deployment_vars[deployment_type].sudo }}"
     openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 2
+    openshift_debug_level: "{{ debug_level }}"
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ ansible_default_ipv4.address }}"

+ 39 - 30
playbooks/openstack/openshift-cluster/files/heat_stack.yaml

@@ -4,6 +4,11 @@ description: OpenShift cluster
 
 parameters:
 
+  cluster_env:
+    type: string
+    label: Cluster environment
+    description: Environment of the cluster
+
   cluster_id:
     type: string
     label: Cluster ID
@@ -345,13 +350,14 @@ resources:
               params:
                 cluster_id: { get_param: cluster_id }
                 k8s_type: etcd
-          cluster_id: { get_param: cluster_id }
-          type:       etcd
-          image:      { get_param: etcd_image }
-          flavor:     { get_param: etcd_flavor }
-          key_name:   { get_resource: keypair }
-          net:        { get_resource: net }
-          subnet:     { get_resource: subnet }
+          cluster_env: { get_param: cluster_env }
+          cluster_id:  { get_param: cluster_id }
+          type:        etcd
+          image:       { get_param: etcd_image }
+          flavor:      { get_param: etcd_flavor }
+          key_name:    { get_resource: keypair }
+          net:         { get_resource: net }
+          subnet:      { get_resource: subnet }
           secgrp:
             - { get_resource: etcd-secgrp }
           floating_network: { get_param: floating_ip_pool }
@@ -375,13 +381,14 @@ resources:
               params:
                 cluster_id: { get_param: cluster_id }
                 k8s_type: master
-          cluster_id: { get_param: cluster_id }
-          type:       master
-          image:      { get_param: master_image }
-          flavor:     { get_param: master_flavor }
-          key_name:   { get_resource: keypair }
-          net:        { get_resource: net }
-          subnet:     { get_resource: subnet }
+          cluster_env: { get_param: cluster_env }
+          cluster_id:  { get_param: cluster_id }
+          type:        master
+          image:       { get_param: master_image }
+          flavor:      { get_param: master_flavor }
+          key_name:    { get_resource: keypair }
+          net:         { get_resource: net }
+          subnet:      { get_resource: subnet }
           secgrp:
             - { get_resource: master-secgrp }
           floating_network: { get_param: floating_ip_pool }
@@ -406,14 +413,15 @@ resources:
                 cluster_id: { get_param: cluster_id }
                 k8s_type: node
                 sub_host_type: compute
-          cluster_id: { get_param: cluster_id }
-          type:       node
-          subtype:    compute
-          image:      { get_param: node_image }
-          flavor:     { get_param: node_flavor }
-          key_name:   { get_resource: keypair }
-          net:        { get_resource: net }
-          subnet:     { get_resource: subnet }
+          cluster_env: { get_param: cluster_env }
+          cluster_id:  { get_param: cluster_id }
+          type:        node
+          subtype:     compute
+          image:       { get_param: node_image }
+          flavor:      { get_param: node_flavor }
+          key_name:    { get_resource: keypair }
+          net:         { get_resource: net }
+          subnet:      { get_resource: subnet }
           secgrp:
             - { get_resource: node-secgrp }
           floating_network: { get_param: floating_ip_pool }
@@ -438,14 +446,15 @@ resources:
                 cluster_id: { get_param: cluster_id }
                 k8s_type: node
                 sub_host_type: infra
-          cluster_id: { get_param: cluster_id }
-          type:       node
-          subtype:    infra
-          image:      { get_param: infra_image }
-          flavor:     { get_param: infra_flavor }
-          key_name:   { get_resource: keypair }
-          net:        { get_resource: net }
-          subnet:     { get_resource: subnet }
+          cluster_env: { get_param: cluster_env }
+          cluster_id:  { get_param: cluster_id }
+          type:        node
+          subtype:     infra
+          image:       { get_param: infra_image }
+          flavor:      { get_param: infra_flavor }
+          key_name:    { get_resource: keypair }
+          net:         { get_resource: net }
+          subnet:      { get_resource: subnet }
           secgrp:
             - { get_resource: node-secgrp }
             - { get_resource: infra-secgrp }

+ 7 - 1
playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml

@@ -9,6 +9,11 @@ parameters:
     label: Name
     description: Name
 
+  cluster_env:
+    type: string
+    label: Cluster environment
+    description: Environment of the cluster
+
   cluster_id:
     type: string
     label: Cluster ID
@@ -105,7 +110,8 @@ resources:
       user_data: { get_file: user-data }
       user_data_format: RAW
       metadata:
-        env: { get_param: cluster_id }
+        environment: { get_param: cluster_env }
+        clusterid: { get_param: cluster_id }
         host-type: { get_param: type }
         sub-host-type:    { get_param: subtype }
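
This metadata is what ties the Heat template to the Ansible side: the OpenStack dynamic inventory and the add_host calls in launch.yml turn each key/value pair into a tag_<key>_<value> group. Assuming that mapping, a server created with

    metadata:
      environment: dev
      clusterid: mycluster
      host-type: node
      sub-host-type: compute

would appear in tag_environment_dev, tag_clusterid_mycluster, tag_host-type_node and tag_sub-host-type_compute, which is why splitting the old env key into environment and clusterid requires the matching group-name changes throughout this diff.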
 

+ 5 - 4
playbooks/openstack/openshift-cluster/launch.yml

@@ -29,6 +29,7 @@
 
   - name: Create or Update OpenStack Stack
     command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
+             -P cluster_env={{ cluster_env }}
              -P cluster_id={{ cluster_id }}
              -P cidr={{ openstack_network_cidr }}
              -P dns_nameservers={{ openstack_network_dns | join(",") }}
@@ -71,7 +72,7 @@
       ansible_ssh_host: '{{ item[2] }}'
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: 'tag_env_{{ cluster_id }}, tag_host-type_etcd, tag_sub-host-type_default'
+      groups: 'tag_environment_{{ cluster_env }}, tag_host-type_etcd, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}'
     with_together:
       - parsed_outputs.etcd_names
       - parsed_outputs.etcd_ips
@@ -83,7 +84,7 @@
       ansible_ssh_host: '{{ item[2] }}'
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: 'tag_env_{{ cluster_id }}, tag_host-type_master, tag_sub-host-type_default'
+      groups: 'tag_environment_{{ cluster_env }}, tag_host-type_master, tag_sub-host-type_default, tag_clusterid_{{ cluster_id }}'
     with_together:
       - parsed_outputs.master_names
       - parsed_outputs.master_ips
@@ -95,7 +96,7 @@
       ansible_ssh_host: '{{ item[2] }}'
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_sub-host-type_compute'
+      groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_compute, tag_clusterid_{{ cluster_id }}'
     with_together:
       - parsed_outputs.node_names
       - parsed_outputs.node_ips
@@ -107,7 +108,7 @@
       ansible_ssh_host: '{{ item[2] }}'
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_sub-host-type_infra'
+      groups: 'tag_environment_{{ cluster_env }}, tag_host-type_node, tag_sub-host-type_infra, tag_clusterid_{{ cluster_id }}'
     with_together:
       - parsed_outputs.infra_names
       - parsed_outputs.infra_ips

+ 1 - 1
playbooks/openstack/openshift-cluster/list.yml

@@ -7,7 +7,7 @@
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env_{{ cluster_id }}
+  - set_fact: scratch_group=tag_clusterid_{{ cluster_id }}
     when: cluster_id != ''
   - set_fact: scratch_group=all
     when: cluster_id == ''

+ 1 - 2
playbooks/openstack/openshift-cluster/terminate.yml

@@ -6,13 +6,12 @@
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: cluster_group=tag_env_{{ cluster_id }}
   - add_host:
       name: "{{ item }}"
       groups: oo_hosts_to_terminate
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[cluster_group] | default([])
+    with_items: (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id]|default([]))
 
 - name: Unsubscribe VMs
   hosts: oo_hosts_to_terminate

+ 3 - 8
playbooks/openstack/openshift-cluster/update.yml

@@ -1,17 +1,12 @@
 ---
 - name: Populate oo_hosts_to_update group
   hosts: localhost
-  become: no
   connection: local
+  become: no
   gather_facts: no
-  vars:
-    g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}"
-    g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}"
-    g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}"
-    g_node_hosts:   "{{ (groups['tag_host-type_node']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
-
   vars_files:
   - vars.yml
+  - cluster_hosts.yml
   tasks:
   - name: Evaluate oo_hosts_to_update
     add_host:
@@ -19,7 +14,7 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}"
+    with_items: "{{ g_all_hosts | default([]) }}"
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 

+ 9 - 4
playbooks/openstack/openshift-cluster/vars.yml

@@ -1,4 +1,5 @@
 ---
+debug_level: 2
 openstack_infra_heat_stack:     "{{ lookup('oo_option', 'infra_heat_stack' ) |
                                     default('files/heat_stack.yaml',         True) }}"
 openstack_network_cidr:         "{{ lookup('oo_option', 'net_cidr'         ) |
@@ -19,6 +20,11 @@ openstack_flavor:
   infra:  "{{ lookup('oo_option', 'infra_flavor'     ) | default('m1.small',  True) }}"
   node:   "{{ lookup('oo_option', 'node_flavor'      ) | default('m1.medium', True) }}"
 
+deployment_rhel7_ent_base:
+  image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}"
+  ssh_user: openshift
+  sudo: yes
+
 deployment_vars:
   origin:
     image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}"
@@ -28,7 +34,6 @@ deployment_vars:
     image:
     ssh_user: root
     sudo: no
-  enterprise:
-    image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.1-20150224.0.x86_64', True) }}"
-    ssh_user: openshift
-    sudo: yes
+  enterprise: "{{ deployment_rhel7_ent_base }}"
+  openshift-enterprise: "{{ deployment_rhel7_ent_base }}"
+  atomic-enterprise: "{{ deployment_rhel7_ent_base }}"

+ 1 - 0
roles/ansible/tasks/main.yml

@@ -3,6 +3,7 @@
 
 - name: Install Ansible
   action: "{{ ansible_pkg_mgr }} name=ansible state=present"
+  when: not openshift.common.is_containerized | bool
 
 - include: config.yml
   vars:
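
This guard recurs across the package-installing roles in this diff (ansible, cockpit, copr_cli, flannel, etcd): on a containerized install there is no RPM transaction to run, so the install task is skipped and the component runs from an image instead. One subtlety worth noting is operator precedence: in Jinja2 the filter binds tighter than not, so the expression evaluates as intended, e.g.:

    - debug:
        # parsed as: not ('false' | bool)  ->  True
        msg: "{{ not 'false' | bool }}"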

+ 1 - 0
roles/cockpit/tasks/main.yml

@@ -6,6 +6,7 @@
     - cockpit-shell
     - cockpit-bridge
     - "{{ cockpit_plugins }}"
+  when: not openshift.common.is_containerized | bool
 
 - name: Enable cockpit-ws
   service:

+ 1 - 0
roles/copr_cli/tasks/main.yml

@@ -1,2 +1,3 @@
 ---
 - action: "{{ ansible_pkg_mgr }} name=copr-cli state=present"
+  when: not openshift.common.is_containerized | bool

+ 1 - 1
roles/docker/README.md

@@ -1,4 +1,4 @@
-Role Name
+Docker
 =========
 
 Ensures docker package is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.

+ 4 - 1
roles/docker/handlers/main.yml

@@ -1,7 +1,10 @@
 ---
 
 - name: restart docker
-  service: name=docker state=restarted
+  service:
+    name: docker
+    state: restarted
+  when: not (docker_service_status_changed | default(false) | bool)
 
 - name: restart udev
   service:

+ 9 - 1
roles/docker/tasks/main.yml

@@ -2,9 +2,17 @@
 # tasks file for docker
 - name: Install docker
   action: "{{ ansible_pkg_mgr }} name=docker state=present"
+  when: not openshift.common.is_atomic | bool
   
 - name: enable and start the docker service
-  service: name=docker enabled=yes state=started
+  service:
+    name: docker
+    enabled: yes
+    state: started
+  register: start_result
+
+- set_fact:
+    docker_service_status_changed: "{{ start_result | changed }}"
 
 - include: udev_workaround.yml
   when: docker_udev_workaround | default(False)
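
The register plus set_fact pair feeds the guard on the "restart docker" handler above: if this run is what started docker, the handler is skipped so the daemon is not bounced twice. The pattern in isolation (service name hypothetical):

    - name: enable and start the example service
      service:
        name: example
        enabled: yes
        state: started
      register: start_result

    - set_fact:
        example_service_status_changed: "{{ start_result | changed }}"

    # handler, defined elsewhere:
    - name: restart example
      service:
        name: example
        state: restarted
      when: not (example_service_status_changed | default(false) | bool)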

+ 1 - 0
roles/etcd/defaults/main.yaml

@@ -1,4 +1,5 @@
 ---
+etcd_service: "{{ 'etcd' if not openshift.common.is_containerized else 'etcd_container' }}"
 etcd_interface: "{{ ansible_default_ipv4.interface }}"
 etcd_client_port: 2379
 etcd_peer_port: 2380

+ 3 - 2
roles/etcd/handlers/main.yml

@@ -1,4 +1,5 @@
 ---
+
 - name: restart etcd
-  service: name=etcd state=restarted
-  when: not etcd_service_status_changed | default(false)
+  service: name={{ etcd_service }} state=restarted
+  when: not (etcd_service_status_changed | default(false) | bool)

+ 54 - 8
roles/etcd/tasks/main.yml

@@ -9,21 +9,67 @@
 
 - name: Install etcd
   action: "{{ ansible_pkg_mgr }} name=etcd-2.* state=present"
+  when: not openshift.common.is_containerized | bool
+
+- name: Get docker images
+  command: docker images
+  changed_when: false
+  when: openshift.common.is_containerized | bool
+  register: docker_images
+
+- name: Pull etcd container
+  command: docker pull {{ openshift.etcd.etcd_image }}
+  when: openshift.common.is_containerized | bool and openshift.etcd.etcd_image not in docker_images.stdout
+  
+- name: Wait for etcd image
+  command: >
+      docker images
+  register: docker_images
+  until: openshift.etcd.etcd_image in docker_images.stdout
+  retries: 30
+  delay: 10
+  changed_when: false
+  when: openshift.common.is_containerized | bool
+
+- name: Install etcd container service file
+  template:
+    dest: "/etc/systemd/system/etcd_container.service"
+    src: etcd.docker.service
+  register: install_etcd_result
+  when: openshift.common.is_containerized | bool
+
+- name: Ensure etcd datadir exists
+  when: openshift.common.is_containerized | bool
+  file:
+    path: "{{ etcd_data_dir }}"
+    state: directory
+    mode: 0700
+
+- name: Disable system etcd when containerized
+  when: openshift.common.is_containerized | bool
+  service:
+    name: etcd
+    state: stopped
+    enabled: no
+
+- name: Reload systemd units
+  command: systemctl daemon-reload
+  when: openshift.common.is_containerized | bool and (install_etcd_result | changed)
 
 - name: Validate permissions on the config dir
   file:
     path: "{{ etcd_conf_dir }}"
     state: directory
-    owner: etcd
-    group: etcd
+    owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+    group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
     mode: 0700
 
 - name: Validate permissions on certificate files
   file:
     path: "{{ item }}"
     mode: 0600
-    group: etcd
-    owner: etcd
+    owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+    group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
   when: etcd_url_scheme == 'https'
   with_items:
   - "{{ etcd_ca_file }}"
@@ -34,8 +80,8 @@
   file:
     path: "{{ item }}"
     mode: 0600
-    group: etcd
-    owner: etcd
+    owner: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
+    group: "{{ 'etcd' if not openshift.common.is_containerized | bool else omit }}"
   when: etcd_peer_url_scheme == 'https'
   with_items:
   - "{{ etcd_peer_ca_file }}"
@@ -52,10 +98,10 @@
 
 - name: Enable etcd
   service:
-    name: etcd
+    name: "{{ etcd_service }}"
     state: started
     enabled: yes
   register: start_result
 
 - set_fact:
-    etcd_service_status_changed = start_result | changed
+    etcd_service_status_changed: "{{ start_result | changed }}"
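
The owner/group expressions above lean on Ansible's special omit placeholder: when the host is containerized (so no local etcd user or group exists), the parameter is dropped from the module call entirely rather than passed as an empty value. The idiom in isolation (path and variable hypothetical):

    - file:
        path: /tmp/example
        state: touch
        # pass owner only when a local service account is expected; otherwise drop the parameter
        owner: "{{ 'etcd' if manage_owner | default(false) | bool else omit }}"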

+ 11 - 11
roles/etcd/templates/etcd.conf.j2

@@ -15,13 +15,13 @@ ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
 ETCD_NAME=default
 {% endif %}
 ETCD_DATA_DIR={{ etcd_data_dir }}
-#ETCD_SNAPSHOT_COUNTER="10000"
-ETCD_HEARTBEAT_INTERVAL="500"
-ETCD_ELECTION_TIMEOUT="2500"
+#ETCD_SNAPSHOT_COUNTER=10000
+ETCD_HEARTBEAT_INTERVAL=500
+ETCD_ELECTION_TIMEOUT=2500
 ETCD_LISTEN_CLIENT_URLS={{ etcd_listen_client_urls }}
-#ETCD_MAX_SNAPSHOTS="5"
-#ETCD_MAX_WALS="5"
-#ETCD_CORS=""
+#ETCD_MAX_SNAPSHOTS=5
+#ETCD_MAX_WALS=5
+#ETCD_CORS=
 
 {% if groups[etcd_peers_group] and groups[etcd_peers_group] | length > 1 %}
 #[cluster]
@@ -29,15 +29,15 @@ ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_initial_advertise_peer_urls }}
 ETCD_INITIAL_CLUSTER={{ initial_cluster() }}
 ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
 ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
-#ETCD_DISCOVERY=""
-#ETCD_DISCOVERY_SRV=""
-#ETCD_DISCOVERY_FALLBACK="proxy"
-#ETCD_DISCOVERY_PROXY=""
+#ETCD_DISCOVERY=
+#ETCD_DISCOVERY_SRV=
+#ETCD_DISCOVERY_FALLBACK=proxy
+#ETCD_DISCOVERY_PROXY=
 {% endif %}
 ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
 
 #[proxy]
-#ETCD_PROXY="off"
+#ETCD_PROXY=off
 
 #[security]
 {% if etcd_url_scheme == 'https' -%}
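
Dropping the quotes matters because this same etcd.conf is now also consumed by docker via --env-file (see etcd.docker.service below): unlike systemd's EnvironmentFile, docker's --env-file does no quote stripping, so the quote characters would become part of the value. Illustratively:

    ETCD_HEARTBEAT_INTERVAL="500"   # via --env-file, etcd receives "500", quotes included
    ETCD_HEARTBEAT_INTERVAL=500     # etcd receives 500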

+ 13 - 0
roles/etcd/templates/etcd.docker.service

@@ -0,0 +1,13 @@
+[Unit]
+Description=The Etcd Server container
+After=docker.service
+
+[Service]
+EnvironmentFile=/etc/etcd/etcd.conf
+ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/var/lib/etcd:z -v /etc/etcd:/etc/etcd:z --env-file=/etc/etcd/etcd.conf --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStop=/usr/bin/docker stop {{ etcd_service }}
+Restart=always
+
+[Install]
+WantedBy=multi-user.target

+ 1 - 0
roles/flannel/tasks/main.yml

@@ -2,6 +2,7 @@
 - name: Install flannel
   sudo: true
   action: "{{ ansible_pkg_mgr }} name=flannel state=present"
+  when: not openshift.common.is_containerized | bool
 
 - name: Set flannel etcd url
   sudo: true

+ 0 - 0
roles/fluentd_master/tasks/main.yml


Some files were not shown because too many files changed in this diff