
Merge pull request #158 from openshift/master

Merge master into INT for first v3 INT deploy
Troy Dawson, 10 years ago
Parent
Current commit
7f7b582a7b
100 files changed, with 2,468 additions and 395 deletions
  1. BUILD.md (+44, -0)
  2. README.md (+5, -1)
  3. README_AWS.md (+44, -4)
  4. README_GCE.md (+22, -5)
  5. README_OSE.md (+142, -0)
  6. README_libvirt.md (+92, -0)
  7. ansible.cfg (+23, -0)
  8. bin/cluster (+190, -0)
  9. bin/ohi (+110, -0)
  10. bin/openshift-ansible-bin.spec (+65, -0)
  11. bin/openshift_ansible.conf.example (+6, -0)
  12. bin/openshift_ansible/__init__.py (+0, -0)
  13. bin/awsutil.py (+54, -16)
  14. bin/opssh (+57, -22)
  15. bin/oscp (+25, -3)
  16. bin/ossh (+24, -2)
  17. bin/ossh_bash_completion (+22, -1)
  18. cluster.sh (+0, -113)
  19. filter_plugins/oo_filters.py (+71, -43)
  20. inventory/aws/group_vars/all (+2, -0)
  21. inventory/byo/group_vars/all (+28, -0)
  22. inventory/byo/hosts (+10, -0)
  23. inventory/gce/group_vars/all (+2, -0)
  24. inventory/libvirt/group_vars/all (+2, -0)
  25. inventory/libvirt/hosts (+2, -0)
  26. inventory/multi_ec2.py (+15, -2)
  27. inventory/openshift-ansible-inventory.spec (+50, -0)
  28. playbooks/adhoc/noc/filter_plugins (+1, -0)
  29. playbooks/adhoc/noc/get_zabbix_problems.yml (+41, -0)
  30. playbooks/adhoc/noc/roles (+1, -0)
  31. playbooks/aws/ansible-tower/launch.yml (+1, -1)
  32. playbooks/aws/openshift-cluster/filter_plugins (+1, -0)
  33. playbooks/aws/openshift-cluster/launch.yml (+62, -0)
  34. playbooks/aws/openshift-cluster/launch_instances.yml (+63, -0)
  35. playbooks/aws/openshift-cluster/list.yml (+17, -0)
  36. playbooks/aws/openshift-cluster/roles (+1, -0)
  37. playbooks/aws/openshift-cluster/terminate.yml (+14, -0)
  38. playbooks/aws/openshift-cluster/update.yml (+13, -0)
  39. playbooks/aws/openshift-cluster/vars.yml (+1, -0)
  40. playbooks/aws/openshift-master/config.yml (+12, -30)
  41. playbooks/aws/openshift-master/launch.yml (+7, -4)
  42. playbooks/aws/openshift-master/terminate.yml (+52, -0)
  43. playbooks/aws/openshift-master/vars.yml (+1, -0)
  44. playbooks/aws/openshift-node/config.yml (+94, -36)
  45. playbooks/aws/openshift-node/launch.yml (+10, -5)
  46. playbooks/aws/openshift-node/terminate.yml (+52, -0)
  47. playbooks/aws/openshift-node/vars.yml (+1, -0)
  48. playbooks/byo/config.yml (+6, -0)
  49. playbooks/byo/filter_plugins (+1, -0)
  50. playbooks/byo/openshift-master/config.yml (+9, -0)
  51. playbooks/byo/openshift-master/filter_plugins (+1, -0)
  52. playbooks/byo/openshift-master/roles (+1, -0)
  53. playbooks/byo/openshift-node/config.yml (+79, -0)
  54. playbooks/byo/openshift-node/filter_plugins (+1, -0)
  55. playbooks/byo/openshift-node/roles (+1, -0)
  56. playbooks/byo/roles (+1, -0)
  57. playbooks/gce/openshift-cluster/filter_plugins (+1, -0)
  58. playbooks/gce/openshift-cluster/launch.yml (+62, -0)
  59. playbooks/gce/openshift-cluster/launch_instances.yml (+44, -0)
  60. playbooks/gce/openshift-cluster/list.yml (+17, -0)
  61. playbooks/gce/openshift-cluster/roles (+1, -0)
  62. playbooks/gce/openshift-cluster/terminate.yml (+20, -0)
  63. playbooks/gce/openshift-cluster/update.yml (+13, -0)
  64. playbooks/gce/openshift-cluster/vars.yml (+1, -0)
  65. playbooks/gce/openshift-master/config.yml (+7, -29)
  66. playbooks/gce/openshift-master/launch.yml (+10, -4)
  67. playbooks/gce/openshift-master/terminate.yml (+7, -12)
  68. playbooks/gce/openshift-master/vars.yml (+1, -0)
  69. playbooks/gce/openshift-node/config.yml (+86, -35)
  70. playbooks/gce/openshift-node/launch.yml (+10, -14)
  71. playbooks/gce/openshift-node/terminate.yml (+7, -12)
  72. playbooks/gce/openshift-node/vars.yml (+1, -0)
  73. playbooks/libvirt/openshift-cluster/filter_plugins (+1, -0)
  74. playbooks/libvirt/openshift-cluster/launch.yml (+65, -0)
  75. playbooks/libvirt/openshift-cluster/launch_instances.yml (+102, -0)
  76. playbooks/libvirt/openshift-cluster/list.yml (+43, -0)
  77. playbooks/libvirt/openshift-cluster/roles (+1, -0)
  78. playbooks/libvirt/openshift-cluster/terminate.yml (+41, -0)
  79. playbooks/libvirt/openshift-cluster/vars.yml (+7, -0)
  80. playbooks/libvirt/openshift-master/config.yml (+21, -0)
  81. playbooks/libvirt/openshift-master/filter_plugins (+1, -0)
  82. playbooks/libvirt/openshift-master/roles (+1, -0)
  83. playbooks/libvirt/openshift-master/vars.yml (+1, -0)
  84. playbooks/libvirt/openshift-node/config.yml (+102, -0)
  85. playbooks/libvirt/openshift-node/filter_plugins (+1, -0)
  86. playbooks/libvirt/openshift-node/roles (+1, -0)
  87. playbooks/libvirt/openshift-node/vars.yml (+1, -0)
  88. playbooks/libvirt/templates/domain.xml (+62, -0)
  89. playbooks/libvirt/templates/meta-data (+2, -0)
  90. playbooks/libvirt/templates/user-data (+10, -0)
  91. rel-eng/packages/.readme (+3, -0)
  92. rel-eng/packages/openshift-ansible-bin (+1, -0)
  93. rel-eng/packages/openshift-ansible-inventory (+1, -0)
  94. rel-eng/tito.props (+5, -0)
  95. roles/ansible_tower/tasks/main.yaml (+7, -0)
  96. roles/docker/tasks/main.yml (+1, -1)
  97. roles/openshift_ansible_inventory/README.md (+41, -0)
  98. roles/openshift_ansible_inventory/defaults/main.yml (+4, -0)
  99. roles/openshift_ansible_inventory/handlers/main.yml (+2, -0)
  100. roles/openshift_ansible_inventory/meta/main.yml (+0, -0)

+ 44 - 0
BUILD.md

@@ -0,0 +1,44 @@
+# openshift-ansible RPM Build instructions
+We use tito to make building and tracking revisions easy.
+
+For more information on tito, please see the [Tito home page](http://rm-rf.ca/tito "Tito home page").
+
+
+## Build openshift-ansible-bin
+- Change into openshift-ansible/bin
+```
+cd openshift-ansible/bin
+```
+- Build a test package (no tagging needed)
+```
+tito build --test --rpm
+```
+- Tag a new build (bumps version number and adds log entries)
+```
+tito tag
+```
+- Follow the on screen tito instructions to push the tags
+- Build a new package based on the latest tag information
+```
+tito build --rpm
+```
+
+
+## Build openshift-ansible-inventory
+- Change into openshift-ansible/inventory
+```
+cd openshift-ansible/inventory
+```
+- Build a test package (no tagging needed)
+```
+tito build --test --rpm
+```
+- Tag a new build (bumps version number and adds log entries)
+```
+tito tag
+```
+- Follow the on screen tito instructions to push the tags
+- Build a new package based on the latest tag information
+```
+tito build --rpm
+```

+ 5 - 1
README.md

@@ -20,10 +20,14 @@ Setup
 - Setup for a specific cloud:
   - [AWS](README_AWS.md)
   - [GCE](README_GCE.md)
+  - [local VMs](README_libvirt.md)
+
+- Build
+  - [How to build the openshift-ansible rpms](BUILD.md)
 
 - Directory Structure:
   - [cloud.rb](cloud.rb) - light wrapper around Ansible
-  - [cluster.sh](cluster.sh) - easily create OpenShift 3 clusters
+  - [bin/cluster](bin/cluster) - python script to easily create OpenShift 3 clusters
   - [filter_plugins/](filter_plugins) - custom filters used to manipulate data in Ansible
   - [inventory/](inventory) - houses Ansible dynamic inventory scripts
   - [lib/](lib) - library components of cloud.rb

+ 44 - 4
README_AWS.md

@@ -14,7 +14,7 @@ Create a credentials file
    export AWS_ACCESS_KEY_ID='AKIASTUFF'
    export AWS_SECRET_ACCESS_KEY='STUFF'
 ```
-1. source this file
+2. source this file
 ```
   source ~/.aws_creds
 ```
@@ -23,7 +23,7 @@ Note: You must source this file in each shell that you want to run cloud.rb
 
 (Optional) Setup your $HOME/.ssh/config file
 -------------------------------------------
-In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config' 
+In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config'
 to setup a private key file to allow ansible to connect to the created hosts.
 
 To do so, add the the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to login on AWS.
@@ -34,6 +34,24 @@ Host *.compute-1.amazonaws.com
 
 Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances.
 
+(Optional) Choose where the cluster will be launched
+----------------------------------------------------
+
+By default, a cluster is launched with the following configuration:
+
+- Instance type: m3.large
+- AMI: ami-307b3658
+- Region: us-east-1
+- Keypair name: libra
+- Security group: public
+
+If needed, these values can be changed by setting environment variables on your system.
+
+- export ec2_instance_type='m3.large'
+- export ec2_ami='ami-307b3658'
+- export ec2_region='us-east-1'
+- export ec2_keypair='libra'
+- export ec2_security_group='public'
 
 Install Dependencies
 --------------------
@@ -51,7 +69,29 @@ OSX:
 Test The Setup
 --------------
 1. cd openshift-ansible
-1. Try to list all instances:
+1. Try to list all instances (Passing an empty string as the cluster_id
+argument will result in all ec2 instances being listed)
+```
+  bin/cluster list aws ''
+```
+
+Creating a cluster
+------------------
+1. To create a cluster with one master and two nodes
+```
+  bin/cluster create aws <cluster-id>
+```
+
+Updating a cluster
+---------------------
+1. To update the cluster
+```
+  bin/cluster update aws <cluster-id>
+```
+
+Terminating a cluster
+---------------------
+1. To terminate the cluster
 ```
-  ./cloud.rb aws list
+  bin/cluster terminate aws <cluster-id>
 ```

+ 22 - 5
README_GCE.md

@@ -4,7 +4,7 @@ GCE Setup Instructions
 
 Get a gce service key
 ---------------------
-1. ask your GCE project administrator for a GCE service key
+1. Ask your GCE project administrator for a GCE service key
 
 Note: If your GCE project does not show a Service Account under <Project>/APIs & auth/Credentials, you will need to use "Create new Client ID" to create a Service Account before your administrator can create the service key for you.
 
@@ -65,12 +65,29 @@ Install Dependencies
 Test The Setup
 --------------
 1. cd openshift-ansible/
-2. Try to list all instances:
+1. Try to list all instances (Passing an empty string as the cluster_id
+argument will result in all gce instances being listed)
 ```
-  ./cloud.rb gce list
+  bin/cluster list gce ''
 ```
 
-3. Try to create an instance:
+Creating a cluster
+------------------
+1. To create a cluster with one master and two nodes
 ```
-  ./cloud.rb gce launch -n ${USER}-node1 -e int --type os3-node
+  bin/cluster create gce <cluster-id>
+```
+
+Updating a cluster
+---------------------
+1. To update the cluster
+```
+  bin/cluster update gce <cluster-id>
+```
+
+Terminating a cluster
+---------------------
+1. To terminate the cluster
+```
+  bin/cluster terminate gce <cluster-id>
 ```

+ 142 - 0
README_OSE.md

@@ -0,0 +1,142 @@
+# Installing OSEv3 from dev puddles using ansible
+
+* [Requirements](#requirements)
+* [Caveats](#caveats)
+* [Known Issues](#known-issues)
+* [Configuring the host inventory](#configuring-the-host-inventory)
+* [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups)
+* [Running the ansible playbooks](#running-the-ansible-playbooks)
+* [Post-ansible steps](#post-ansible-steps)
+
+## Requirements
+* ansible
+  * Tested using ansible-1.8.2-1.fc20.noarch, but should work with version 1.8+
+  * Available in Fedora channels
+  * Available for EL with EPEL and Optional channel
+* One or more RHEL 7.1 VMs
+* ssh key based auth for the root user needs to be pre-configured from the host
+  running ansible to the remote hosts
+* A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/
+  
+  ```sh
+  git clone https://github.com/openshift/openshift-ansible.git
+  cd openshift-ansible
+  ```
+
+## Caveats
+This ansible repo is currently under heavy revision for providing OSE support;
+the following items are highly likely to change before the OSE support is
+merged into the upstream repo:
+  * the current git branch for testing
+  * how the inventory file should be configured
+  * variables that need to be set
+  * bootstrapping steps
+  * other configuration steps
+
+## Known Issues
+* Host subscriptions are not configurable yet, the hosts need to be
+  pre-registered with subscription-manager or have the RHEL base repo
+  pre-configured. If using subscription-manager the following commands will
+  disable all but the rhel-7-server rhel-7-server-extras and
+  rhel-server7-ose-beta repos:
+```sh
+subscription-manager repos --disable="*"
+subscription-manager repos \
+--enable="rhel-7-server-rpms" \
+--enable="rhel-7-server-extras-rpms" \
+--enable="rhel-server-7-ose-beta-rpms"
+```
+* Configuration of router is not automated yet
+* Configuration of docker-registry is not automated yet
+* End-to-end testing has not been completed yet using this module
+* root user is used for all ansible actions; eventually we will support using
+  a non-root user with sudo.
+
+## Configuring the host inventory
+[Ansible docs](http://docs.ansible.com/intro_inventory.html)
+
+Example inventory file for configuring one master and two nodes for the test
+environment. This can be configured in the default inventory file
+(/etc/ansible/hosts), or using a custom file and passing the --inventory
+option to ansible-playbook.
+
+/etc/ansible/hosts:
+```ini
+# This is an example of a bring your own (byo) host inventory
+
+# host group for masters
+[masters]
+ose3-master.example.com
+
+# host group for nodes
+[nodes]
+ose3-node[1:2].example.com
+```
+
+The hostnames above should resolve both from the hosts themselves and
+the host where ansible is running (if different).
+
+## Creating the default variables for the hosts and host groups
+[Ansible docs](http://docs.ansible.com/intro_inventory.html#id9)
+
+#### Group vars for all hosts
+/etc/ansible/group_vars/all:
+```yaml
+---
+# Assume that we want to use the root as the ssh user for all hosts
+ansible_ssh_user: root
+
+# Default debug level for all OpenShift hosts
+openshift_debug_level: 4
+
+# Set the OpenShift deployment type for all hosts
+openshift_deployment_type: enterprise
+
+# Override the default registry for development
+openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# To use the latest OpenShift Enterprise Errata puddle:
+#openshift_additional_repos:
+#- id: ose-devel
+#  name: ose-devel
+#  baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os
+#  enabled: 1
+#  gpgcheck: 0
+# To use the latest OpenShift Enterprise Whitelist puddle:
+openshift_additional_repos:
+- id: ose-devel
+  name: ose-devel
+  baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os
+  enabled: 1
+  gpgcheck: 0
+
+```
+
+## Running the ansible playbooks
+From the openshift-ansible checkout run:
+```sh
+ansible-playbook playbooks/byo/config.yml
+```
+**Note:** this assumes that the host inventory is /etc/ansible/hosts and the
+group_vars are defined in /etc/ansible/group_vars, if using a different
+inventory file (and a group_vars directory that is in the same directory as
+the directory as the inventory) use the -i option for ansible-playbook.
+
+## Post-ansible steps
+#### Create the default router
+On the master host:
+```sh
+systemctl restart openshift-sdn-master
+openshift ex router --create=true \
+  --credentials=/var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig \
+  --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}'
+```
+
+#### Create the default docker-registry
+On the master host:
+```sh
+openshift ex registry --create=true \
+  --credentials=/var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig \
+  --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}' \
+  --mount-host=/var/lib/openshift/docker-registry
+```

+ 92 - 0
README_libvirt.md

@@ -0,0 +1,92 @@
+
+LIBVIRT Setup instructions
+==========================
+
+`libvirt` is an `openshift-ansible` provider that uses `libvirt` to create local Fedora VMs that are provisioned exactly the same way that cloud VMs would be provisioned.
+
+This makes `libvirt` useful to develop, test and debug Openshift and openshift-ansible locally on the developer’s workstation before going to the cloud.
+
+Install dependencies
+--------------------
+
+1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
+2. Install [ebtables](http://ebtables.netfilter.org/)
+3. Install [qemu](http://wiki.qemu.org/Main_Page)
+4. Install [libvirt](http://libvirt.org/)
+5. Enable and start the libvirt daemon, e.g:
+   * ``systemctl enable libvirtd``
+   * ``systemctl start libvirtd``
+6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
+7. Check that your `$HOME` is accessible to the qemu user²
+
+#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
+
+You can test it with the following command:
+```
+virsh -c qemu:///system pool-list
+```
+
+If you have access error messages, please read https://libvirt.org/acl.html and https://libvirt.org/aclpolkit.html .
+
+In short, if your libvirt has been compiled with Polkit support (ex: Arch, Fedora 21), you can create `/etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules` as follows to grant full access to libvirt to `$USER`
+
+```
+sudo /bin/sh -c "cat - > /etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules" << EOF
+polkit.addRule(function(action, subject) {
+        if (action.id == "org.libvirt.unix.manage" &&
+            subject.user == "$USER") {
+                return polkit.Result.YES;
+                polkit.log("action=" + action);
+                polkit.log("subject=" + subject);
+        }
+});
+EOF
+```
+
+If your libvirt has not been compiled with Polkit (ex: Ubuntu 14.04.1 LTS), check the permissions on the libvirt unix socket:
+
+```
+ls -l /var/run/libvirt/libvirt-sock
+srwxrwx--- 1 root libvirtd 0 févr. 12 16:03 /var/run/libvirt/libvirt-sock
+
+usermod -a -G libvirtd $USER
+# $USER needs to logout/login to have the new group be taken into account
+```
+
+(Replace `$USER` with your login name)
+
+#### ² Qemu will run with a specific user. It must have access to the VMs drives
+
+All the disk drive resources needed by the VMs (Fedora disk image, cloud-init files) are put inside `~/libvirt-storage-pool-openshift/`.
+
+As we’re using the `qemu:///system` instance of libvirt, qemu will run with a specific `user:group` distinct from your user. It is configured in `/etc/libvirt/qemu.conf`. That qemu user must have access to that libvirt storage pool.
+
+If your `$HOME` is world readable, everything is fine. If your `$HOME` is private, `ansible` will fail with an error message like:
+
+```
+error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
+```
+
+In order to fix that issue, you have several possibilities:
+* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
+  * backed by a filesystem with a lot of free disk space
+  * writable by your user;
+  * accessible by the qemu user.
+* Grant the qemu user access to the storage pool.
+
+On Arch:
+
+```
+setfacl -m g:kvm:--x ~
+```
+
+Test the setup
+--------------
+
+```
+cd openshift-ansible
+
+bin/cluster create -m 1 -n 3 libvirt lenaic
+
+bin/cluster terminate libvirt lenaic
+```

+ 23 - 0
ansible.cfg

@@ -0,0 +1,23 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts. Only global defaults are
+# left uncommented
+
+[defaults]
+# Add the roles directory to the roles path
+roles_path = roles/
+
+# Set the log_path
+log_path = /tmp/ansible.log
+
+# Uncomment to use the provided BYO inventory
+#hostfile = inventory/byo/hosts
+
+# Uncomment to use the provided GCE dynamic inventory script
+#hostfile = inventory/gce/gce.py
+
+# Uncomment to use the provided AWS dynamic inventory script
+#hostfile = inventory/aws/ec2.py

+ 190 - 0
bin/cluster

@@ -0,0 +1,190 @@
+#!/usr/bin/env python2
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import argparse
+import ConfigParser
+import sys
+import os
+
+
+class Cluster(object):
+    """
+    Control and Configuration Interface for OpenShift Clusters
+    """
+    def __init__(self):
+        # setup ansible ssh environment
+        if 'ANSIBLE_SSH_ARGS' not in os.environ:
+            os.environ['ANSIBLE_SSH_ARGS'] = (
+                '-o ForwardAgent=yes '
+                '-o StrictHostKeyChecking=no '
+                '-o UserKnownHostsFile=/dev/null '
+                '-o ControlMaster=auto '
+                '-o ControlPersist=600s '
+            )
+
+    def create(self, args):
+        """
+        Create an OpenShift cluster for given provider
+        :param args: command line arguments provided by user
+        :return: exit status from run command
+        """
+        env = {'cluster_id': args.cluster_id}
+        playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider)
+        inventory = self.setup_provider(args.provider)
+
+        env['num_masters'] = args.masters
+        env['num_nodes'] = args.nodes
+
+        return self.action(args, inventory, env, playbook)
+
+    def terminate(self, args):
+        """
+        Destroy OpenShift cluster
+        :param args: command line arguments provided by user
+        :return: exit status from run command
+        """
+        env = {'cluster_id': args.cluster_id}
+        playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
+        inventory = self.setup_provider(args.provider)
+
+        return self.action(args, inventory, env, playbook)
+
+    def list(self, args):
+        """
+        List VMs in cluster
+        :param args: command line arguments provided by user
+        :return: exit status from run command
+        """
+        env = {'cluster_id': args.cluster_id}
+        playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
+        inventory = self.setup_provider(args.provider)
+
+        return self.action(args, inventory, env, playbook)
+
+    def update(self, args):
+        """
+        Update to latest OpenShift across clustered VMs
+        :param args: command line arguments provided by user
+        :return: exit status from run command
+        """
+        env = {'cluster_id': args.cluster_id}
+        playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
+        inventory = self.setup_provider(args.provider)
+
+        return self.action(args, inventory, env, playbook)
+
+    def setup_provider(self, provider):
+        """
+        Setup ansible playbook environment
+        :param provider: command line arguments provided by user
+        :return: path to inventory for given provider
+        """
+        config = ConfigParser.ConfigParser()
+        if 'gce' == provider:
+            config.readfp(open('inventory/gce/gce.ini'))
+
+            for key in config.options('gce'):
+                os.environ[key] = config.get('gce', key)
+
+            inventory = '-i inventory/gce/gce.py'
+        elif 'aws' == provider:
+            config.readfp(open('inventory/aws/ec2.ini'))
+
+            for key in config.options('ec2'):
+                os.environ[key] = config.get('ec2', key)
+
+            inventory = '-i inventory/aws/ec2.py'
+        elif 'libvirt' == provider:
+            inventory = '-i inventory/libvirt/hosts'
+        else:
+            # this code should never be reached
+            raise ValueError("invalid PROVIDER {}".format(provider))
+
+        return inventory
+
+    def action(self, args, inventory, env, playbook):
+        """
+        Build ansible-playbook command line and execute
+        :param args: command line arguments provided by user
+        :param inventory: derived provider library
+        :param env: environment variables for kubernetes
+        :param playbook: ansible playbook to execute
+        :return: exit status from ansible-playbook command
+        """
+
+        verbose = ''
+        if args.verbose > 0:
+            verbose = '-{}'.format('v' * args.verbose)
+
+        ansible_env = '-e \'{}\''.format(
+            ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()])
+        )
+
+        command = 'ansible-playbook {} {} {} {}'.format(
+            verbose, inventory, ansible_env, playbook
+        )
+
+        if args.verbose > 1:
+            command = 'time {}'.format(command)
+
+        if args.verbose > 0:
+            sys.stderr.write('RUN [{}]\n'.format(command))
+            sys.stderr.flush()
+
+        return os.system(command)
+
+
+if __name__ == '__main__':
+    """
+    Implemented to support writing unit tests
+    """
+
+    cluster = Cluster()
+
+    providers = ['gce', 'aws', 'libvirt']
+    parser = argparse.ArgumentParser(
+        description='Python wrapper to ensure proper environment for OpenShift ansible playbooks',
+    )
+    parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity')
+    parser.add_argument('--version', action='version', version='%(prog)s 0.2')
+
+    meta_parser = argparse.ArgumentParser(add_help=False)
+    meta_parser.add_argument('provider', choices=providers, help='provider')
+    meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
+
+    action_parser = parser.add_subparsers(dest='action', title='actions', description='Choose from valid actions')
+
+    create_parser = action_parser.add_parser('create', help='Create a cluster', parents=[meta_parser])
+    create_parser.add_argument('-m', '--masters', default=1, type=int, help='number of masters to create in cluster')
+    create_parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster')
+    create_parser.set_defaults(func=cluster.create)
+
+    terminate_parser = action_parser.add_parser('terminate', help='Destroy a cluster', parents=[meta_parser])
+    terminate_parser.add_argument('-f', '--force', action='store_true', help='Destroy cluster without confirmation')
+    terminate_parser.set_defaults(func=cluster.terminate)
+
+    update_parser = action_parser.add_parser('update', help='Update OpenShift across cluster', parents=[meta_parser])
+    update_parser.add_argument('-f', '--force', action='store_true', help='Update cluster without confirmation')
+    update_parser.set_defaults(func=cluster.update)
+
+    list_parser = action_parser.add_parser('list', help='List VMs in cluster', parents=[meta_parser])
+    list_parser.set_defaults(func=cluster.list)
+
+    args = parser.parse_args()
+
+    if 'terminate' == args.action and not args.force:
+        answer = raw_input("This will destroy the ENTIRE {} environment. Are you sure? [y/N] ".format(args.cluster_id))
+        if answer not in ['y', 'Y']:
+            sys.stderr.write('\nACTION [terminate] aborted by user!\n')
+            exit(1)
+
+    if 'update' == args.action and not args.force:
+        answer = raw_input("This is destructive and could corrupt {} environment. Continue? [y/N] ".format(args.cluster_id))
+        if answer not in ['y', 'Y']:
+            sys.stderr.write('\nACTION [update] aborted by user!\n')
+            exit(1)
+
+    status = args.func(args)
+    if status != 0:
+        sys.stderr.write("ACTION [{}] failed with exit status {}\n".format(args.action, status))
+    exit(status)

+ 110 - 0
bin/ohi

@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import argparse
+import traceback
+import sys
+import os
+import re
+import tempfile
+import time
+import subprocess
+import ConfigParser
+
+from openshift_ansible import awsutil
+from openshift_ansible.awsutil import ArgumentError
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
+CONFIG_INVENTORY_OPTION = 'inventory'
+
+class Ohi(object):
+    def __init__(self):
+        self.inventory = None
+        self.host_type_aliases = {}
+        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+        # Default the config path to /etc
+        self.config_path = os.path.join(os.path.sep, 'etc',  \
+                                        'openshift_ansible', \
+                                        'openshift_ansible.conf')
+
+        self.parse_cli_args()
+        self.parse_config_file()
+
+        self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
+
+    def run(self):
+        if self.args.list_host_types:
+            self.aws.print_host_types()
+            return 0
+
+        hosts = None
+        if self.args.host_type is not None and \
+           self.args.env is not None:
+            # Both env and host-type specified
+            hosts = self.aws.get_host_list(host_type=self.args.host_type, \
+                                           env=self.args.env)
+
+        if self.args.host_type is None and \
+           self.args.env is not None:
+            # Only env specified
+            hosts = self.aws.get_host_list(env=self.args.env)
+
+        if self.args.host_type is not None and \
+           self.args.env is None:
+            # Only host-type specified
+            hosts = self.aws.get_host_list(host_type=self.args.host_type)
+
+        if hosts is None:
+            # We weren't able to determine what they wanted to do
+            raise ArgumentError("Invalid combination of arguments")
+
+        for host in hosts:
+            print host
+        return 0
+
+    def parse_config_file(self):
+        if os.path.isfile(self.config_path):
+            config = ConfigParser.ConfigParser()
+            config.read(self.config_path)
+
+            if config.has_section(CONFIG_MAIN_SECTION) and \
+               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
+            self.host_type_aliases = {}
+            if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
+                for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
+                    value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
+                    self.host_type_aliases[alias] = value
+
+    def parse_cli_args(self):
+        """Setup the command line parser with the options we want
+        """
+
+        parser = argparse.ArgumentParser(description='Openshift Host Inventory')
+
+        parser.add_argument('--list-host-types', default=False, action='store_true',
+                       help='List all of the host types')
+
+        parser.add_argument('-e', '--env', action="store",
+                       help="Which environment to use")
+
+        parser.add_argument('-t', '--host-type', action="store",
+                       help="Which host type to use")
+
+        self.args = parser.parse_args()
+
+
+if __name__ == '__main__':
+    if len(sys.argv) == 1:
+        print "\nError: No options given. Use --help to see the available options\n"
+        sys.exit(0)
+
+    try:
+        ohi = Ohi()
+        exitcode = ohi.run()
+        sys.exit(exitcode)
+    except ArgumentError as e:
+        print "\nError: %s\n" % e.message

+ 65 - 0
bin/openshift-ansible-bin.spec

@@ -0,0 +1,65 @@
+Summary:       OpenShift Ansible Scripts for working with metadata hosts
+Name:          openshift-ansible-bin
+Version:       0.0.8
+Release:       1%{?dist}
+License:       ASL 2.0
+URL:           https://github.com/openshift/openshift-ansible
+Source0:       %{name}-%{version}.tar.gz
+Requires:      python2, openshift-ansible-inventory
+BuildRequires: python2-devel
+BuildArch:     noarch
+
+%description
+Scripts to make it nicer when working with hosts that are defined only by metadata.
+
+%prep
+%setup -q
+
+%build
+
+%install
+mkdir -p %{buildroot}%{_bindir}
+mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
+mkdir -p %{buildroot}/etc/bash_completion.d
+mkdir -p %{buildroot}/etc/openshift_ansible
+
+cp -p ossh oscp opssh ohi %{buildroot}%{_bindir}
+cp -p openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
+
+cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
+
+%files
+%{_bindir}/*
+%{python_sitelib}/openshift_ansible/
+/etc/bash_completion.d/*
+%config(noreplace) /etc/openshift_ansible/
+
+%changelog
+* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.8-1
+- fixed bug in opssh where it wouldn't actually run pssh (twiest@redhat.com)
+
+* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.7-1
+- added the ability to run opssh and ohi on all hosts in an environment, as
+  well as all hosts of the same host-type regardless of environment
+  (twiest@redhat.com)
+- added ohi (twiest@redhat.com)
+* Thu Apr 09 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
+- fixed bug where opssh would throw an exception if pssh returned a non-zero
+  exit code (twiest@redhat.com)
+
+* Wed Apr 08 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
+- fixed the opssh default output behavior to be consistent with pssh. Also
+  fixed a bug in how directories are named for --outdir and --errdir.
+  (twiest@redhat.com)
+* Tue Mar 31 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
+- Fixed when tag was missing and added opssh completion (kwoodson@redhat.com)
+
+* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
+- created a python package named openshift_ansible (twiest@redhat.com)
+
+* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
+- added config file support to opssh, ossh, and oscp (twiest@redhat.com)
+* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
+- new package built with tito
+

+ 6 - 0
bin/openshift_ansible.conf.example

@@ -0,0 +1,6 @@
+#[main]
+#inventory = /usr/share/ansible/inventory/multi_ec2.py
+
+#[host_type_aliases]
+#host-type-one = aliasa,aliasb
+#host-type-two = aliasfortwo

+ 0 - 0
bin/openshift_ansible/__init__.py


+ 54 - 16
bin/awsutil.py

@@ -5,28 +5,36 @@ import os
 import json
 import re
 
+class ArgumentError(Exception):
+    def __init__(self, message):
+        self.message = message
+
 class AwsUtil(object):
-    def __init__(self):
-        self.host_type_aliases = {
-                'legacy-openshift-broker': ['broker', 'ex-srv'],
-                         'openshift-node': ['node', 'ex-node'],
-                   'openshift-messagebus': ['msg'],
-            'openshift-customer-database': ['mongo'],
-                'openshift-website-proxy': ['proxy'],
-            'openshift-community-website': ['drupal'],
-                         'package-mirror': ['mirror'],
-        }
+    def __init__(self, inventory_path=None, host_type_aliases={}):
+        self.host_type_aliases = host_type_aliases
+        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+        if inventory_path is None:
+            inventory_path = os.path.realpath(os.path.join(self.file_path, \
+                                              '..', '..', 'inventory', \
+                                              'multi_ec2.py'))
+
+        if not os.path.isfile(inventory_path):
+            raise Exception("Inventory file not found [%s]" % inventory_path)
 
+        self.inventory_path = inventory_path
+        self.setup_host_type_alias_lookup()
+
+    def setup_host_type_alias_lookup(self):
         self.alias_lookup = {}
         for key, values in self.host_type_aliases.iteritems():
             for value in values:
                 self.alias_lookup[value] = key
 
-        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-        self.multi_ec2_path = os.path.realpath(os.path.join(self.file_path, '..','inventory','multi_ec2.py'))
+
 
     def get_inventory(self,args=[]):
-        cmd = [self.multi_ec2_path]
+        cmd = [self.inventory_path]
 
         if args:
             cmd.extend(args)
@@ -124,15 +132,45 @@ class AwsUtil(object):
             return self.alias_lookup[host_type]
         return host_type
 
+    def gen_env_tag(self, env):
+        """Generate the environment tag
+        """
+        return "tag_environment_%s" % env
+
+    def gen_host_type_tag(self, host_type):
+        """Generate the host type tag
+        """
+        host_type = self.resolve_host_type(host_type)
+        return "tag_host-type_%s" % host_type
+
     def gen_env_host_type_tag(self, host_type, env):
         """Generate the environment host type tag
         """
         host_type = self.resolve_host_type(host_type)
         return "tag_env-host-type_%s-%s" % (env, host_type)
 
-    def get_host_list(self, host_type, env):
+    def get_host_list(self, host_type=None, env=None):
         """Get the list of hosts from the inventory using host-type and environment
         """
         inv = self.get_inventory()
-        host_type_tag = self.gen_env_host_type_tag(host_type, env)
-        return inv[host_type_tag]
+
+        if host_type is not None and \
+           env is not None:
+            # Both host type and environment were specified
+            env_host_type_tag = self.gen_env_host_type_tag(host_type, env)
+            return inv[env_host_type_tag]
+
+        if host_type is None and \
+           env is not None:
+            # Just environment was specified
+            host_type_tag = self.gen_env_tag(env)
+            return inv[host_type_tag]
+
+        if host_type is not None and \
+           env is None:
+            # Just host-type was specified
+            host_type_tag = self.gen_host_type_tag(host_type)
+            return inv[host_type_tag]
+
+        # We should never reach here!
+        raise ArgumentError("Invalid combination of parameters")

+ 57 - 22
bin/opssh

@@ -2,7 +2,6 @@
 # vim: expandtab:tabstop=4:shiftwidth=4
 
 import argparse
-import awsutil
 import traceback
 import sys
 import os
@@ -10,54 +9,71 @@ import re
 import tempfile
 import time
 import subprocess
+import ConfigParser
 
-DEFAULT_PSSH_PAR=200
+from openshift_ansible import awsutil
+from openshift_ansible.awsutil import ArgumentError
+
+DEFAULT_PSSH_PAR = 200
 PSSH = '/usr/bin/pssh'
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
+CONFIG_INVENTORY_OPTION = 'inventory'
 
 class Opssh(object):
     def __init__(self):
+        self.inventory = None
+        self.host_type_aliases = {}
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-        self.aws = awsutil.AwsUtil()
+
+        # Default the config path to /etc
+        self.config_path = os.path.join(os.path.sep, 'etc',  \
+                                        'openshift_ansible', \
+                                        'openshift_ansible.conf')
 
         self.parse_cli_args()
+        self.parse_config_file()
+
+        self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
 
+    def run(self):
         if self.args.list_host_types:
             self.aws.print_host_types()
-            return
+            return 0
 
-        if self.args.env and \
-           self.args.host_type and \
-           self.args.command:
-            retval = self.run_pssh()
-            if retval != 0:
-                raise ValueError("pssh run failed")
+        if self.args.host_type is not None or \
+           self.args.env is not None:
+            return self.run_pssh()
 
-            return
-
-        # If it makes it here, we weren't able to determine what they wanted to do
-        raise ValueError("Invalid combination of arguments")
+        # We weren't able to determine what they wanted to do
+        raise ArgumentError("Invalid combination of arguments")
 
     def run_pssh(self):
         """Actually run the pssh command based off of the supplied options
         """
 
         # Default set of options
-        pssh_args = [PSSH, '-i', '-t', '0', '-p', str(self.args.par), '--user', self.args.user]
+        pssh_args = [PSSH, '-t', '0', '-p', str(self.args.par), '--user', self.args.user]
+
+        if self.args.inline:
+            pssh_args.append("--inline")
 
         if self.args.outdir:
-            pssh_args.append("--outdir='%s'" % self.args.outdir)
+            pssh_args.extend(["--outdir", self.args.outdir])
 
         if self.args.errdir:
-            pssh_args.append("--errdir='%s'" % self.args.errdir)
+            pssh_args.extend(["--errdir", self.args.errdir])
+
+        hosts = self.aws.get_host_list(host_type=self.args.host_type,
+                                       env=self.args.env)
 
-        hosts = self.aws.get_host_list(self.args.host_type, self.args.env)
         with tempfile.NamedTemporaryFile(prefix='opssh-', delete=True) as f:
             for h in hosts:
                 f.write(h + os.linesep)
             f.flush()
 
-            pssh_args.extend(["-h", "%s" % f.name])
-            pssh_args.append("%s" % self.args.command)
+            pssh_args.extend(["-h", f.name])
+            pssh_args.append(self.args.command)
 
             print
             print "Running: %s" % ' '.join(pssh_args)
@@ -66,6 +82,20 @@ class Opssh(object):
 
         return None
 
+    def parse_config_file(self):
+        if os.path.isfile(self.config_path):
+            config = ConfigParser.ConfigParser()
+            config.read(self.config_path)
+
+            if config.has_section(CONFIG_MAIN_SECTION) and \
+               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
+            self.host_type_aliases = {}
+            if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
+                for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
+                    value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
+                    self.host_type_aliases[alias] = value
 
     def parse_cli_args(self):
         """Setup the command line parser with the options we want
@@ -79,7 +109,7 @@ class Opssh(object):
         parser.add_argument('-e', '--env', action="store",
                        help="Which environment to use")
 
-        parser.add_argument('-t', '--host-type', action="store",
+        parser.add_argument('-t', '--host-type', action="store", default=None,
                        help="Which host type to use")
 
         parser.add_argument('-c', '--command', action='store',
@@ -88,6 +118,9 @@ class Opssh(object):
         parser.add_argument('--user', action='store', default='root',
                        help='username')
 
+        parser.add_argument('-i', '--inline', default=False, action='store_true',
+                       help='inline aggregated output and error for each server')
+
         parser.add_argument('-p', '--par', action='store', default=DEFAULT_PSSH_PAR,
                        help=('max number of parallel threads (default %s)' % DEFAULT_PSSH_PAR))
 
@@ -107,5 +140,7 @@ if __name__ == '__main__':
 
     try:
         opssh = Opssh()
-    except ValueError as e:
+        exitcode = opssh.run()
+        sys.exit(exitcode)
+    except ArgumentError as e:
         print "\nError: %s\n" % e.message
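
A sketch of what `run_pssh` above ends up executing: the resolved hosts are written to a temporary file passed to pssh via `-h`, and the remaining options are appended as a plain argument list. Host names and the command are hypothetical.

```python
# Sketch of the pssh invocation assembled by run_pssh above.
# Hosts and the 'uptime' command are made-up example values.
import os
import tempfile

pssh_args = ['/usr/bin/pssh', '-t', '0', '-p', '200', '--user', 'root']
hosts = ['node1.example.com', 'node2.example.com']

with tempfile.NamedTemporaryFile(prefix='opssh-', delete=True) as f:
    for h in hosts:
        f.write(h + os.linesep)
    f.flush()

    pssh_args.extend(['-h', f.name])
    pssh_args.append('uptime')
    print ' '.join(pssh_args)
    # /usr/bin/pssh -t 0 -p 200 --user root -h /tmp/opssh-XXXXXX uptime
```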

+ 25 - 3
bin/oscp

@@ -2,21 +2,34 @@
 # vim: expandtab:tabstop=4:shiftwidth=4
 
 import argparse
-import awsutil
 import traceback
 import sys
 import os
 import re
+import ConfigParser
+
+from openshift_ansible import awsutil
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_INVENTORY_OPTION = 'inventory'
 
 class Oscp(object):
     def __init__(self):
+        self.inventory = None
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+        # Default the config path to /etc
+        self.config_path = os.path.join(os.path.sep, 'etc',  \
+                                        'openshift_ansible', \
+                                        'openshift_ansible.conf')
+
         self.parse_cli_args()
+        self.parse_config_file()
 
         # parse host and user
         self.process_host()
 
-        self.aws = awsutil.AwsUtil()
+        self.aws = awsutil.AwsUtil(self.inventory)
 
         # get a dict of host inventory
         if self.args.list:
@@ -38,9 +51,18 @@ class Oscp(object):
         else:
             self.scp()
 
+    def parse_config_file(self):
+        if os.path.isfile(self.config_path):
+            config = ConfigParser.ConfigParser()
+            config.read(self.config_path)
+
+            if config.has_section(CONFIG_MAIN_SECTION) and \
+               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
     def parse_cli_args(self):
         parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
-        parser.add_argument('-e', '--env', 
+        parser.add_argument('-e', '--env',
                           action="store", help="Environment where this server exists.")
         parser.add_argument('-d', '--debug', default=False,
                           action="store_true", help="debug mode")

+ 24 - 2
bin/ossh

@@ -2,18 +2,31 @@
 # vim: expandtab:tabstop=4:shiftwidth=4
 
 import argparse
-import awsutil
 import traceback
 import sys
 import os
 import re
+import ConfigParser
+
+from openshift_ansible import awsutil
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_INVENTORY_OPTION = 'inventory'
 
 class Ossh(object):
     def __init__(self):
+        self.inventory = None
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+        # Default the config path to /etc
+        self.config_path = os.path.join(os.path.sep, 'etc',  \
+                                        'openshift_ansible', \
+                                        'openshift_ansible.conf')
+
         self.parse_cli_args()
+        self.parse_config_file()
 
-        self.aws = awsutil.AwsUtil()
+        self.aws = awsutil.AwsUtil(self.inventory)
 
         # get a dict of host inventory
         if self.args.list:
@@ -37,6 +50,15 @@ class Ossh(object):
         else:
             self.ssh()
 
+    def parse_config_file(self):
+        if os.path.isfile(self.config_path):
+            config = ConfigParser.ConfigParser()
+            config.read(self.config_path)
+
+            if config.has_section(CONFIG_MAIN_SECTION) and \
+               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
     def parse_cli_args(self):
         parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
         parser.add_argument('-e', '--env', action="store",

+ 22 - 1
bin/ossh_bash_completion

@@ -1,6 +1,7 @@
 __ossh_known_hosts(){
     if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])'
+        /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+
     fi
 }
 
@@ -16,3 +17,23 @@ _ossh()
     return 0
 }
 complete -F _ossh ossh oscp
+
+__opssh_known_hosts(){
+    if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+                /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+    fi
+}
+
+_opssh()
+{
+    local cur prev known_hosts
+    COMPREPLY=()
+    cur="${COMP_WORDS[COMP_CWORD]}"
+    prev="${COMP_WORDS[COMP_CWORD-1]}"
+    known_hosts="$(__opssh_known_hosts)"
+    COMPREPLY=( $(compgen -W "${known_hosts}" -- ${cur}))
+
+    return 0
+}
+complete -F _opssh opssh
+
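
For readability, the inline Python one-liners used by `__ossh_known_hosts` and `__opssh_known_hosts` above expand to roughly the following: read the cached multi_ec2 inventory and print the completion candidates.

```python
# Expanded, readable version of the completion one-liners above.
# The cache path matches the one used in the script; its content is whatever
# multi_ec2.py last wrote.
import json
import os

cache = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
hostvars = json.loads(open(cache).read())['_meta']['hostvars']

# ossh/oscp candidates: "<Name tag>.<environment tag>"
for dns, host in hostvars.items():
    if all(k in host for k in ('ec2_tag_Name', 'ec2_tag_environment')):
        print '%s.%s' % (host['ec2_tag_Name'], host['ec2_tag_environment'])

# opssh candidates: the host-type tags
for dns, host in hostvars.items():
    if 'ec2_tag_host-type' in host:
        print host['ec2_tag_host-type']
```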

+ 0 - 113
cluster.sh

@@ -1,113 +0,0 @@
-#!/bin/bash -eu
-
-NODES=2
-MASTERS=1
-
-# If the environment variable OO_PROVDER is defined, it used for the provider
-PROVIDER=${OO_PROVIDER:-''}
-# Otherwise, default is gce (Google Compute Engine)
-if [ "x$PROVIDER" == "x" ];then
-   PROVIDER=gce
-fi
-
-UPPER_CASE_PROVIDER=$(echo $PROVIDER | tr '[:lower:]' '[:upper:]')
-
-
-# Use OO_MASTER_PLAYBOOK/OO_NODE_PLAYBOOK environment variables for playbooks if defined,
-# otherwise use openshift default values.
-MASTER_PLAYBOOK=${OO_MASTER_PLAYBOOK:-'openshift-master'}
-NODE_PLAYBOOK=${OO_NODE_PLAYBOOK:-'openshift-node'}
-
-
-# @formatter:off
-function usage {
-    cat 1>&2 <<-EOT
-        ${0} : [create|terminate|update|list] { ${UPPER_CASE_PROVIDER} environment tag}
-
-        Supported environment tags:
-        $(grep --no-messages 'SUPPORTED_ENVS.*=' ./lib/${PROVIDER}_command.rb)
-        $([ $? -ne 0 ] && echo "No supported environment tags found for ${PROVIDER}")
-
-        Optional arguments for create:
-        [-p|--provider, -m|--masters, -n|--nodes, --master-playbook, --node-playbook]
-
-        Optional arguments for terminate|update:
-        [-p|--provider, --master-playbook, --node-playbook]
-EOT
-}
-# @formatter:on
-
-function create_cluster {
-    ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$MASTER_PLAYBOOK -c $MASTERS
-
-    ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$NODE_PLAYBOOK -c $NODES
-
-    update_cluster
-
-    echo -e "\nCreated ${MASTERS}/${MASTER_PLAYBOOK} masters and ${NODES}/${NODE_PLAYBOOK} nodes using ${PROVIDER} provider\n"
-}
-
-function update_cluster {
-    ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$MASTER_PLAYBOOK
-    ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$NODE_PLAYBOOK
-}
-
-function terminate_cluster {
-    ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$MASTER_PLAYBOOK
-    ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$NODE_PLAYBOOK
-}
-
-[ -f ./cloud.rb ] || (echo 1>&2 'Cannot find ./cloud.rb' && exit 1)
-
-function check_argval {
-    if [[ $1 == -* ]]; then
-        echo "Invalid value: '$1'"
-        usage
-        exit 1
-    fi
-}
-
-# Using GNU getopt to support both small and long formats
-OPTIONS=`getopt -o p:m:n:h --long provider:,masters:,nodes:,master-playbook:,node-playbook:,help \
-	        -n "$0" -- "$@"`
-eval set -- "$OPTIONS"
-
-while true; do
-    case "$1" in
-        -h|--help) (usage; exit 1) ; shift ;;
-        -p|--provider) PROVIDER="$2" ; check_argval $2 ; shift 2 ;;
-        -m|--masters) MASTERS="$2" ; check_argval $2 ; shift 2 ;;
-        -n|--nodes) NODES="$2" ; check_argval $2 ; shift 2 ;;
-        --master-playbook) MASTER_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
-        --node-playbook) NODE_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
-        --) shift ; break ;;
-        *) break ;;
-    esac
-done
-
-shift $((OPTIND-1))
-
-[ -z "${1:-}" ] && (usage; exit 1)
-
-case "${1}" in
-    'create')
-        [ -z "${2:-}" ] && (usage; exit 1)
-        ENV="${2}"
-        create_cluster ;;
-    'update')
-        [ -z "${2:-}" ] && (usage; exit 1)
-        ENV="${2}"
-        update_cluster ;;
-    'terminate')
-        [ -z "${2:-}" ] && (usage; exit 1)
-        ENV="${2}"
-        terminate_cluster ;;
-    'list')   ./cloud.rb "${PROVIDER}" list ;;
-    'help')   usage; exit 0 ;;
-    *)
-        echo -n 1>&2 "${1} is not a supported operation";
-        usage;
-        exit 1 ;;
-esac
-
-exit 0

+ 71 - 43
filter_plugins/oo_filters.py

@@ -1,39 +1,51 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
 from ansible import errors, runner
 import json
 import pdb
 
 def oo_pdb(arg):
-  ''' This pops you into a pdb instance where arg is the data passed in from the filter.
+    ''' This pops you into a pdb instance where arg is the data passed in from the filter.
         Ex: "{{ hostvars | oo_pdb }}"
-  '''
-  pdb.set_trace()
-  return arg
+    '''
+    pdb.set_trace()
+    return arg
 
 def oo_len(arg):
-  ''' This returns the length of the argument
+    ''' This returns the length of the argument
         Ex: "{{ hostvars | oo_len }}"
-  '''
-  return len(arg)
+    '''
+    return len(arg)
 
 def get_attr(data, attribute=None):
-  ''' This looks up dictionary attributes of the form a.b.c and returns the value.
+    ''' This looks up dictionary attributes of the form a.b.c and returns the value.
         Ex: data = {'a': {'b': {'c': 5}}}
             attribute = "a.b.c"
             returns 5
-  '''
+    '''
+    if not attribute:
+        raise errors.AnsibleFilterError("|failed expects attribute to be set")
+
+    ptr = data
+    for attr in attribute.split('.'):
+        ptr = ptr[attr]
 
-  if not attribute:
-    raise errors.AnsibleFilterError("|failed expects attribute to be set")
+    return ptr
 
-  ptr = data
-  for attr in attribute.split('.'):
-    ptr = ptr[attr]
+def oo_flatten(data):
+    ''' This filter plugin will flatten a list of lists
+    '''
+    if not issubclass(type(data), list):
+        raise errors.AnsibleFilterError("|failed expects to flatten a List")
+
+    return [ item for sublist in data for item in sublist ]
 
-  return ptr
 
 def oo_collect(data, attribute=None, filters={}):
-  ''' This takes a list of dict and collects all attributes specified into a list
-      If filter is specified then we will include all items that match _ALL_ of filters.
+    ''' This takes a list of dict and collects all attributes specified into a list
+        If filter is specified then we will include all items that match _ALL_ of filters.
         Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
                      {'a':2, 'z': 'z'},        # True, return
                      {'a':3, 'z': 'z'},        # True, return
@@ -42,44 +54,60 @@ def oo_collect(data, attribute=None, filters={}):
             attribute = 'a'
             filters   = {'z': 'z'}
             returns [1, 2, 3]
-  '''
+    '''
 
-  if not issubclass(type(data), list):
-    raise errors.AnsibleFilterError("|failed expects to filter on a List")
+    if not issubclass(type(data), list):
+        raise errors.AnsibleFilterError("|failed expects to filter on a List")
 
-  if not attribute:
-    raise errors.AnsibleFilterError("|failed expects attribute to be set")
+    if not attribute:
+        raise errors.AnsibleFilterError("|failed expects attribute to be set")
 
-  if filters:
-    retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
-  else:
-    retval = [get_attr(d, attribute) for d in data]
+    if filters:
+        retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
+    else:
+        retval = [get_attr(d, attribute) for d in data]
 
-  return retval
+    return retval
 
 def oo_select_keys(data, keys):
-  ''' This returns a list, which contains the value portions for the keys
+    ''' This returns a list, which contains the value portions for the keys
         Ex: data = { 'a':1, 'b':2, 'c':3 }
             keys = ['a', 'c']
             returns [1, 3]
-  '''
+    '''
+
+    if not issubclass(type(data), dict):
+        raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
 
-  if not issubclass(type(data), dict):
-    raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
+    if not issubclass(type(keys), list):
+        raise errors.AnsibleFilterError("|failed expects first param is a list")
 
-  if not issubclass(type(keys), list):
-    raise errors.AnsibleFilterError("|failed expects first param is a list")
+    # Gather up the values for the list of keys passed in
+    retval = [data[key] for key in keys]
 
-  # Gather up the values for the list of keys passed in
-  retval = [data[key] for key in keys]
+    return retval
 
-  return retval
+def oo_prepend_strings_in_list(data, prepend):
+    ''' This takes a list of strings and prepends a string to each item in the
+        list
+        Ex: data = ['cart', 'tree']
+            prepend = 'apple-'
+            returns ['apple-cart', 'apple-tree']
+    '''
+    if not issubclass(type(data), list):
+        raise errors.AnsibleFilterError("|failed expects first param is a list")
+    if not all(isinstance(x, basestring) for x in data):
+        raise errors.AnsibleFilterError("|failed expects first param is a list of strings")
+    retval = [prepend + s for s in data]
+    return retval
 
 class FilterModule (object):
-  def filters(self):
-    return {
-      "oo_select_keys": oo_select_keys,
-      "oo_collect": oo_collect,
-      "oo_len": oo_len,
-      "oo_pdb": oo_pdb
-    }
+    def filters(self):
+        return {
+                "oo_select_keys": oo_select_keys,
+                "oo_collect": oo_collect,
+                "oo_flatten": oo_flatten,
+                "oo_len": oo_len,
+                "oo_pdb": oo_pdb,
+                "oo_prepend_strings_in_list": oo_prepend_strings_in_list
+                }
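For reference, the new filters can be exercised directly with the repository's Python 2 interpreter (oo_prepend_strings_in_list relies on basestring). This is a minimal sketch, assuming filter_plugins/ is on PYTHONPATH and ansible is installed; the sample data is illustrative only:

    from oo_filters import (oo_flatten, oo_collect, oo_select_keys,
                            oo_prepend_strings_in_list)

    data = [{'a': 1, 'z': 'z'}, {'a': 2, 'z': 'z'}, {'a': 3, 'z': 'b'}]
    print(oo_collect(data, attribute='a', filters={'z': 'z'}))     # [1, 2]
    print(oo_flatten([[1, 2], [3, 4]]))                            # [1, 2, 3, 4]
    print(oo_select_keys({'a': 1, 'b': 2, 'c': 3}, ['a', 'c']))    # [1, 3]
    print(oo_prepend_strings_in_list(['cart', 'tree'], 'apple-'))  # ['apple-cart', 'apple-tree']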

+ 2 - 0
inventory/aws/group_vars/all

@@ -0,0 +1,2 @@
+---
+ansible_ssh_user: root

+ 28 - 0
inventory/byo/group_vars/all

@@ -0,0 +1,28 @@
+---
+# lets assume that we want to use the root as the ssh user for all hosts
+ansible_ssh_user: root
+
+# default debug level for all OpenShift hosts
+openshift_debug_level: 4
+
+# set the OpenShift deployment type for all hosts
+openshift_deployment_type: enterprise
+
+# Override the default registry for development
+openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Use latest Errata puddle as an additional repo:
+#openshift_additional_repos:
+#- id: ose-devel
+#  name: ose-devel
+#  baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os
+#  enabled: 1
+#  gpgcheck: 0
+
+# Use latest Whitelist puddle as an additional repo:
+openshift_additional_repos:
+- id: ose-devel
+  name: ose-devel
+  baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os
+  enabled: 1
+  gpgcheck: 0

+ 10 - 0
inventory/byo/hosts

@@ -0,0 +1,10 @@
+# This is an example of a bring your own (byo) host inventory
+
+# host group for masters
+[masters]
+ose3-master-ansible.test.example.com
+
+# host group for nodes
+[nodes]
+ose3-node[1:2]-ansible.test.example.com
+

+ 2 - 0
inventory/gce/group_vars/all

@@ -0,0 +1,2 @@
+---
+ansible_ssh_user: root

+ 2 - 0
inventory/libvirt/group_vars/all

@@ -0,0 +1,2 @@
+---
+ansible_ssh_user: root

+ 2 - 0
inventory/libvirt/hosts

@@ -0,0 +1,2 @@
+# Eventually we'll add the GCE, AWS, etc dynamic inventories, but for now...
+localhost ansible_python_interpreter=/usr/bin/python2

+ 15 - 2
inventory/multi_ec2.py

@@ -12,6 +12,8 @@ import json
 import pprint
 
 
+CONFIG_FILE_NAME = 'multi_ec2.yaml'
+
 class MultiEc2(object):
 
     def __init__(self):
@@ -20,11 +22,22 @@ class MultiEc2(object):
         self.result = {}
         self.cache_path = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-        self.config_file = os.path.join(self.file_path,"multi_ec2.yaml")
+
+        same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
+        etc_dir_config_file = os.path.join(os.path.sep, 'etc','ansible', CONFIG_FILE_NAME)
+
+        # Prefer a file in the same directory, fall back to a file in etc
+        if os.path.isfile(same_dir_config_file):
+            self.config_file = same_dir_config_file
+        elif os.path.isfile(etc_dir_config_file):
+            self.config_file = etc_dir_config_file
+        else:
+            self.config_file = None # expect env vars
+
         self.parse_cli_args()
 
         # load yaml
-        if os.path.isfile(self.config_file):
+        if self.config_file and os.path.isfile(self.config_file):
             self.config = self.load_yaml_config()
         elif os.environ.has_key("AWS_ACCESS_KEY_ID") and os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
             self.config = {}

+ 50 - 0
inventory/openshift-ansible-inventory.spec

@@ -0,0 +1,50 @@
+Summary:       OpenShift Ansible Inventories
+Name:          openshift-ansible-inventory
+Version:       0.0.2
+Release:       1%{?dist}
+License:       ASL 2.0
+URL:           https://github.com/openshift/openshift-ansible
+Source0:       %{name}-%{version}.tar.gz
+Requires:      python2
+BuildRequires: python2-devel
+BuildArch:     noarch
+
+%description
+Ansible Inventories used with the openshift-ansible scripts and playbooks.
+
+%prep
+%setup -q
+
+%build
+
+%install
+mkdir -p %{buildroot}/etc/ansible
+mkdir -p %{buildroot}/usr/share/ansible/inventory
+mkdir -p %{buildroot}/usr/share/ansible/inventory/aws
+mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
+
+cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
+cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
+cp -p aws/ec2.py aws/ec2.ini %{buildroot}/usr/share/ansible/inventory/aws
+cp -p gce/gce.py %{buildroot}/usr/share/ansible/inventory/gce
+
+%files
+%config(noreplace) /etc/ansible/*
+%dir /usr/share/ansible/inventory
+/usr/share/ansible/inventory/multi_ec2.py*
+/usr/share/ansible/inventory/aws/ec2.py*
+%config(noreplace) /usr/share/ansible/inventory/aws/ec2.ini
+/usr/share/ansible/inventory/gce/gce.py*
+
+%changelog
+* Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
+- added the ability to have a config file in /etc/openshift_ansible to
+  multi_ec2.py. (twiest@redhat.com)
+- Merge pull request #97 from jwhonce/wip/cluster (jhonce@redhat.com)
+- gce inventory/playbook updates for node registration changes
+  (jdetiber@redhat.com)
+- Various fixes (jdetiber@redhat.com)
+
+* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
+- new package built with tito
+

+ 1 - 0
playbooks/adhoc/noc/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 41 - 0
playbooks/adhoc/noc/get_zabbix_problems.yml

@@ -0,0 +1,41 @@
+---
+- name: 'Get current hosts who have triggers that are alerting by trigger description'
+  hosts: localhost
+  gather_facts: no
+  roles:
+    - os_zabbix
+  post_tasks:
+    - assert:
+        that: oo_desc is defined
+
+    - zbxapi:
+        server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
+        zbx_class: Trigger
+        action: get
+        params:
+          only_true: true
+          output: extend
+          selectHosts: extend
+          searchWildCardsEnabled: 1
+          search:
+            description: "{{ oo_desc }}"
+      register: problems
+
+    - debug: var=problems
+
+    - set_fact:
+        problem_hosts: "{{ problems.results | oo_collect(attribute='hosts') | oo_flatten | oo_collect(attribute='host') | difference(['aggregates']) }}"
+
+    - debug: var=problem_hosts
+
+    - add_host:
+        name: "{{ item }}"
+        groups: problem_hosts_group
+      with_items: problem_hosts
+
+- name: "Run on problem hosts"
+  hosts: problem_hosts_group
+  gather_facts: no
+  tasks:
+    - command: "{{ oo_cmd }}"
+      when: oo_cmd is defined
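The set_fact above chains three of the new filters. Traced in plain Python with a made-up trigger payload (the structure of problems.results is an assumption for illustration), the reduction looks roughly like this:

    from oo_filters import oo_collect, oo_flatten

    results = [
        {'hosts': [{'host': 'node1.example.com'}, {'host': 'aggregates'}]},
        {'hosts': [{'host': 'node2.example.com'}]},
    ]
    hosts = oo_collect(results, attribute='hosts')   # list of per-trigger host lists
    hosts = oo_flatten(hosts)                        # one flat list of host dicts
    hosts = oo_collect(hosts, attribute='host')      # just the host names
    problem_hosts = [h for h in hosts if h != 'aggregates']   # difference(['aggregates'])
    print(problem_hosts)   # ['node1.example.com', 'node2.example.com']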

+ 1 - 0
playbooks/adhoc/noc/roles

@@ -0,0 +1 @@
+../../../roles

+ 1 - 1
playbooks/aws/ansible-tower/launch.yml

@@ -6,7 +6,7 @@
 
   vars:
     inst_region: us-east-1
-    rhel7_ami: ami-a24e30ca
+    rhel7_ami: ami-906240f8
     user_data_file: user_data.txt
 
   vars_files:

+ 1 - 0
playbooks/aws/openshift-cluster/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 62 - 0
playbooks/aws/openshift-cluster/launch.yml

@@ -0,0 +1,62 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars_files:
+      - vars.yml
+  tasks:
+    - set_fact: k8s_type="master"
+
+    - name: Generate master instance name(s)
+      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+      register: master_names_output
+      with_sequence: start=1 end={{ num_masters }}
+
+    # These set_fact's cannot be combined
+    - set_fact:
+        master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+    - set_fact:
+        master_names: "{{ master_names_string.strip().split(' ') }}"
+
+    - include: launch_instances.yml
+      vars:
+        instances: "{{ master_names }}"
+        cluster: "{{ cluster_id }}"
+        type: "{{ k8s_type }}"
+
+    - set_fact: k8s_type="node"
+
+    - name: Generate node instance name(s)
+      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+      register: node_names_output
+      with_sequence: start=1 end={{ num_nodes }}
+
+    # These set_fact's cannot be combined
+    - set_fact:
+        node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+    - set_fact:
+        node_names: "{{ node_names_string.strip().split(' ') }}"
+
+    - include: launch_instances.yml
+      vars:
+        instances: "{{ node_names }}"
+        cluster: "{{ cluster_id }}"
+        type: "{{ k8s_type }}"
+
+- hosts: "tag_env_{{ cluster_id }}"
+  roles:
+  - openshift_repos
+  - os_update_latest
+
+- include: ../openshift-master/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
+
+- include: ../openshift-node/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+
+- include: list.yml
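The instance names generated above come from the Jinja2 expression "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random) }}". A rough Python equivalent, with placeholder values, is:

    import random

    cluster_id, k8s_type = 'mycluster', 'master'   # placeholder values
    # Ansible's random filter on an integer yields a number from 0 up to that integer
    name = '%s-%s-%05x' % (cluster_id, k8s_type, random.randint(0, 1048576))
    print(name)   # e.g. mycluster-master-0f3a2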

+ 63 - 0
playbooks/aws/openshift-cluster/launch_instances.yml

@@ -0,0 +1,63 @@
+---
+- set_fact:
+    machine_type: "{{ lookup('env', 'ec2_instance_type')|default('m3.large', true) }}"
+    machine_image: "{{ lookup('env', 'ec2_ami')|default('ami-307b3658', true) }}"
+    machine_region: "{{ lookup('env', 'ec2_region')|default('us-east-1', true) }}"
+    machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}"
+    created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
+    security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}"
+    env: "{{ cluster }}"
+    host_type: "{{ type }}"
+    env_host_type: "{{ cluster }}-openshift-{{ type }}"
+
+- name: Launch instance(s)
+  ec2:
+    state: present
+    region: "{{ machine_region }}"
+    keypair: "{{ machine_keypair }}"
+    group: "{{ security_group }}"
+    instance_type: "{{ machine_type }}"
+    image: "{{ machine_image }}"
+    count: "{{ instances | oo_len }}"
+    wait: yes
+    instance_tags:
+      created-by: "{{ created_by }}"
+      env: "{{ env }}"
+      host-type: "{{ host_type }}"
+      env-host-type: "{{ env_host_type }}"
+  register: ec2
+
+- name: Add Name tag to instances
+  ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present
+  with_together:
+  - instances
+  - ec2.instances
+  args:
+    tags:
+      Name: "{{ item.0 }}"
+
+- set_fact:
+    instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+
+- name: Add new instances to groups and set variables
+  add_host:
+    hostname: "{{ item.0 }}"
+    ansible_ssh_host: "{{ item.1.dns_name }}"
+    groups: "{{ instance_groups }}"
+    ec2_private_ip_address: "{{ item.1.private_ip }}"
+    ec2_ip_address: "{{ item.1.public_ip }}"
+  with_together:
+  - instances
+  - ec2.instances
+
+- name: Wait for ssh
+  wait_for: "port=22 host={{ item.dns_name }}"
+  with_items: ec2.instances
+
+- name: Wait for root user setup
+  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
+  register: result
+  until: result.rc == 0
+  retries: 20
+  delay: 10
+  with_items: ec2.instances
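The with_together loops above pair the generated names with the EC2 results element by element, much like Python's zip. A small illustrative sketch (all values are placeholders):

    instances = ['c1-master-0a1b2', 'c1-node-1c3d4']
    ec2_instances = [{'id': 'i-aaa111', 'dns_name': 'ec2-1.example.com'},
                     {'id': 'i-bbb222', 'dns_name': 'ec2-2.example.com'}]

    # item.0 / item.1 in the tasks above correspond to name / inst here
    for name, inst in zip(instances, ec2_instances):
        print('%s -> %s (%s)' % (name, inst['id'], inst['dns_name']))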

+ 17 - 0
playbooks/aws/openshift-cluster/list.yml

@@ -0,0 +1,17 @@
+---
+- name: Generate oo_list_hosts group
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - set_fact: scratch_group=tag_env_{{ cluster_id }}
+    when: cluster_id != ''
+  - set_fact: scratch_group=all
+    when: scratch_group is not defined
+  - add_host: name={{ item }} groups=oo_list_hosts
+    with_items: groups[scratch_group] | difference(['localhost'])
+
+- name: List Hosts
+  hosts: oo_list_hosts
+  gather_facts: no
+  tasks:
+  - debug: msg="public:{{hostvars[inventory_hostname].ec2_ip_address}} private:{{hostvars[inventory_hostname].ec2_private_ip_address}}"

+ 1 - 0
playbooks/aws/openshift-cluster/roles

@@ -0,0 +1 @@
+../../../roles

+ 14 - 0
playbooks/aws/openshift-cluster/terminate.yml

@@ -0,0 +1,14 @@
+---
+- name: Terminate instance(s)
+  hosts: localhost
+
+  vars_files:
+    - vars.yml
+
+- include: ../openshift-node/terminate.yml
+  vars:
+    oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]'
+
+- include: ../openshift-master/terminate.yml
+  vars:
+    oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-master"]'

+ 13 - 0
playbooks/aws/openshift-cluster/update.yml

@@ -0,0 +1,13 @@
+---
+- hosts: "tag_env_{{ cluster_id }}"
+  roles:
+  - openshift_repos
+  - os_update_latest
+
+- include: ../openshift-master/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
+
+- include: ../openshift-node/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"

+ 1 - 0
playbooks/aws/openshift-cluster/vars.yml

@@ -0,0 +1 @@
+---

+ 12 - 30
playbooks/aws/openshift-master/config.yml

@@ -1,42 +1,24 @@
 ---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: Populate oo_masters_to_config host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
   - name: "Evaluate oo_host_group_exp if it's set"
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    add_host: "name={{ item }} groups=oo_masters_to_config"
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
 
-- name: "Gather facts for nodes in {{ oo_env }}"
-  hosts: "tag_env-host-type_{{ oo_env }}-openshift-node"
-  connection: ssh
-  user: root
-
-- name: "Set Origin specific facts on localhost (for later use)"
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Setting openshift_node_ips fact on localhost
-      set_fact:
-        openshift_node_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-node'])
-            | oo_collect(attribute='ansible_default_ipv4.address') }}"
-      when: groups['tag_env-host-type_' + oo_env + '-openshift-node'] is defined
-
-- name: "Configure instances"
-  hosts: oo_hosts_to_config
-  connection: ssh
-  user: root
+- name: Configure instances
+  hosts: oo_masters_to_config
+  vars:
+    openshift_hostname: "{{ ec2_private_ip_address }}"
+    openshift_public_hostname: "{{ ec2_ip_address }}"
+    # TODO: this should be removed once openshift-sdn packages are available
+    openshift_use_openshift_sdn: False
   vars_files:
-    - vars.yml
+  - vars.yml
   roles:
-    - repos
-    - {
-        role: openshift_master,
-        openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
-        openshift_env: "{{ oo_env }}"
-        openshift_public_ip: "{{ ec2_ip_address }}"
-      }
+    - openshift_master
+    #- openshift_sdn_master
     - pods
     - os_env_extras

+ 7 - 4
playbooks/aws/openshift-master/launch.yml

@@ -45,14 +45,17 @@
       args:
         tags: "{{ oo_new_inst_tags }}"
 
-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+    - name: Add new instances public IPs to oo_masters_to_config
+      add_host:
+        hostname: "{{ item.0 }}"
+        ansible_ssh_host: "{{ item.1.dns_name }}"
+        groupname: oo_masters_to_config
+        ec2_private_ip_address: "{{ item.1.private_ip }}"
+        ec2_ip_address: "{{ item.1.public_ip }}"
       with_together:
         - oo_new_inst_names
         - ec2.instances
 
-    - debug: var=ec2
-
     - name: Wait for ssh
       wait_for: "port=22 host={{ item.dns_name }}"
       with_items: ec2.instances

+ 52 - 0
playbooks/aws/openshift-master/terminate.yml

@@ -0,0 +1,52 @@
+---
+- name: Populate oo_masters_to_terminate host group if needed
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - name: Evaluate oo_host_group_exp if it's set
+      add_host: "name={{ item }} groups=oo_masters_to_terminate"
+      with_items: "{{ oo_host_group_exp | default('') }}"
+      when: oo_host_group_exp is defined
+
+- name: Gather facts for instances to terminate
+  hosts: oo_masters_to_terminate
+
+- name: Terminate instances
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars:
+    host_vars: "{{ hostvars
+        | oo_select_keys(groups['oo_masters_to_terminate']) }}"
+  tasks:
+    - name: Terminate instances
+      ec2:
+        state: absent
+        instance_ids: ["{{ item.ec2_id }}"]
+        region: "{{ item.ec2_region }}"
+      ignore_errors: yes
+      register: ec2_term
+      with_items: host_vars
+
+    # Fail if any of the instances failed to terminate with an error other
+    # than 403 Forbidden
+    - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
+      when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+      with_items: ec2_term.results
+
+    - name: Stop instance if termination failed
+      ec2:
+        state: stopped
+        instance_ids: ["{{ item.item.ec2_id }}"]
+        region: "{{ item.item.ec2_region }}"
+      register: ec2_stop
+      when: item.failed
+      with_items: ec2_term.results
+
+    - name: Rename stopped instances
+      ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+      args:
+        tags:
+          Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+      with_items: ec2_stop.results
+
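The "Rename stopped instances" task reaches through item.item.item because each registered loop result wraps the item it looped over; the node terminate playbook below uses the same pattern. With placeholder values, the nesting looks like this:

    host = {'ec2_id': 'i-abc123', 'ec2_region': 'us-east-1',
            'ec2_tag_Name': 'c1-master-00001'}                        # one entry of host_vars
    term_result = {'failed': True, 'msg': 'error: ...', 'item': host} # one entry of ec2_term.results
    stop_result = {'item': term_result}                               # one entry of ec2_stop.results

    print(stop_result['item']['item']['ec2_tag_Name'])   # i.e. item.item.item.ec2_tag_Name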

+ 1 - 0
playbooks/aws/openshift-master/vars.yml

@@ -1,2 +1,3 @@
 ---
 openshift_debug_level: 4
+openshift_cluster_id: "{{ cluster_id }}"

+ 94 - 36
playbooks/aws/openshift-node/config.yml

@@ -1,49 +1,107 @@
 ---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: Populate oo_nodes_to_config host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
   - name: Evaluate oo_host_group_exp
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    add_host: "name={{ item }} groups=oo_nodes_to_config"
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
+  - add_host:
+      name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+      groups: oo_first_master
+    when: oo_host_group_exp is defined
 
-- name: "Gather facts for masters in {{ oo_env }}"
-  hosts: "tag_env-host-type_{{ oo_env }}-openshift-master"
-  connection: ssh
-  user: root
 
-- name: "Set OO sepcific facts on localhost (for later use)"
-  hosts: localhost
-  gather_facts: no
+- name: Gather and set facts for hosts to configure
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  # Since the master is registering the nodes before they are configured, we
+  # need to make sure to set the node properties beforehand if we do not want
+  # the defaults
+  - openshift_facts:
+      role: "{{ item.role }}"
+      local_facts: "{{ item.local_facts }}"
+    with_items:
+    - role: common
+      local_facts:
+        hostname: "{{ ec2_private_ip_address }}"
+        public_hostname: "{{ ec2_ip_address }}"
+        # TODO: this should be removed once openshift-sdn packages are available
+        use_openshift_sdn: False
+    - role: node
+      local_facts:
+        external_id: "{{ openshift_node_external_id | default(None) }}"
+        resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
+        resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
+        pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
+        labels: "{{ openshfit_node_labels | default(None) }}"
+        annotations: "{{ openshfit_node_annotations | default(None) }}"
+
+
+- name: Register nodes
+  hosts: oo_first_master
+  vars:
+    openshift_nodes: "{{ hostvars
+          | oo_select_keys(groups['oo_nodes_to_config']) }}"
+  roles:
+  - openshift_register_nodes
   tasks:
-    - name: Setting openshift_master_ips fact on localhost
-      set_fact:
-        openshift_master_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-master'])
-            | oo_collect(attribute='ansible_default_ipv4.address') }}"
-      when: groups['tag_env-host-type_' + oo_env + '-openshift-master'] is defined
-    - name: Setting openshift_master_public_ips fact on localhost
-      set_fact:
-        openshift_master_public_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
-            | oo_collect(attribute='ec2_ip_address') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
-
-- name: "Configure instances"
-  hosts: oo_hosts_to_config
-  connection: ssh
-  user: root
+  - name: Create local temp directory for syncing certs
+    local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: mktemp
+
+  - name: Sync master certs to localhost
+    synchronize:
+      mode: pull
+      checksum: yes
+      src: /var/lib/openshift/openshift.local.certificates
+      dest: "{{ mktemp.stdout }}"
+
+
+- name: Configure instances
+  hosts: oo_nodes_to_config
   vars_files:
-    - vars.yml
+  - vars.yml
+  vars:
+    openshift_hostname: "{{ ec2_private_ip_address }}"
+    openshift_public_hostname: "{{ ec2_ip_address }}"
+    sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
+    cert_parent_rel_path: openshift.local.certificates
+    cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
+    cert_base_path: /var/lib/openshift
+    cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
+    cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
+  pre_tasks:
+  - name: Ensure certificate directories exist
+    file:
+      path: "{{ item }}"
+      state: directory
+    with_items:
+    - "{{ cert_path }}"
+    - "{{ cert_parent_path }}/ca"
+
+  # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+  # possibly test service started time against certificate/config file
+  # timestamps in openshift-node or openshift-sdn-node to trigger notify
+  - name: Sync certs to nodes
+    synchronize:
+      checksum: yes
+      src: "{{ item.src }}"
+      dest: "{{ item.dest }}"
+      owner: no
+      group: no
+    with_items:
+    - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
+      dest: "{{ cert_parent_path }}"
+    - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+      dest: "{{ cert_parent_path }}/ca/cert.crt"
+  - local_action: file name={{ sync_tmpdir }} state=absent
+    run_once: true
   roles:
-    - repos
-    - docker
-    - {
-        role: openshift_node,
-        openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
-        openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}",
-        openshift_env: "{{ oo_env }}"
-        openshift_public_ip: "{{ ec2_ip_address }}"
-      }
+    - openshift_node
+    #- openshift_sdn_node
     - os_env_extras
+    - os_env_extras_node

+ 10 - 5
playbooks/aws/openshift-node/launch.yml

@@ -27,7 +27,9 @@
       register: ec2
 
     - name: Add new instances public IPs to the atomic proxy host group
-      add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
+      add_host:
+        hostname: "{{ item.public_ip }}"
+        groupname: new_ec2_instances
       with_items: ec2.instances
 
     - name: Add Name and environment tags to instances
@@ -45,14 +47,17 @@
       args:
         tags: "{{ oo_new_inst_tags }}"
 
-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+    - name: Add new instances public IPs to oo_nodes_to_config
+      add_host:
+        hostname: "{{ item.0 }}"
+        ansible_ssh_host: "{{ item.1.dns_name }}"
+        groupname: oo_nodes_to_config
+        ec2_private_ip_address: "{{ item.1.private_ip }}"
+        ec2_ip_address: "{{ item.1.public_ip }}"
       with_together:
         - oo_new_inst_names
         - ec2.instances
 
-    - debug: var=ec2
-
     - name: Wait for ssh
       wait_for: "port=22 host={{ item.dns_name }}"
       with_items: ec2.instances

+ 52 - 0
playbooks/aws/openshift-node/terminate.yml

@@ -0,0 +1,52 @@
+---
+- name: Populate oo_nodes_to_terminate host group if needed
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - name: Evaluate oo_host_group_exp if it's set
+      add_host: "name={{ item }} groups=oo_nodes_to_terminate"
+      with_items: "{{ oo_host_group_exp | default('') }}"
+      when: oo_host_group_exp is defined
+
+- name: Gather facts for instances to terminate
+  hosts: oo_nodes_to_terminate
+
+- name: Terminate instances
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars:
+    host_vars: "{{ hostvars
+        | oo_select_keys(groups['oo_nodes_to_terminate']) }}"
+  tasks:
+    - name: Terminate instances
+      ec2:
+        state: absent
+        instance_ids: ["{{ item.ec2_id }}"]
+        region: "{{ item.ec2_region }}"
+      ignore_errors: yes
+      register: ec2_term
+      with_items: host_vars
+
+    # Fail if any of the instances failed to terminate with an error other
+    # than 403 Forbidden
+    - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
+      when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+      with_items: ec2_term.results
+
+    - name: Stop instance if termination failed
+      ec2:
+        state: stopped
+        instance_ids: ["{{ item.item.ec2_id }}"]
+        region: "{{ item.item.ec2_region }}"
+      register: ec2_stop
+      when: item.failed
+      with_items: ec2_term.results
+
+    - name: Rename stopped instances
+      ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+      args:
+        tags:
+          Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+      with_items: ec2_stop.results
+

+ 1 - 0
playbooks/aws/openshift-node/vars.yml

@@ -1,2 +1,3 @@
 ---
 openshift_debug_level: 4
+openshift_cluster_id: "{{ cluster_id }}"

+ 6 - 0
playbooks/byo/config.yml

@@ -0,0 +1,6 @@
+---
+- name: Run the openshift-master config playbook
+  include: openshift-master/config.yml
+
+- name: Run the openshift-node config playbook
+  include: openshift-node/config.yml

+ 1 - 0
playbooks/byo/filter_plugins

@@ -0,0 +1 @@
+../../filter_plugins

+ 9 - 0
playbooks/byo/openshift-master/config.yml

@@ -0,0 +1,9 @@
+---
+- name: Gather facts for node hosts
+  hosts: nodes
+
+- name: Configure master instances
+  hosts: masters
+  roles:
+  - openshift_master
+  - openshift_sdn_master

+ 1 - 0
playbooks/byo/openshift-master/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/byo/openshift-master/roles

@@ -0,0 +1 @@
+../../../roles

+ 79 - 0
playbooks/byo/openshift-node/config.yml

@@ -0,0 +1,79 @@
+---
+- name: Gather facts for node hosts
+  hosts: nodes
+  roles:
+  - openshift_facts
+  tasks:
+  # Since the master is registering the nodes before they are configured, we
+  # need to make sure to set the node properties beforehand if we do not want
+  # the defaults
+  - openshift_facts:
+      role: 'node'
+      local_facts:
+        hostname: "{{ openshift_hostname | default(None) }}"
+        external_id: "{{ openshift_node_external_id | default(None) }}"
+        resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
+        resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
+        pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
+        labels: "{{ openshfit_node_labels | default(None) }}"
+        annotations: "{{ openshfit_node_annotations | default(None) }}"
+
+
+- name: Register nodes
+  hosts: masters[0]
+  vars:
+    openshift_nodes: "{{ hostvars | oo_select_keys(groups['nodes']) }}"
+  roles:
+  - openshift_register_nodes
+  tasks:
+  - name: Create local temp directory for syncing certs
+    local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: mktemp
+
+  - name: Sync master certs to localhost
+    synchronize:
+      mode: pull
+      checksum: yes
+      src: /var/lib/openshift/openshift.local.certificates
+      dest: "{{ mktemp.stdout }}"
+
+
+- name: Configure node instances
+  hosts: nodes
+  vars:
+    sync_tmpdir: "{{ hostvars[groups['masters'][0]].mktemp.stdout }}"
+    cert_parent_rel_path: openshift.local.certificates
+    cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
+    cert_base_path: /var/lib/openshift
+    cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
+    cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
+    openshift_sdn_master_url: http://{{ hostvars[groups['masters'][0]].openshift.common.hostname }}:4001
+  pre_tasks:
+  - name: Ensure certificate directories exist
+    file:
+      path: "{{ item }}"
+      state: directory
+    with_items:
+    - "{{ cert_path }}"
+    - "{{ cert_parent_path }}/ca"
+
+  # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+  # possibly test service started time against certificate/config file
+  # timestamps in openshift-node or openshift-sdn-node to trigger notify
+  - name: Sync certs to nodes
+    synchronize:
+      checksum: yes
+      src: "{{ item.src }}"
+      dest: "{{ item.dest }}"
+      owner: no
+      group: no
+    with_items:
+    - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
+      dest: "{{ cert_parent_path }}"
+    - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+      dest: "{{ cert_parent_path }}/ca/cert.crt"
+  - local_action: file name={{ sync_tmpdir }} state=absent
+    run_once: true
+  roles:
+  - openshift_node
+  - openshift_sdn_node

+ 1 - 0
playbooks/byo/openshift-node/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/byo/openshift-node/roles

@@ -0,0 +1 @@
+../../../roles

+ 1 - 0
playbooks/byo/roles

@@ -0,0 +1 @@
+../../roles

+ 1 - 0
playbooks/gce/openshift-cluster/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 62 - 0
playbooks/gce/openshift-cluster/launch.yml

@@ -0,0 +1,62 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars_files:
+      - vars.yml
+  tasks:
+    - set_fact: k8s_type="master"
+
+    - name: Generate master instance name(s)
+      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+      register: master_names_output
+      with_sequence: start=1 end={{ num_masters }}
+
+    # These set_fact's cannot be combined
+    - set_fact:
+        master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+    - set_fact:
+        master_names: "{{ master_names_string.strip().split(' ') }}"
+
+    - include: launch_instances.yml
+      vars:
+        instances: "{{ master_names }}"
+        cluster: "{{ cluster_id }}"
+        type: "{{ k8s_type }}"
+
+    - set_fact: k8s_type="node"
+
+    - name: Generate node instance name(s)
+      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
+      register: node_names_output
+      with_sequence: start=1 end={{ num_nodes }}
+
+    # These set_fact's cannot be combined
+    - set_fact:
+        node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
+
+    - set_fact:
+        node_names: "{{ node_names_string.strip().split(' ') }}"
+
+    - include: launch_instances.yml
+      vars:
+        instances: "{{ node_names }}"
+        cluster: "{{ cluster_id }}"
+        type: "{{ k8s_type }}"
+
+- hosts: "tag_env-{{ cluster_id }}"
+  roles:
+  - openshift_repos
+  - os_update_latest
+
+- include: ../openshift-master/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
+
+- include: ../openshift-node/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
+
+- include: list.yml

+ 44 - 0
playbooks/gce/openshift-cluster/launch_instances.yml

@@ -0,0 +1,44 @@
+---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+
+- set_fact:
+    machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}"
+    machine_image: "{{ lookup('env', 'gce_machine_image') |default('libra-rhel7', true) }}"
+
+- name: Launch instance(s)
+  gce:
+    instance_names: "{{ instances }}"
+    machine_type: "{{ machine_type }}"
+    image: "{{ machine_image }}"
+    service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+    pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+    project_id: "{{ lookup('env', 'gce_project_id') }}"
+    tags:
+      - "created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}"
+      - "env-{{ cluster }}"
+      - "host-type-{{ type }}"
+      - "env-host-type-{{ cluster }}-openshift-{{ type }}"
+  register: gce
+
+- name: Add new instances to groups and set variables needed
+  add_host:
+    hostname: "{{ item.name }}"
+    ansible_ssh_host: "{{ item.public_ip }}"
+    groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
+    gce_public_ip: "{{ item.public_ip }}"
+    gce_private_ip: "{{ item.private_ip }}"
+  with_items: gce.instance_data
+
+- name: Wait for ssh
+  wait_for: "port=22 host={{ item.public_ip }}"
+  with_items: gce.instance_data
+
+- name: Wait for root user setup
+  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
+  register: result
+  until: result.rc == 0
+  retries: 20
+  delay: 10
+  with_items: gce.instance_data
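The groups expression above turns the GCE tags into inventory group names. Traced in Python 2 (to match the plugin's use of basestring), with placeholder tag values and assuming the filter module is importable:

    from oo_filters import oo_prepend_strings_in_list

    tags = ['env-c1', 'host-type-node', 'env-host-type-c1-openshift-node']
    print(','.join(oo_prepend_strings_in_list(tags, 'tag_')))
    # tag_env-c1,tag_host-type-node,tag_env-host-type-c1-openshift-node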

+ 17 - 0
playbooks/gce/openshift-cluster/list.yml

@@ -0,0 +1,17 @@
+---
+- name: Generate oo_list_hosts group
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - set_fact: scratch_group=tag_env-{{ cluster_id }}
+    when: cluster_id != ''
+  - set_fact: scratch_group=all
+    when: scratch_group is not defined
+  - add_host: name={{ item }} groups=oo_list_hosts
+    with_items: groups[scratch_group] | difference(['localhost']) | difference(groups.status_terminated)
+
+- name: List Hosts
+  hosts: oo_list_hosts
+  gather_facts: no
+  tasks:
+  - debug: msg="public:{{hostvars[inventory_hostname].gce_public_ip}} private:{{hostvars[inventory_hostname].gce_private_ip}}"

+ 1 - 0
playbooks/gce/openshift-cluster/roles

@@ -0,0 +1 @@
+../../../roles

+ 20 - 0
playbooks/gce/openshift-cluster/terminate.yml

@@ -0,0 +1,20 @@
+---
+- name: Terminate instance(s)
+  hosts: localhost
+
+  vars_files:
+    - vars.yml
+
+- include: ../openshift-node/terminate.yml
+  vars:
+    oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
+    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+
+- include: ../openshift-master/terminate.yml
+  vars:
+    oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
+    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"

+ 13 - 0
playbooks/gce/openshift-cluster/update.yml

@@ -0,0 +1,13 @@
+---
+- hosts: "tag_env-{{ cluster_id }}"
+  roles:
+  - openshift_repos
+  - os_update_latest
+
+- include: ../openshift-master/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
+
+- include: ../openshift-node/config.yml
+  vars:
+    oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"

+ 1 - 0
playbooks/gce/openshift-cluster/vars.yml

@@ -0,0 +1 @@
+---

+ 7 - 29
playbooks/gce/openshift-master/config.yml

@@ -1,42 +1,20 @@
 ---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: master/config.yml, populate oo_masters_to_config host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
   - name: "Evaluate oo_host_group_exp if it's set"
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    add_host: "name={{ item }} groups=oo_masters_to_config"
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
 
-- name: "Gather facts for nodes in {{ oo_env }}"
-  hosts: "tag_env-host-type-{{ oo_env }}-openshift-node"
-  connection: ssh
-  user: root
-
-- name: "Set Origin specific facts on localhost (for later use)"
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Setting openshift_node_ips fact on localhost
-      set_fact:
-        openshift_node_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-node'])
-            | oo_collect(attribute='ansible_default_ipv4.address') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-node'] is defined
-
 - name: "Configure instances"
-  hosts: oo_hosts_to_config
-  connection: ssh
-  user: root
+  hosts: oo_masters_to_config
+  vars:
+    openshift_hostname: "{{ gce_private_ip }}"
   vars_files:
-    - vars.yml
+  - vars.yml
   roles:
-    - repos
-    - {
-        role: openshift_master,
-        openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
-        openshift_public_ip: "{{ gce_public_ip }}",
-        openshift_env: "{{ oo_env }}",
-      }
+    - openshift_master
     - pods
     - os_env_extras

+ 10 - 4
playbooks/gce/openshift-master/launch.yml

@@ -1,4 +1,8 @@
 ---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+
 - name: Launch instance(s)
   hosts: localhost
   connection: local
@@ -24,16 +28,18 @@
         tags: "{{ oo_new_inst_tags }}"
       register: gce
 
-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+    - name: Add new instances public IPs to oo_masters_to_config
+      add_host:
+        hostname: "{{ item.name }}"
+        ansible_ssh_host: "{{ item.public_ip }}"
+        groupname: oo_masters_to_config
+        gce_private_ip: "{{ item.private_ip }}"
       with_items: gce.instance_data
 
     - name: Wait for ssh
       wait_for: "port=22 host={{ item.public_ip }}"
       with_items: gce.instance_data
 
-    - debug: var=gce
-
     - name: Wait for root user setup
       command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
       register: result

+ 7 - 12
playbooks/gce/openshift-master/terminate.yml

@@ -1,20 +1,17 @@
-- name: "populate oo_hosts_to_terminate host group if needed"
+---
+- name: Populate oo_masters_to_terminate host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
-    - debug: var=oo_host_group_exp
-
     - name: Evaluate oo_host_group_exp if it's set
-      add_host: "name={{ item }} groups=oo_hosts_to_terminate"
+      add_host: "name={{ item }} groups=oo_masters_to_terminate"
       with_items: "{{ oo_host_group_exp | default('') }}"
       when: oo_host_group_exp is defined
 
-    - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
-
-
-- name: Terminate instances
+- name: Terminate master instances
   hosts: localhost
   connection: local
+  gather_facts: no
   tasks:
     - name: Terminate master instances
       gce:
@@ -22,12 +19,10 @@
         pem_file: "{{ gce_pem_file }}"
         project_id: "{{ gce_project_id }}"
         state: 'absent'
-        instance_names: "{{ groups['oo_hosts_to_terminate'] }}"
-        disks: "{{ groups['oo_hosts_to_terminate'] }}"
+        instance_names: "{{ groups['oo_masters_to_terminate'] }}"
+        disks: "{{ groups['oo_masters_to_terminate'] }}"
       register: gce
 
-    - debug: var=gce
-
     - name: Remove disks of instances
       gce_pd:
         service_account_email: "{{ gce_service_account_email }}"

+ 1 - 0
playbooks/gce/openshift-master/vars.yml

@@ -1,2 +1,3 @@
 ---
 openshift_debug_level: 4
+openshift_cluster_id: "{{ cluster_id }}"

+ 86 - 35
playbooks/gce/openshift-node/config.yml

@@ -1,49 +1,100 @@
 ---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: node/config.yml, populate oo_nodes_to_config host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
   - name: Evaluate oo_host_group_exp
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
+    add_host: "name={{ item }} groups=oo_nodes_to_config"
     with_items: "{{ oo_host_group_exp | default('') }}"
     when: oo_host_group_exp is defined
+  - add_host:
+      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+      groups: oo_first_master
+    when: oo_host_group_exp is defined
 
-- name: "Gather facts for masters in {{ oo_env }}"
-  hosts: "tag_env-host-type-{{ oo_env }}-openshift-master"
-  connection: ssh
-  user: root
 
-- name: "Set OO sepcific facts on localhost (for later use)"
-  hosts: localhost
-  gather_facts: no
+- name: Gather and set facts for hosts to configure
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  # Since the master is registering the nodes before they are configured, we
+  # need to make sure to set the node properties beforehand if we do not want
+  # the defaults
+  - openshift_facts:
+      role: "{{ item.role }}"
+      local_facts: "{{ item.local_facts }}"
+    with_items:
+    - role: common
+      local_facts:
+        hostname: "{{ gce_private_ip }}"
+    - role: node
+      local_facts:
+        external_id: "{{ openshift_node_external_id | default(None) }}"
+        resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
+        resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
+        pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
+        labels: "{{ openshfit_node_labels | default(None) }}"
+        annotations: "{{ openshfit_node_annotations | default(None) }}"
+
+
+- name: Register nodes
+  hosts: oo_first_master
+  vars:
+    openshift_nodes: "{{ hostvars
+          | oo_select_keys(groups['oo_nodes_to_config']) }}"
+  roles:
+  - openshift_register_nodes
   tasks:
-    - name: Setting openshift_master_ips fact on localhost
-      set_fact:
-        openshift_master_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
-            | oo_collect(attribute='ansible_default_ipv4.address') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
-    - name: Setting openshift_master_public_ips fact on localhost
-      set_fact:
-        openshift_master_public_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
-            | oo_collect(attribute='gce_public_ip') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
+  - name: Create local temp directory for syncing certs
+    local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: mktemp
 
-- name: "Configure instances"
-  hosts: oo_hosts_to_config
-  connection: ssh
-  user: root
+  - name: Sync master certs to localhost
+    synchronize:
+      mode: pull
+      checksum: yes
+      src: /var/lib/openshift/openshift.local.certificates
+      dest: "{{ mktemp.stdout }}"
+
+- name: Configure instances
+  hosts: oo_nodes_to_config
   vars_files:
-    - vars.yml
+  - vars.yml
+  vars:
+    sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
+    cert_parent_rel_path: openshift.local.certificates
+    cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
+    cert_base_path: /var/lib/openshift
+    cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
+    cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
+  pre_tasks:
+  - name: Ensure certificate directories exist
+    file:
+      path: "{{ item }}"
+      state: directory
+    with_items:
+    - "{{ cert_path }}"
+    - "{{ cert_parent_path }}/ca"
+
+  # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+  # possibly test service started time against certificate/config file
+  # timestamps in openshift-node or openshift-sdn-node to trigger notify
+  - name: Sync certs to nodes
+    synchronize:
+      checksum: yes
+      src: "{{ item.src }}"
+      dest: "{{ item.dest }}"
+      owner: no
+      group: no
+    with_items:
+    - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
+      dest: "{{ cert_parent_path }}"
+    - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+      dest: "{{ cert_parent_path }}/ca/cert.crt"
+  - local_action: file name={{ sync_tmpdir }} state=absent
+    run_once: true
   roles:
-    - repos
-    - docker
-    - {
-        role: openshift_node,
-        openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
-        openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}",
-        openshift_public_ip: "{{ gce_public_ip }}",
-        openshift_env: "{{ oo_env }}",
-      }
+    - openshift_node
     - os_env_extras
+    - os_env_extras_node

+ 10 - 14
playbooks/gce/openshift-node/launch.yml

@@ -1,4 +1,8 @@
 ---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+
 - name: Launch instance(s)
   hosts: localhost
   connection: local
@@ -24,16 +28,18 @@
         tags: "{{ oo_new_inst_tags }}"
       register: gce
 
-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+    - name: Add new instances public IPs to oo_nodes_to_config
+      add_host:
+        hostname: "{{ item.name }}"
+        ansible_ssh_host: "{{ item.public_ip }}"
+        groupname: oo_nodes_to_config
+        gce_private_ip: "{{ item.private_ip }}"
       with_items: gce.instance_data
 
     - name: Wait for ssh
       wait_for: "port=22 host={{ item.public_ip }}"
       with_items: gce.instance_data
 
-    - debug: var=gce
-
     - name: Wait for root user setup
       command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
       register: result
@@ -45,13 +51,3 @@
 
 # Apply the configs, separate so that just the configs can be run by themselves
 - include: config.yml
-
-# Always bounce service to pick up new credentials
-#- name: "Restart instances"
-#  hosts: oo_hosts_to_config
-#  connection: ssh
-#  user: root
-#  tasks:
-#    - debug: var=groups.oo_hosts_to_config
-#    - name: Restart OpenShift
-#      service: name=openshift-node enabled=yes state=restarted

+ 7 - 12
playbooks/gce/openshift-node/terminate.yml

@@ -1,20 +1,17 @@
-- name: "populate oo_hosts_to_terminate host group if needed"
+---
+- name: Populate oo_nodes_to_terminate host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
-    - debug: var=oo_host_group_exp
-
     - name: Evaluate oo_host_group_exp if it's set
-      add_host: "name={{ item }} groups=oo_hosts_to_terminate"
+      add_host: "name={{ item }} groups=oo_nodes_to_terminate"
       with_items: "{{ oo_host_group_exp | default('') }}"
       when: oo_host_group_exp is defined
 
-    - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
-
-
-- name: Terminate instances
+- name: Terminate node instances
   hosts: localhost
   connection: local
+  gather_facts: no
   tasks:
     - name: Terminate node instances
       gce:
@@ -22,12 +19,10 @@
         pem_file: "{{ gce_pem_file }}"
         project_id: "{{ gce_project_id }}"
         state: 'absent'
-        instance_names: "{{ groups['oo_hosts_to_terminate'] }}"
-        disks: "{{ groups['oo_hosts_to_terminate'] }}"
+        instance_names: "{{ groups['oo_nodes_to_terminate'] }}"
+        disks: "{{ groups['oo_nodes_to_terminate'] }}"
       register: gce
 
-    - debug: var=gce
-
     - name: Remove disks of instances
       gce_pd:
         service_account_email: "{{ gce_service_account_email }}"

+ 1 - 0
playbooks/gce/openshift-node/vars.yml

@@ -1,2 +1,3 @@
 ---
 openshift_debug_level: 4
+openshift_cluster_id: "{{ cluster_id }}"

+ 1 - 0
playbooks/libvirt/openshift-cluster/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 65 - 0
playbooks/libvirt/openshift-cluster/launch.yml

@@ -0,0 +1,65 @@
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+
+  vars:
+    libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
+    libvirt_storage_pool: 'openshift'
+    libvirt_uri: 'qemu:///system'
+
+  vars_files:
+    - vars.yml
+
+  tasks:
+    - set_fact:
+        k8s_type: master
+
+    - name: Generate master instance name(s)
+      set_fact:
+        scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
+      register: master_names_output
+      with_sequence: start=1 end='{{ num_masters }}'
+
+    - set_fact:
+        master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+
+    - include: launch_instances.yml
+      vars:
+        instances: '{{ master_names }}'
+        cluster: '{{ cluster_id }}'
+        type: '{{ k8s_type }}'
+        group_name: 'tag_env-host-type-{{ cluster_id }}-openshift-master'
+
+    - set_fact:
+        k8s_type: node
+
+    - name: Generate node instance name(s)
+      set_fact:
+        scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
+      register: node_names_output
+      with_sequence: start=1 end='{{ num_nodes }}'
+
+    - set_fact:
+        node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+
+    - include: launch_instances.yml
+      vars:
+        instances: '{{ node_names }}'
+        cluster: '{{ cluster_id }}'
+        type: '{{ k8s_type }}'
+
+- hosts: 'tag_env-{{ cluster_id }}'
+  roles:
+    - openshift_repos
+    - os_update_latest
+
+- include: ../openshift-master/config.yml
+  vars:
+    oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
+    oo_env: '{{ cluster_id }}'
+
+- include: ../openshift-node/config.yml
+  vars:
+    oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
+    oo_env: '{{ cluster_id }}'

+ 102 - 0
playbooks/libvirt/openshift-cluster/launch_instances.yml

@@ -0,0 +1,102 @@
+- name: Create the libvirt storage directory for openshift
+  file:
+    dest: '{{ libvirt_storage_pool_path }}'
+    state: directory
+
+- name: Download Base Cloud image
+  get_url:
+    url: '{{ base_image_url }}'
+    sha256sum: '{{ base_image_sha256 }}'
+    dest: '{{ libvirt_storage_pool_path }}/{{ base_image_name }}'
+
+- name: Create the cloud-init config drive path
+  file:
+    dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
+    state: directory
+  with_items: '{{ instances }}'
+
+- name: Create the cloud-init config drive files
+  template:
+    src: '{{ item[1] }}'
+    dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/openstack/latest/{{ item[1] }}'
+  with_nested:
+    - '{{ instances }}'
+    - [ user-data, meta-data ]
+
+- name: Create the cloud-init config drive
+  command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+  args:
+    chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
+    creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+  with_items: '{{ instances }}'
+
+- name: Create the libvirt storage pool for openshift
+  command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
+  ignore_errors: yes
+
+- name: Refresh the libvirt storage pool for openshift
+  command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
+
+- name: Create VMs drives
+  command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ base_image_name }} --backing-vol-format qcow2'
+  with_items: '{{ instances }}'
+
+- name: Create VMs
+  virt:
+    name: '{{ item }}'
+    command: define
+    xml: "{{ lookup('template', '../templates/domain.xml') }}"
+    uri: '{{ libvirt_uri }}'
+  with_items: '{{ instances }}'
+
+- name: Start VMs
+  virt:
+    name: '{{ item }}'
+    state: running
+    uri: '{{ libvirt_uri }}'
+  with_items: '{{ instances }}'
+
+- name: Collect MAC addresses of the VMs
+  shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
+  register: scratch_mac
+  with_items: '{{ instances }}'
+
+- name: Wait for the VMs to get an IP
+  command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
+  ignore_errors: yes
+  register: nb_allocated_ips
+  until: nb_allocated_ips.stdout == '{{ instances | length }}'
+  retries: 30
+  delay: 1
+
+- name: Collect IP addresses of the VMs
+  shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
+  register: scratch_ip
+  with_items: '{{ scratch_mac.results }}'
+
+- set_fact:
+    ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
+
+- name: Add new instances
+  add_host:
+    hostname: '{{ item.0 }}'
+    ansible_ssh_host: '{{ item.1 }}'
+    ansible_ssh_user: root
+    groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
+  with_together:
+    - instances
+    - ips
+
+- name: Wait for ssh
+  wait_for:
+    host: '{{ item }}'
+    port: 22
+  with_items: ips
+
+- name: Wait for root user setup
+  command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item }} echo root user is setup'
+  register: result
+  until: result.rc == 0
+  retries: 30
+  delay: 1
+  with_items: ips
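The MAC-to-IP lookup above scrapes /proc/net/arp with awk. A rough Python equivalent of that lookup (the MAC below is a placeholder):

    def ip_for_mac(mac, arp_file='/proc/net/arp'):
        with open(arp_file) as arp:
            for line in arp.readlines()[1:]:      # skip the header row
                fields = line.split()
                # columns: IP address, HW type, Flags, HW address, Mask, Device
                if len(fields) >= 4 and fields[3] == mac:
                    return fields[0]
        return None

    print(ip_for_mac('52:54:00:12:34:56'))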

+ 43 - 0
playbooks/libvirt/openshift-cluster/list.yml

@@ -0,0 +1,43 @@
+- name: Generate oo_list_hosts group
+  hosts: localhost
+  connection: local
+  gather_facts: no
+
+  vars:
+    libvirt_uri: 'qemu:///system'
+
+  tasks:
+    - name: List VMs
+      virt:
+        command: list_vms
+      register: list_vms
+
+    - name: Collect MAC addresses of the VMs
+      shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
+      register: scratch_mac
+      with_items: '{{ list_vms.list_vms }}'
+      when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+
+    - name: Collect IP addresses of the VMs
+      shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
+      register: scratch_ip
+      with_items: '{{ scratch_mac.results }}'
+      when: item.skipped is not defined
+
+    - name: Add hosts
+      add_host:
+        hostname: '{{ item[0] }}'
+        ansible_ssh_host: '{{ item[1].stdout }}'
+        ansible_ssh_user: root
+        groups: oo_list_hosts
+      with_together:
+        - '{{ list_vms.list_vms }}'
+        - '{{ scratch_ip.results }}'
+      when: item[1].skipped is not defined
+
+- name: List Hosts
+  hosts: oo_list_hosts
+
+  tasks:
+    - debug:
+        msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
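The truncate(cluster_id|length+1, True) conditions used here, and again in terminate.yml below, are effectively a prefix check: truncating a longer VM name appends '...', so the result equals '<cluster_id>-...' only when the name starts with '<cluster_id>-'. A small sketch of the same test in Python (names are placeholders):

    def belongs_to_cluster(vm_name, cluster_id):
        cut = len(cluster_id) + 1
        truncated = vm_name if len(vm_name) <= cut else vm_name[:cut] + '...'
        return truncated == cluster_id + '-...'

    print(belongs_to_cluster('c1-master-00001', 'c1'))   # True
    print(belongs_to_cluster('other-node-00001', 'c1'))  # False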

+ 1 - 0
playbooks/libvirt/openshift-cluster/roles

@@ -0,0 +1 @@
+../../../roles

+ 41 - 0
playbooks/libvirt/openshift-cluster/terminate.yml

@@ -0,0 +1,41 @@
+- name: Terminate instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+
+  vars:
+    libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
+    libvirt_storage_pool: 'openshift'
+    libvirt_uri: 'qemu:///system'
+
+  tasks:
+    - name: List VMs
+      virt:
+        command: list_vms
+      register: list_vms
+
+    - name: Destroy VMs
+      virt:
+        name: '{{ item[0] }}'
+        command: '{{ item[1] }}'
+        uri: '{{ libvirt_uri }}'
+      with_nested:
+        - '{{ list_vms.list_vms }}'
+        - [ destroy, undefine ]
+      when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+
+    - name: Delete VM config drives
+      file:
+        path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack'
+        state: absent
+      with_items: '{{ list_vms.list_vms }}'
+      when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+
+    - name: Delete VM drives
+      command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item[0] }}{{ item[1] }}'
+      args:
+        removes: '{{ libvirt_storage_pool_path }}/{{ item[0] }}{{ item[1] }}'
+      with_nested:
+        - '{{ list_vms.list_vms }}'
+        - [ '_configdrive', '_cloud-init.iso', '.qcow2' ]
+      when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
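
For each VM whose name matches the cluster prefix, the `with_nested` loop above expands into one `vol-delete` per artifact suffix; an illustrative expansion for a hypothetical VM name:

    # item[0] == 'mycluster-openshift-node-0' yields three commands:
    #   virsh -c qemu:///system vol-delete --pool openshift mycluster-openshift-node-0_configdrive
    #   virsh -c qemu:///system vol-delete --pool openshift mycluster-openshift-node-0_cloud-init.iso
    #   virsh -c qemu:///system vol-delete --pool openshift mycluster-openshift-node-0.qcow2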

+ 7 - 0
playbooks/libvirt/openshift-cluster/vars.yml

@@ -0,0 +1,7 @@
+# base_image_url: http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2
+# base_image_name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
+# base_image_sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
+
+base_image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
+base_image_name: CentOS-7-x86_64-GenericCloud.qcow2
+base_image_sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab

+ 21 - 0
playbooks/libvirt/openshift-master/config.yml

@@ -0,0 +1,21 @@
+- name: master/config.yml, populate oo_masters_to_config host group if needed
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - name: "Evaluate oo_host_group_exp if it's set"
+      add_host:
+        name: '{{ item }}'
+        groups: oo_masters_to_config
+      with_items: "{{ oo_host_group_exp | default('') }}"
+      when: oo_host_group_exp is defined
+
+- name: Configure instances
+  hosts: oo_masters_to_config
+  vars:
+    openshift_hostname: '{{ ansible_default_ipv4.address }}'
+  vars_files:
+    - vars.yml
+  roles:
+    - openshift_master
+    - pods
+    - os_env_extras
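
A sketch of how `oo_host_group_exp` might be supplied when running this playbook on its own; the expression is hypothetical and only illustrates that it is expected to evaluate to a list of hostnames:

    # hypothetical extra-vars file, passed with --extra-vars "@extra_vars.yml"
    oo_host_group_exp: "{{ groups['tag_env-host-type-mycluster-openshift-master'] }}"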

+ 1 - 0
playbooks/libvirt/openshift-master/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/libvirt/openshift-master/roles

@@ -0,0 +1 @@
+../../../roles

+ 1 - 0
playbooks/libvirt/openshift-master/vars.yml

@@ -0,0 +1 @@
+openshift_debug_level: 4

+ 102 - 0
playbooks/libvirt/openshift-node/config.yml

@@ -0,0 +1,102 @@
+- name: node/config.yml, populate oo_nodes_to_config host group if needed
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - name: "Evaluate oo_host_group_exp if it's set"
+      add_host:
+        name: '{{ item }}'
+        groups: oo_nodes_to_config
+      with_items: "{{ oo_host_group_exp | default('') }}"
+      when: oo_host_group_exp is defined
+
+    - add_host:
+        name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+        groups: oo_first_master
+      when: oo_host_group_exp is defined
+
+
+- name: Gather and set facts for hosts to configure
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  # Since the master is registering the nodes before they are configured, we
+  # need to make sure to set the node properties beforehand if we do not want
+  # the defaults
+  - openshift_facts:
+      role: "{{ item.role }}"
+      local_facts: "{{ item.local_facts }}"
+    with_items:
+    - role: common
+      local_facts:
+        hostname: "{{ ansible_default_ipv4.address }}"
+    - role: node
+      local_facts:
+        external_id: "{{ openshift_node_external_id | default(None) }}"
+        resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
+        resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
+        pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
+        labels: "{{ openshift_node_labels | default(None) }}"
+        annotations: "{{ openshift_node_annotations | default(None) }}"
+
+
+- name: Register nodes
+  hosts: oo_first_master
+  vars:
+    openshift_nodes: "{{ hostvars
+          | oo_select_keys(groups['oo_nodes_to_config']) }}"
+  roles:
+  - openshift_register_nodes
+  tasks:
+  - name: Create local temp directory for syncing certs
+    local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: mktemp
+
+  - name: Sync master certs to localhost
+    synchronize:
+      mode: pull
+      checksum: yes
+      src: /var/lib/openshift/openshift.local.certificates
+      dest: "{{ mktemp.stdout }}"
+
+- name: Configure instances
+  hosts: oo_nodes_to_config
+  vars_files:
+  - vars.yml
+  vars:
+    sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
+    cert_parent_rel_path: openshift.local.certificates
+    cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
+    cert_base_path: /var/lib/openshift
+    cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
+    cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
+  pre_tasks:
+  - name: Ensure certificate directories exist
+    file:
+      path: "{{ item }}"
+      state: directory
+    with_items:
+    - "{{ cert_path }}"
+    - "{{ cert_parent_path }}/ca"
+
+  # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+  # possibly test service started time against certificate/config file
+  # timestamps in openshift-node or openshift-sdn-node to trigger notify
+  - name: Sync certs to nodes
+    synchronize:
+      checksum: yes
+      src: "{{ item.src }}"
+      dest: "{{ item.dest }}"
+      owner: no
+      group: no
+    with_items:
+    - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
+      dest: "{{ cert_parent_path }}"
+    - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
+      dest: "{{ cert_parent_path }}/ca/cert.crt"
+  - local_action: file name={{ sync_tmpdir }} state=absent
+    run_once: true
+  roles:
+    - openshift_node
+    - os_env_extras
+    - os_env_extras_node
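
The paths built in the `vars:` block above imply the following layout on the master, pulled to a temporary directory and then pushed to each node (illustrative; `<hostname>` stands for `openshift.common.hostname`):

    # /var/lib/openshift/openshift.local.certificates/     <- cert_parent_path
    #   ca/cert.crt                                        <- synced to every node
    #   node-<hostname>/                                   <- per-node certs, cert_path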

+ 1 - 0
playbooks/libvirt/openshift-node/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/libvirt/openshift-node/roles

@@ -0,0 +1 @@
+../../../roles

+ 1 - 0
playbooks/libvirt/openshift-node/vars.yml

@@ -0,0 +1 @@
+openshift_debug_level: 4

+ 62 - 0
playbooks/libvirt/templates/domain.xml

@@ -0,0 +1,62 @@
+<domain type='kvm' id='8'>
+  <name>{{ item }}</name>
+  <memory unit='GiB'>1</memory>
+  <currentMemory unit='GiB'>1</currentMemory>
+  <vcpu placement='static'>2</vcpu>
+  <os>
+    <type arch='x86_64' machine='pc'>hvm</type>
+    <boot dev='hd'/>
+  </os>
+  <features>
+    <acpi/>
+    <apic/>
+    <pae/>
+  </features>
+  <clock offset='utc'>
+    <timer name='rtc' tickpolicy='catchup'/>
+    <timer name='pit' tickpolicy='delay'/>
+    <timer name='hpet' present='no'/>
+  </clock>
+  <on_poweroff>destroy</on_poweroff>
+  <on_reboot>restart</on_reboot>
+  <on_crash>restart</on_crash>
+  <devices>
+    <emulator>/usr/bin/qemu-system-x86_64</emulator>
+    <disk type='file' device='disk'>
+      <driver name='qemu' type='qcow2'/>
+      <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
+      <target dev='vda' bus='virtio'/>
+    </disk>
+    <disk type='file' device='cdrom'>
+      <driver name='qemu' type='raw'/>
+      <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
+      <target dev='vdb' bus='virtio'/>
+      <readonly/>
+    </disk>
+    <controller type='usb' index='0' />
+    <interface type='network'>
+      <source network='default'/>
+      <model type='virtio'/>
+    </interface>
+    <serial type='pty'>
+      <target port='0'/>
+    </serial>
+    <console type='pty'>
+      <target type='serial' port='0'/>
+    </console>
+    <channel type='spicevmc'>
+      <target type='virtio' name='com.redhat.spice.0'/>
+    </channel>
+    <input type='tablet' bus='usb' />
+    <input type='mouse' bus='ps2'/>
+    <input type='keyboard' bus='ps2'/>
+    <graphics type='spice' autoport='yes' />
+    <video>
+      <model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1'/>
+    </video>
+    <redirdev bus='usb' type='spicevmc'>
+    </redirdev>
+    <memballoon model='virtio'>
+    </memballoon>
+  </devices>
+</domain>

+ 2 - 0
playbooks/libvirt/templates/meta-data

@@ -0,0 +1,2 @@
+instance-id: {{ item[0] }}
+local-hostname: {{ item[0] }}

+ 10 - 0
playbooks/libvirt/templates/user-data

@@ -0,0 +1,10 @@
+#cloud-config
+
+disable_root: 0
+
+system_info:
+  default_user:
+    name: root
+
+ssh_authorized_keys:
+  - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
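
These two templates provide the cloud-init seed data for each VM, presumably packed into the `<name>_cloud-init.iso` that domain.xml attaches as a cdrom and terminate.yml deletes. Rendered for a hypothetical instance name, the meta-data would look roughly like:

    # meta-data for item[0] == 'mycluster-openshift-node-0':
    instance-id: mycluster-openshift-node-0
    local-hostname: mycluster-openshift-node-0
    # user-data injects ~/.ssh/id_rsa.pub of the user running Ansible as root's authorized key.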

+ 3 - 0
rel-eng/packages/.readme

@@ -0,0 +1,3 @@
+The rel-eng/packages directory contains metadata files
+named after their packages. Each file records the latest tagged
+version and the package's relative directory within the repository.

+ 1 - 0
rel-eng/packages/openshift-ansible-bin

@@ -0,0 +1 @@
+0.0.8-1 bin/

+ 1 - 0
rel-eng/packages/openshift-ansible-inventory

@@ -0,0 +1 @@
+0.0.2-1 inventory/

+ 5 - 0
rel-eng/tito.props

@@ -0,0 +1,5 @@
+[buildconfig]
+builder = tito.builder.Builder
+tagger = tito.tagger.VersionTagger
+changelog_do_not_remove_cherrypick = 0
+changelog_format = %s (%ae)

+ 7 - 0
roles/ansible_tower/tasks/main.yaml

@@ -9,6 +9,7 @@
   - ansible
   - telnet
   - ack
+  - python-ansible-tower-cli
 
 - name: download Tower setup
   get_url: url=http://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-2.1.1.tar.gz dest=/opt/ force=no
@@ -25,3 +26,9 @@
 - name: Open firewalld port for https
   firewalld: port=8080/tcp permanent=true state=enabled
 
+- name: Set (httpd_can_network_connect) flag on and keep it persistent across reboots
+  seboolean: name=httpd_can_network_connect state=yes persistent=yes
+
+- name: Set (httpd_can_network_connect_db) flag on and keep it persistent across reboots
+  seboolean: name=httpd_can_network_connect_db state=yes persistent=yes
+

+ 1 - 1
roles/docker/tasks/main.yml

@@ -11,5 +11,5 @@
 # From the origin rpm there exists instructions on how to
 # setup origin properly.  The following steps come from there
 - name: Change root to be in the Docker group
-  user: name=root groups=docker append=yes
+  user: name=root groups=dockerroot append=yes
 

+ 41 - 0
roles/openshift_ansible_inventory/README.md

@@ -0,0 +1,41 @@
+Openshift Ansible Inventory
+=========
+
+Install and configure openshift-ansible-inventory.
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+oo_inventory_group (default: root)
+oo_inventory_owner (default: root)
+oo_inventory_accounts
+oo_inventory_cache_max_age (default: 1800)
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+A minimal example of using this role with one of its variables passed in as a parameter:
+
+    - hosts: servers
+      roles:
+         - { role: openshift_ansible_inventory, oo_inventory_cache_max_age: 900 }
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+Openshift operations, Red Hat, Inc

+ 4 - 0
roles/openshift_ansible_inventory/defaults/main.yml

@@ -0,0 +1,4 @@
+---
+oo_inventory_group: root
+oo_inventory_owner: root
+oo_inventory_cache_max_age: 1800

+ 2 - 0
roles/openshift_ansible_inventory/handlers/main.yml

@@ -0,0 +1,2 @@
+---
+# handlers file for openshift_ansible_inventory

+ 0 - 0
roles/openshift_ansible_inventory/meta/main.yml


Some files were not shown because too many files changed in this diff