
Merge pull request #188 from openshift/master

Merge master into stage
Wesley Hearn committed 10 years ago
commit 519e097df3
100 changed files with 3490 additions and 479 deletions
  1. BUILD.md (+44 -0)
  2. README.md (+5 -1)
  3. README_AWS.md (+63 -4)
  4. README_GCE.md (+22 -5)
  5. README_OSE.md (+241 -0)
  6. README_libvirt.md (+130 -0)
  7. ansible.cfg (+23 -0)
  8. bin/cluster (+241 -0)
  9. bin/ohi (+110 -0)
  10. bin/openshift-ansible-bin.spec (+65 -0)
  11. bin/openshift_ansible.conf.example (+6 -0)
  12. bin/openshift_ansible/__init__.py (+0 -0)
  13. bin/awsutil.py (+54 -16)
  14. bin/opssh (+57 -22)
  15. bin/oscp (+25 -3)
  16. bin/ossh (+24 -2)
  17. bin/ossh_bash_completion (+22 -1)
  18. cluster.sh (+0 -113)
  19. filter_plugins/oo_filters.py (+164 -53)
  20. git/.pylintrc (+390 -0)
  21. git/parent.rb (+45 -0)
  22. git/pylint.sh (+14 -0)
  23. git/yaml_validation.rb (+72 -0)
  24. inventory/aws/hosts/ec2.ini (+0 -0)
  25. inventory/aws/hosts/ec2.py (+0 -0)
  26. inventory/aws/hosts/hosts (+1 -0)
  27. inventory/byo/hosts (+34 -0)
  28. inventory/gce/hosts/gce.py (+0 -0)
  29. inventory/gce/hosts/hosts (+1 -0)
  30. inventory/libvirt/hosts/hosts (+1 -0)
  31. inventory/libvirt/hosts/libvirt.ini (+20 -0)
  32. inventory/libvirt/hosts/libvirt_generic.py (+179 -0)
  33. inventory/multi_ec2.py (+65 -45)
  34. inventory/multi_ec2.yaml.example (+2 -2)
  35. inventory/openshift-ansible-inventory.spec (+50 -0)
  36. playbooks/adhoc/noc/filter_plugins (+1 -0)
  37. playbooks/adhoc/noc/get_zabbix_problems.yml (+41 -0)
  38. playbooks/adhoc/noc/roles (+1 -0)
  39. playbooks/aws/ansible-tower/launch.yml (+1 -1)
  40. playbooks/aws/openshift-cluster/config.yml (+36 -0)
  41. playbooks/aws/openshift-cluster/filter_plugins (+1 -0)
  42. playbooks/aws/openshift-cluster/launch.yml (+30 -0)
  43. playbooks/aws/openshift-cluster/library/ec2_ami_find.py (+302 -0)
  44. playbooks/aws/openshift-cluster/list.yml (+24 -0)
  45. playbooks/aws/openshift-cluster/roles (+1 -0)
  46. playbooks/aws/openshift-cluster/tasks/launch_instances.yml (+132 -0)
  47. playbooks/aws/openshift-cluster/templates/user_data.j2 (+29 -0)
  48. playbooks/aws/openshift-cluster/terminate.yml (+16 -0)
  49. playbooks/aws/openshift-cluster/update.yml (+18 -0)
  50. playbooks/aws/openshift-cluster/vars.defaults.yml (+1 -0)
  51. playbooks/aws/openshift-cluster/vars.online.int.yml (+9 -0)
  52. playbooks/aws/openshift-cluster/vars.online.prod.yml (+9 -0)
  53. playbooks/aws/openshift-cluster/vars.online.stage.yml (+9 -0)
  54. playbooks/aws/openshift-cluster/vars.yml (+38 -0)
  55. playbooks/aws/openshift-master/config.yml (+14 -37)
  56. playbooks/aws/openshift-master/launch.yml (+10 -9)
  57. playbooks/aws/openshift-master/terminate.yml (+2 -0)
  58. playbooks/aws/openshift-master/vars.yml (+0 -2)
  59. playbooks/aws/openshift-node/config.yml (+19 -43)
  60. playbooks/aws/openshift-node/launch.yml (+14 -11)
  61. playbooks/aws/openshift-node/terminate.yml (+2 -0)
  62. playbooks/aws/openshift-node/vars.yml (+0 -2)
  63. playbooks/aws/terminate.yml (+64 -0)
  64. playbooks/byo/config.yml (+6 -0)
  65. playbooks/byo/filter_plugins (+1 -0)
  66. playbooks/byo/openshift-master/config.yml (+15 -0)
  67. playbooks/byo/openshift-master/filter_plugins (+1 -0)
  68. playbooks/byo/openshift-master/roles (+1 -0)
  69. playbooks/byo/openshift-node/config.yml (+21 -0)
  70. playbooks/byo/openshift-node/filter_plugins (+1 -0)
  71. playbooks/byo/openshift-node/roles (+1 -0)
  72. playbooks/byo/openshift_facts.yml (+10 -0)
  73. playbooks/byo/roles (+1 -0)
  74. playbooks/common/openshift-cluster/config.yml (+4 -0)
  75. playbooks/common/openshift-cluster/filter_plugins (+1 -0)
  76. playbooks/common/openshift-cluster/roles (+1 -0)
  77. playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml (+11 -0)
  78. playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml (+11 -0)
  79. playbooks/common/openshift-cluster/update_repos_and_packages.yml (+7 -0)
  80. playbooks/common/openshift-master/config.yml (+19 -0)
  81. playbooks/common/openshift-master/filter_plugins (+1 -0)
  82. playbooks/common/openshift-master/roles (+1 -0)
  83. playbooks/common/openshift-node/config.yml (+127 -0)
  84. playbooks/common/openshift-node/filter_plugins (+1 -0)
  85. playbooks/common/openshift-node/roles (+1 -0)
  86. playbooks/gce/openshift-cluster/config.yml (+37 -0)
  87. playbooks/gce/openshift-cluster/filter_plugins (+1 -0)
  88. playbooks/gce/openshift-cluster/launch.yml (+28 -0)
  89. playbooks/gce/openshift-cluster/list.yml (+24 -0)
  90. playbooks/gce/openshift-cluster/roles (+1 -0)
  91. playbooks/gce/openshift-cluster/tasks/launch_instances.yml (+41 -0)
  92. playbooks/gce/openshift-cluster/terminate.yml (+34 -0)
  93. playbooks/gce/openshift-cluster/update.yml (+18 -0)
  94. playbooks/gce/openshift-cluster/vars.yml (+15 -0)
  95. playbooks/gce/openshift-master/config.yml (+13 -37)
  96. playbooks/gce/openshift-master/launch.yml (+12 -8)
  97. playbooks/gce/openshift-master/terminate.yml (+11 -17)
  98. playbooks/gce/openshift-master/vars.yml (+0 -2)
  99. playbooks/gce/openshift-node/config.yml (+18 -43)
  100. playbooks/gce/openshift-node/launch.yml (+0 -0)

+ 44 - 0
BUILD.md

@@ -0,0 +1,44 @@
+# openshift-ansible RPM Build instructions
+We use tito to make building and tracking revisions easy.
+
+For more information on tito, please see the [Tito home page](http://rm-rf.ca/tito "Tito home page").
+
+
+## Build openshift-ansible-bin
+- Change into openshift-ansible/bin
+```
+cd openshift-ansible/bin
+```
+- Build a test package (no tagging needed)
+```
+tito build --test --rpm
+```
+- Tag a new build (bumps version number and adds log entries)
+```
+tito tag
+```
+- Follow the on-screen tito instructions to push the tags
+- Build a new package based on the latest tag information
+```
+tito build --rpm
+```
+
+
+## Build openshift-ansible-inventory
+- Change into openshift-ansible/inventory
+```
+cd openshift-ansible/inventory
+```
+- Build a test package (no tagging needed)
+```
+tito build --test --rpm
+```
+- Tag a new build (bumps version number and adds log entries)
+```
+tito tag
+```
+- Follow the on-screen tito instructions to push the tags
+- Build a new package based on the latest tag information
+```
+tito build --rpm
+```

+ 5 - 1
README.md

@@ -20,10 +20,14 @@ Setup
 - Setup for a specific cloud:
   - [AWS](README_AWS.md)
   - [GCE](README_GCE.md)
+  - [local VMs](README_libvirt.md)
+
+- Build
+  - [How to build the openshift-ansible rpms](BUILD.md)
 
 - Directory Structure:
   - [cloud.rb](cloud.rb) - light wrapper around Ansible
-  - [cluster.sh](cluster.sh) - easily create OpenShift 3 clusters
+  - [bin/cluster](bin/cluster) - python script to easily create OpenShift 3 clusters
   - [filter_plugins/](filter_plugins) - custom filters used to manipulate data in Ansible
   - [inventory/](inventory) - houses Ansible dynamic inventory scripts
   - [lib/](lib) - library components of cloud.rb

+ 63 - 4
README_AWS.md

@@ -14,7 +14,7 @@ Create a credentials file
    export AWS_ACCESS_KEY_ID='AKIASTUFF'
    export AWS_SECRET_ACCESS_KEY='STUFF'
 ```
-1. source this file
+2. source this file
 ```
   source ~/.aws_creds
 ```
@@ -23,7 +23,7 @@ Note: You must source this file in each shell that you want to run cloud.rb
 
 (Optional) Setup your $HOME/.ssh/config file
 -------------------------------------------
-In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config' 
+In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config'
 to setup a private key file to allow ansible to connect to the created hosts.
 
 To do so, add the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to login on AWS.
@@ -34,6 +34,43 @@ Host *.compute-1.amazonaws.com
 
 Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances.
 
+(Optional) Choose where the cluster will be launched
+----------------------------------------------------
+
+By default, a cluster is launched with the following configuration:
+
+- Instance type: m3.large
+- AMI: ami-307b3658 (for online deployments, ami-acd999c4 for origin deployments and ami-10663b78 for enterprise deployments)
+- Region: us-east-1
+- Keypair name: libra
+- Security group: public
+
+Master specific defaults:
+- Master root volume size: 10 (in GiBs)
+- Master root volume type: gp2
+- Master root volume iops: 500 (only applicable when volume type is io1)
+
+Node specific defaults:
+- Node root volume size: 10 (in GiBs)
+- Node root volume type: gp2
+- Node root volume iops: 500 (only applicable when volume type is io1)
+- Docker volume size: 25 (in GiBs)
+- Docker volume ephemeral: true (Whether the docker volume is ephemeral)
+- Docker volume type: gp2 (only applicable if ephemeral is false)
+- Docker volume iops: 500 (only applicable when volume type is io1)
+
+If needed, these values can be changed by setting environment variables on your system.
+
+- export ec2_instance_type='m3.large'
+- export ec2_ami='ami-307b3658'
+- export ec2_region='us-east-1'
+- export ec2_keypair='libra'
+- export ec2_security_group='public'
+- export os_master_root_vol_size='20'
+- export os_master_root_vol_type='standard'
+- export os_node_root_vol_size='15'
+- export os_docker_vol_size='50'
+- export os_docker_vol_ephemeral='false'
 
 Install Dependencies
 --------------------
@@ -51,7 +88,29 @@ OSX:
 Test The Setup
 --------------
 1. cd openshift-ansible
-1. Try to list all instances:
+1. Try to list all instances (Passing an empty string as the cluster_id
+argument will result in all ec2 instances being listed)
+```
+  bin/cluster list aws ''
+```
+
+Creating a cluster
+------------------
+1. To create a cluster with one master and two nodes
+```
+  bin/cluster create aws <cluster-id>
+```
+
+Updating a cluster
+---------------------
+1. To update the cluster
+```
+  bin/cluster update aws <cluster-id>
+```
+
+Terminating a cluster
+---------------------
+1. To terminate the cluster
 ```
-  ./cloud.rb aws list
+  bin/cluster terminate aws <cluster-id>
 ```

+ 22 - 5
README_GCE.md

@@ -4,7 +4,7 @@ GCE Setup Instructions
 
 Get a gce service key
 ---------------------
-1. ask your GCE project administrator for a GCE service key
+1. Ask your GCE project administrator for a GCE service key
 
 Note: If your GCE project does not show a Service Account under <Project>/APIs & auth/Credentials, you will need to use "Create new Client ID" to create a Service Account before your administrator can create the service key for you.
 
@@ -65,12 +65,29 @@ Install Dependencies
 Test The Setup
 --------------
 1. cd openshift-ansible/
-2. Try to list all instances:
+1. Try to list all instances (Passing an empty string as the cluster_id
+argument will result in all gce instances being listed)
 ```
-  ./cloud.rb gce list
+  bin/cluster list gce ''
 ```
 
-3. Try to create an instance:
+Creating a cluster
+------------------
+1. To create a cluster with one master and two nodes
 ```
-  ./cloud.rb gce launch -n ${USER}-node1 -e int --type os3-node
+  bin/cluster create gce <cluster-id>
+```
+
+Updating a cluster
+---------------------
+1. To update the cluster
+```
+  bin/cluster update gce <cluster-id>
+```
+
+Terminating a cluster
+---------------------
+1. To terminate the cluster
+```
+  bin/cluster terminate gce <cluster-id>
 ```
 ```

+ 241 - 0
README_OSE.md

@@ -0,0 +1,241 @@
+# Installing OSEv3 from dev puddles using ansible
+
+* [Requirements](#requirements)
+* [Caveats](#caveats)
+* [Known Issues](#known-issues)
+* [Configuring the host inventory](#configuring-the-host-inventory)
+* [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups)
+* [Running the ansible playbooks](#running-the-ansible-playbooks)
+* [Post-ansible steps](#post-ansible-steps)
+* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
+
+## Requirements
+* ansible
+  * Tested using ansible-1.8.4-1.fc20.noarch, but should work with version 1.8+
+  * There is currently a known issue with ansible-1.9.0; you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842
+  * Available in Fedora channels
+  * Available for EL with EPEL and Optional channel
+* One or more RHEL 7.1 VMs
+* Either ssh key based auth for the root user or ssh key based auth for a user
+  with sudo access (no password)
+* A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/
+  
+  ```sh
+  git clone https://github.com/openshift/openshift-ansible.git
+  cd openshift-ansible
+  ```
+
+## Caveats
+This ansible repo is currently under heavy revision for providing OSE support;
+the following items are highly likely to change before the OSE support is
+merged into the upstream repo:
+  * the current git branch for testing
+  * how the inventory file should be configured
+  * variables that need to be set
+  * bootstrapping steps
+  * other configuration steps
+
+## Known Issues
+* Host subscriptions are not configurable yet; the hosts need to be
+  pre-registered with subscription-manager or have the RHEL base repo
+  pre-configured. If using subscription-manager, the following commands will
+  disable all but the rhel-7-server, rhel-7-server-extras and
+  rhel-server-7-ose-beta repos:
+```sh
+subscription-manager repos --disable="*"
+subscription-manager repos \
+--enable="rhel-7-server-rpms" \
+--enable="rhel-7-server-extras-rpms" \
+--enable="rhel-server-7-ose-beta-rpms"
+```
+* Configuration of the router is not automated yet
+* Configuration of the docker-registry is not automated yet
+
+## Configuring the host inventory
+[Ansible docs](http://docs.ansible.com/intro_inventory.html)
+
+Example inventory file for configuring one master and two nodes for the test
+environment. This can be configured in the default inventory file
+(/etc/ansible/hosts), or using a custom file and passing the --inventory
+option to ansible-playbook.
+
+/etc/ansible/hosts:
+```ini
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
+'baseurl':
+'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
+'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name':
+'OpenShift Origin COPR', 'baseurl':
+'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/',
+'enabled': 1, 'gpgcheck': 1, gpgkey:
+'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
+# host group for masters
+[masters]
+ose3-master.example.com
+
+# host group for nodes
+[nodes]
+ose3-node[1:2].example.com
+```
+
+The hostnames above should resolve both from the hosts themselves and
+the host where ansible is running (if different).
+
+## Running the ansible playbooks
+From the openshift-ansible checkout run:
+```sh
+ansible-playbook playbooks/byo/config.yml
+```
+**Note:** this assumes that the host inventory is /etc/ansible/hosts; if using a different
+inventory file, use the -i option for ansible-playbook.
+
+## Post-ansible steps
+#### Create the default router
+On the master host:
+```sh
+systemctl restart openshift-sdn-master
+openshift ex router --create=true \
+  --credentials=/var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig \
+  --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}'
+```
+
+#### Create the default docker-registry
+On the master host:
+```sh
+openshift ex registry --create=true \
+  --credentials=/var/lib/openshift/openshift.local.certificates/openshift-client/.kubeconfig \
+  --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}' \
+  --mount-host=/var/lib/openshift/docker-registry
+```
+
+## Overriding detected ip addresses and hostnames
+Some deployments will require that the user override the detected hostnames
+and ip addresses for the hosts. To see what the default values will be you can
+run the openshift_facts playbook:
+```sh
+ansible-playbook playbooks/byo/openshift_facts.yml
+```
+The output will be similar to:
+```
+ok: [10.3.9.45] => {
+    "result": {
+        "ansible_facts": {
+            "openshift": {
+                "common": {
+                    "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+                    "ip": "172.16.4.79",
+                    "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+                    "public_ip": "10.3.9.45",
+                    "use_openshift_sdn": true
+                },
+                "provider": {
+                  ... <snip> ...
+                }
+            }
+        },
+        "changed": false,
+        "invocation": {
+            "module_args": "",
+            "module_name": "openshift_facts"
+        }
+    }
+}
+ok: [10.3.9.42] => {
+    "result": {
+        "ansible_facts": {
+            "openshift": {
+                "common": {
+                    "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+                    "ip": "172.16.4.75",
+                    "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+                    "public_ip": "10.3.9.42",
+                    "use_openshift_sdn": true
+                },
+                "provider": {
+                  ...<snip>...
+                }
+            }
+        },
+        "changed": false,
+        "invocation": {
+            "module_args": "",
+            "module_name": "openshift_facts"
+        }
+    }
+}
+ok: [10.3.9.36] => {
+    "result": {
+        "ansible_facts": {
+            "openshift": {
+                "common": {
+                    "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+                    "ip": "172.16.4.73",
+                    "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+                    "public_ip": "10.3.9.36",
+                    "use_openshift_sdn": true
+                },
+                "provider": {
+                    ...<snip>...
+                }
+            }
+        },
+        "changed": false,
+        "invocation": {
+            "module_args": "",
+            "module_name": "openshift_facts"
+        }
+    }
+}
+```
+Now we want to check the detected common settings and verify that they are
+what we expect them to be (if not, we can override them).
+
+* hostname
+  * Should resolve to the internal ip from the instances themselves.
+  * openshift_hostname will override.
+* ip
+  * Should be the internal ip of the instance.
+  * openshift_ip will override.
+* public_hostname
+  * Should resolve to the external ip from hosts outside of the cloud provider.
+  * openshift_public_hostname will override.
+* public_ip
+  * Should be the externally accessible ip associated with the instance.
+  * openshift_public_ip will override.
+* use_openshift_sdn
+  * Should be true unless the cloud is GCE.
+  * openshift_use_openshift_sdn will override.
+
+To override the defaults, you can set the variables in your inventory:
+```
+...snip...
+[masters]
+ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com
+...snip...
+```

+ 130 - 0
README_libvirt.md

@@ -0,0 +1,130 @@
+LIBVIRT Setup instructions
+==========================
+
+`libvirt` is an `openshift-ansible` provider that uses `libvirt` to create local Fedora VMs that are provisioned exactly the same way that cloud VMs would be provisioned.
+
+This makes `libvirt` useful to develop, test and debug OpenShift and openshift-ansible locally on the developer’s workstation before going to the cloud.
+
+Install dependencies
+--------------------
+
+1.	Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
+2.	Install [ebtables](http://ebtables.netfilter.org/)
+3.	Install [qemu](http://wiki.qemu.org/Main_Page)
+4.	Install [libvirt](http://libvirt.org/)
+5.	Enable and start the libvirt daemon, e.g.:
+	-	`systemctl enable libvirtd`
+	-	`systemctl start libvirtd`
+6.	[Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
+7.	Check that your `$HOME` is accessible to the qemu user²
+8.	Configure dns resolution on the host³
+
+#### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
+
+You can test it with the following command:
+
+```
+virsh -c qemu:///system pool-list
+```
+
+If you have access error messages, please read https://libvirt.org/acl.html and https://libvirt.org/aclpolkit.html .
+
+In short, if your libvirt has been compiled with Polkit support (ex: Arch, Fedora 21), you can create `/etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules` as follows to grant full access to libvirt to `$USER`
+
+```
+sudo /bin/sh -c "cat - > /etc/polkit-1/rules.d/50-org.libvirt.unix.manage.rules" << EOF
+polkit.addRule(function(action, subject) {
+        if (action.id == "org.libvirt.unix.manage" &&
+            subject.user == "$USER") {
+                polkit.log("action=" + action);
+                polkit.log("subject=" + subject);
+                return polkit.Result.YES;
+        }
+});
+EOF
+```
+
+If your libvirt has not been compiled with Polkit (ex: Ubuntu 14.04.1 LTS), check the permissions on the libvirt unix socket:
+
+```
+ls -l /var/run/libvirt/libvirt-sock
+srwxrwx--- 1 root libvirtd 0 févr. 12 16:03 /var/run/libvirt/libvirt-sock
+
+usermod -a -G libvirtd $USER
+# $USER needs to logout/login to have the new group be taken into account
+```
+
+(Replace `$USER` with your login name)
+
+#### ² Qemu will run with a specific user. It must have access to the VMs' drives
+
+All the disk drive resources needed by the VMs (Fedora disk image, cloud-init files) are put inside `~/libvirt-storage-pool-openshift/`.
+
+As we’re using the `qemu:///system` instance of libvirt, qemu will run with a specific `user:group` distinct from your user. It is configured in `/etc/libvirt/qemu.conf`. That qemu user must have access to that libvirt storage pool.
+
+If your `$HOME` is world readable, everything is fine. If your `$HOME` is private, `ansible` will fail with an error message like:
+
+```
+error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
+```
+
+In order to fix that issue, you have several possibilities:
+* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
+  * backed by a filesystem with a lot of free disk space
+  * writable by your user
+  * accessible by the qemu user
+* Grant the qemu user access to the storage pool.
+
+On Arch:
+
+```
+setfacl -m g:kvm:--x ~
+```
+
+#### ³ Enabling DNS resolution to your guest VMs with NetworkManager
+
+-	Verify NetworkManager is configured to use dnsmasq:
+
+```sh
+$ sudo vi /etc/NetworkManager/NetworkManager.conf
+[main]
+dns=dnsmasq
+```
+
+-	Configure dnsmasq to use the Virtual Network router for example.com:
+
+```sh
+sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
+server=/example.com/192.168.55.1
+```
+
+Test The Setup
+--------------
+
+1.	cd openshift-ansible/
+2.	Try to list all instances (Passing an empty string as the cluster_id argument will result in all libvirt instances being listed)
+
+```
+  bin/cluster list libvirt ''
+```
+
+Creating a cluster
+------------------
+
+1.	To create a cluster with one master and two nodes
+
+```
+  bin/cluster create libvirt lenaic
+```
+
+Updating a cluster
+------------------
+
+1.	To update the cluster
+
+```
+  bin/cluster update libvirt lenaic
+```
+
+Terminating a cluster
+---------------------
+
+1.	To terminate the cluster
+
+```
+  bin/cluster terminate libvirt lenaic
+```

+ 23 - 0
ansible.cfg

@@ -0,0 +1,23 @@
+# config file for ansible -- http://ansible.com/
+# ==============================================
+
+# This config file provides examples for running
+# the OpenShift playbooks with the provided
+# inventory scripts. Only global defaults are
+# left uncommented
+
+[defaults]
+# Add the roles directory to the roles path
+roles_path = roles/
+
+# Set the log_path
+log_path = /tmp/ansible.log
+
+# Uncomment to use the provided BYO inventory
+#hostfile = inventory/byo/hosts
+
+# Uncomment to use the provided GCE dynamic inventory script
+#hostfile = inventory/gce/gce.py
+
+# Uncomment to use the provided AWS dynamic inventory script
+#hostfile = inventory/aws/ec2.py

+ 241 - 0
bin/cluster

@@ -0,0 +1,241 @@
+#!/usr/bin/env python2
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import argparse
+import ConfigParser
+import sys
+import os
+
+
+class Cluster(object):
+    """
+    Control and Configuration Interface for OpenShift Clusters
+    """
+    def __init__(self):
+        # setup ansible ssh environment
+        if 'ANSIBLE_SSH_ARGS' not in os.environ:
+            os.environ['ANSIBLE_SSH_ARGS'] = (
+                '-o ForwardAgent=yes '
+                '-o StrictHostKeyChecking=no '
+                '-o UserKnownHostsFile=/dev/null '
+                '-o ControlMaster=auto '
+                '-o ControlPersist=600s '
+            )
+
+    def get_deployment_type(self, args):
+        """
+        Get the deployment_type based on the environment variables and the
+        command line arguments
+        :param args: command line arguments provided by the user
+        :return: string representing the deployment type
+        """
+        deployment_type = 'origin'
+        if args.deployment_type:
+            deployment_type = args.deployment_type
+        elif 'OS_DEPLOYMENT_TYPE' in os.environ:
+            deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
+        return deployment_type
+
+    def create(self, args):
+        """
+        Create an OpenShift cluster for given provider
+        :param args: command line arguments provided by user
+        :return: exit status from run command
+        """
+        env = {'cluster_id': args.cluster_id,
+               'deployment_type': self.get_deployment_type(args)}
+        playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider)
+        inventory = self.setup_provider(args.provider)
+
+        env['num_masters'] = args.masters
+        env['num_nodes'] = args.nodes
+
+        return self.action(args, inventory, env, playbook)
+
+    def terminate(self, args):
+        """
+        Destroy OpenShift cluster
+        :param args: command line arguments provided by user
+        :return: exit status from run command
+        """
+        env = {'cluster_id': args.cluster_id,
+               'deployment_type': self.get_deployment_type(args)}
+        playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
+        inventory = self.setup_provider(args.provider)
+
+        return self.action(args, inventory, env, playbook)
+
+    def list(self, args):
+        """
+        List VMs in cluster
+        :param args: command line arguments provided by user
+        :return: exit status from run command
+        """
+        env = {'cluster_id': args.cluster_id,
+               'deployment_type': self.get_deployment_type(args)}
+        playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
+        inventory = self.setup_provider(args.provider)
+
+        return self.action(args, inventory, env, playbook)
+
+    def config(self, args):
+        """
+        Configure or reconfigure OpenShift across clustered VMs
+        :param args: command line arguments provided by user
+        :return: exit status from run command
+        """
+        env = {'cluster_id': args.cluster_id,
+               'deployment_type': self.get_deployment_type(args)}
+        playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider)
+        inventory = self.setup_provider(args.provider)
+
+        return self.action(args, inventory, env, playbook)
+
+    def update(self, args):
+        """
+        Update to latest OpenShift across clustered VMs
+        :param args: command line arguments provided by user
+        :return: exit status from run command
+        """
+        env = {'cluster_id': args.cluster_id,
+               'deployment_type': self.get_deployment_type(args)}
+        playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
+        inventory = self.setup_provider(args.provider)
+
+        return self.action(args, inventory, env, playbook)
+
+    def setup_provider(self, provider):
+        """
+        Setup ansible playbook environment
+        :param provider: command line arguments provided by user
+        :return: path to inventory for given provider
+        """
+        config = ConfigParser.ConfigParser()
+        if 'gce' == provider:
+            config.readfp(open('inventory/gce/hosts/gce.ini'))
+
+            for key in config.options('gce'):
+                os.environ[key] = config.get('gce', key)
+
+            inventory = '-i inventory/gce/hosts'
+        elif 'aws' == provider:
+            config.readfp(open('inventory/aws/hosts/ec2.ini'))
+
+            for key in config.options('ec2'):
+                os.environ[key] = config.get('ec2', key)
+
+            inventory = '-i inventory/aws/hosts'
+        elif 'libvirt' == provider:
+            inventory = '-i inventory/libvirt/hosts'
+        else:
+            # this code should never be reached
+            raise ValueError("invalid PROVIDER {}".format(provider))
+
+        return inventory
+
+    def action(self, args, inventory, env, playbook):
+        """
+        Build ansible-playbook command line and execute
+        :param args: command line arguments provided by user
+        :param inventory: derived provider library
+        :param env: environment variables for kubernetes
+        :param playbook: ansible playbook to execute
+        :return: exit status from ansible-playbook command
+        """
+
+        verbose = ''
+        if args.verbose > 0:
+            verbose = '-{}'.format('v' * args.verbose)
+
+        ansible_env = '-e \'{}\''.format(
+            ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()])
+        )
+
+        command = 'ansible-playbook {} {} {} {}'.format(
+            verbose, inventory, ansible_env, playbook
+        )
+
+        if args.verbose > 1:
+            command = 'time {}'.format(command)
+
+        if args.verbose > 0:
+            sys.stderr.write('RUN [{}]\n'.format(command))
+            sys.stderr.flush()
+
+        return os.system(command)
+
+
+if __name__ == '__main__':
+    """
+    Implemented to support writing unit tests
+    """
+
+    cluster = Cluster()
+
+    providers = ['gce', 'aws', 'libvirt']
+    parser = argparse.ArgumentParser(
+        description='Python wrapper to ensure proper environment for OpenShift ansible playbooks',
+    )
+    parser.add_argument('-v', '--verbose', action='count',
+                        help='Multiple -v options increase the verbosity')
+    parser.add_argument('--version', action='version', version='%(prog)s 0.2')
+
+    meta_parser = argparse.ArgumentParser(add_help=False)
+    meta_parser.add_argument('provider', choices=providers, help='provider')
+    meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
+    meta_parser.add_argument('-t', '--deployment-type',
+                             choices=['origin', 'online', 'enterprise'],
+                             help='Deployment type. (default: origin)')
+
+    action_parser = parser.add_subparsers(dest='action', title='actions',
+                                          description='Choose from valid actions')
+
+    create_parser = action_parser.add_parser('create', help='Create a cluster',
+                                             parents=[meta_parser])
+    create_parser.add_argument('-m', '--masters', default=1, type=int,
+                               help='number of masters to create in cluster')
+    create_parser.add_argument('-n', '--nodes', default=2, type=int,
+                               help='number of nodes to create in cluster')
+    create_parser.set_defaults(func=cluster.create)
+
+    config_parser = action_parser.add_parser('config',
+                                             help='Configure or reconfigure a cluster',
+                                             parents=[meta_parser])
+    config_parser.set_defaults(func=cluster.config)
+
+    terminate_parser = action_parser.add_parser('terminate',
+                                                help='Destroy a cluster',
+                                                parents=[meta_parser])
+    terminate_parser.add_argument('-f', '--force', action='store_true',
+                                  help='Destroy cluster without confirmation')
+    terminate_parser.set_defaults(func=cluster.terminate)
+
+    update_parser = action_parser.add_parser('update',
+                                             help='Update OpenShift across cluster',
+                                             parents=[meta_parser])
+    update_parser.add_argument('-f', '--force', action='store_true',
+                               help='Update cluster without confirmation')
+    update_parser.set_defaults(func=cluster.update)
+
+    list_parser = action_parser.add_parser('list', help='List VMs in cluster',
+                                           parents=[meta_parser])
+    list_parser.set_defaults(func=cluster.list)
+
+    args = parser.parse_args()
+
+    if 'terminate' == args.action and not args.force:
+        answer = raw_input("This will destroy the ENTIRE {} environment. Are you sure? [y/N] ".format(args.cluster_id))
+        if answer not in ['y', 'Y']:
+            sys.stderr.write('\nACTION [terminate] aborted by user!\n')
+            exit(1)
+
+    if 'update' == args.action and not args.force:
+        answer = raw_input("This is destructive and could corrupt {} environment. Continue? [y/N] ".format(args.cluster_id))
+        if answer not in ['y', 'Y']:
+            sys.stderr.write('\nACTION [update] aborted by user!\n')
+            exit(1)
+
+    status = args.func(args)
+    if status != 0:
+        sys.stderr.write("ACTION [{}] failed with exit status {}\n".format(args.action, status))
+    exit(status)
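For readers tracing the flow above, here is a minimal illustrative sketch (hypothetical example values, not code from the repository) of how `bin/cluster` turns its arguments into the `ansible-playbook` command it eventually runs:

```python
# As if the user ran: bin/cluster create aws demo -t enterprise -v
provider, cluster_id, deployment_type, verbosity = 'aws', 'demo', 'enterprise', 1

# Extra vars handed to the playbook (the same keys Cluster.create() sets above)
env = {'cluster_id': cluster_id,
       'deployment_type': deployment_type,
       'num_masters': 1,
       'num_nodes': 2}

playbook = "playbooks/{}/openshift-cluster/launch.yml".format(provider)
inventory = '-i inventory/aws/hosts'            # chosen by setup_provider()

verbose = '-{}'.format('v' * verbosity) if verbosity else ''
extra_vars = "-e '{}'".format(' '.join('%s=%s' % (k, v) for k, v in env.items()))

# Cluster.action() ultimately runs this string through os.system()
command = 'ansible-playbook {} {} {} {}'.format(verbose, inventory, extra_vars, playbook)
print(command)
```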

+ 110 - 0
bin/ohi

@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+import argparse
+import traceback
+import sys
+import os
+import re
+import tempfile
+import time
+import subprocess
+import ConfigParser
+
+from openshift_ansible import awsutil
+from openshift_ansible.awsutil import ArgumentError
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
+CONFIG_INVENTORY_OPTION = 'inventory'
+
+class Ohi(object):
+    def __init__(self):
+        self.inventory = None
+        self.host_type_aliases = {}
+        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+        # Default the config path to /etc
+        self.config_path = os.path.join(os.path.sep, 'etc',  \
+                                        'openshift_ansible', \
+                                        'openshift_ansible.conf')
+
+        self.parse_cli_args()
+        self.parse_config_file()
+
+        self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
+
+    def run(self):
+        if self.args.list_host_types:
+            self.aws.print_host_types()
+            return 0
+
+        hosts = None
+        if self.args.host_type is not None and \
+           self.args.env is not None:
+            # Both env and host-type specified
+            hosts = self.aws.get_host_list(host_type=self.args.host_type, \
+                                           env=self.args.env)
+
+        if self.args.host_type is None and \
+           self.args.env is not None:
+            # Only env specified
+            hosts = self.aws.get_host_list(env=self.args.env)
+
+        if self.args.host_type is not None and \
+           self.args.env is None:
+            # Only host-type specified
+            hosts = self.aws.get_host_list(host_type=self.args.host_type)
+
+        if hosts is None:
+            # We weren't able to determine what they wanted to do
+            raise ArgumentError("Invalid combination of arguments")
+
+        for host in hosts:
+            print host
+        return 0
+
+    def parse_config_file(self):
+        if os.path.isfile(self.config_path):
+            config = ConfigParser.ConfigParser()
+            config.read(self.config_path)
+
+            if config.has_section(CONFIG_MAIN_SECTION) and \
+               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
+            self.host_type_aliases = {}
+            if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
+                for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
+                    value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
+                    self.host_type_aliases[alias] = value
+
+    def parse_cli_args(self):
+        """Setup the command line parser with the options we want
+        """
+
+        parser = argparse.ArgumentParser(description='Openshift Host Inventory')
+
+        parser.add_argument('--list-host-types', default=False, action='store_true',
+                       help='List all of the host types')
+
+        parser.add_argument('-e', '--env', action="store",
+                       help="Which environment to use")
+
+        parser.add_argument('-t', '--host-type', action="store",
+                       help="Which host type to use")
+
+        self.args = parser.parse_args()
+
+
+if __name__ == '__main__':
+    if len(sys.argv) == 1:
+        print "\nError: No options given. Use --help to see the available options\n"
+        sys.exit(0)
+
+    try:
+        ohi = Ohi()
+        exitcode = ohi.run()
+        sys.exit(exitcode)
+    except ArgumentError as e:
+        print "\nError: %s\n" % e.message

+ 65 - 0
bin/openshift-ansible-bin.spec

@@ -0,0 +1,65 @@
+Summary:       OpenShift Ansible Scripts for working with metadata hosts
+Name:          openshift-ansible-bin
+Version:       0.0.8
+Release:       1%{?dist}
+License:       ASL 2.0
+URL:           https://github.com/openshift/openshift-ansible
+Source0:       %{name}-%{version}.tar.gz
+Requires:      python2, openshift-ansible-inventory
+BuildRequires: python2-devel
+BuildArch:     noarch
+
+%description
+Scripts to make it nicer when working with hosts that are defined only by metadata.
+
+%prep
+%setup -q
+
+%build
+
+%install
+mkdir -p %{buildroot}%{_bindir}
+mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
+mkdir -p %{buildroot}/etc/bash_completion.d
+mkdir -p %{buildroot}/etc/openshift_ansible
+
+cp -p ossh oscp opssh ohi %{buildroot}%{_bindir}
+cp -p openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
+
+cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
+
+%files
+%{_bindir}/*
+%{python_sitelib}/openshift_ansible/
+/etc/bash_completion.d/*
+%config(noreplace) /etc/openshift_ansible/
+
+%changelog
+* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.8-1
+- fixed bug in opssh where it wouldn't actually run pssh (twiest@redhat.com)
+
+* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.7-1
+- added the ability to run opssh and ohi on all hosts in an environment, as
+  well as all hosts of the same host-type regardless of environment
+  (twiest@redhat.com)
+- added ohi (twiest@redhat.com)
+* Thu Apr 09 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
+- fixed bug where opssh would throw an exception if pssh returned a non-zero
+  exit code (twiest@redhat.com)
+
+* Wed Apr 08 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
+- fixed the opssh default output behavior to be consistent with pssh. Also
+  fixed a bug in how directories are named for --outdir and --errdir.
+  (twiest@redhat.com)
+* Tue Mar 31 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
+- Fixed when tag was missing and added opssh completion (kwoodson@redhat.com)
+
+* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
+- created a python package named openshift_ansible (twiest@redhat.com)
+
+* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
+- added config file support to opssh, ossh, and oscp (twiest@redhat.com)
+* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
+- new package built with tito
+

+ 6 - 0
bin/openshift_ansible.conf.example

@@ -0,0 +1,6 @@
+#[main]
+#inventory = /usr/share/ansible/inventory/multi_ec2.py
+
+#[host_type_aliases]
+#host-type-one = aliasa,aliasb
+#host-type-two = aliasfortwo

+ 0 - 0
bin/openshift_ansible/__init__.py


+ 54 - 16
bin/awsutil.py

@@ -5,28 +5,36 @@ import os
 import json
 import re
 
+class ArgumentError(Exception):
+    def __init__(self, message):
+        self.message = message
+
 class AwsUtil(object):
-    def __init__(self):
-        self.host_type_aliases = {
-                'legacy-openshift-broker': ['broker', 'ex-srv'],
-                         'openshift-node': ['node', 'ex-node'],
-                   'openshift-messagebus': ['msg'],
-            'openshift-customer-database': ['mongo'],
-                'openshift-website-proxy': ['proxy'],
-            'openshift-community-website': ['drupal'],
-                         'package-mirror': ['mirror'],
-        }
+    def __init__(self, inventory_path=None, host_type_aliases={}):
+        self.host_type_aliases = host_type_aliases
+        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+        if inventory_path is None:
+            inventory_path = os.path.realpath(os.path.join(self.file_path, \
+                                              '..', '..', 'inventory', \
+                                              'multi_ec2.py'))
+
+        if not os.path.isfile(inventory_path):
+            raise Exception("Inventory file not found [%s]" % inventory_path)
 
+        self.inventory_path = inventory_path
+        self.setup_host_type_alias_lookup()
+
+    def setup_host_type_alias_lookup(self):
         self.alias_lookup = {}
         for key, values in self.host_type_aliases.iteritems():
             for value in values:
                 self.alias_lookup[value] = key
 
-        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-        self.multi_ec2_path = os.path.realpath(os.path.join(self.file_path, '..','inventory','multi_ec2.py'))
+
 
     def get_inventory(self,args=[]):
-        cmd = [self.multi_ec2_path]
+        cmd = [self.inventory_path]
 
         if args:
             cmd.extend(args)
@@ -124,15 +132,45 @@ class AwsUtil(object):
             return self.alias_lookup[host_type]
         return host_type
 
+    def gen_env_tag(self, env):
+        """Generate the environment tag
+        """
+        return "tag_environment_%s" % env
+
+    def gen_host_type_tag(self, host_type):
+        """Generate the host type tag
+        """
+        host_type = self.resolve_host_type(host_type)
+        return "tag_host-type_%s" % host_type
+
     def gen_env_host_type_tag(self, host_type, env):
         """Generate the environment host type tag
         """
         host_type = self.resolve_host_type(host_type)
         return "tag_env-host-type_%s-%s" % (env, host_type)
 
-    def get_host_list(self, host_type, env):
+    def get_host_list(self, host_type=None, env=None):
         """Get the list of hosts from the inventory using host-type and environment
         """
         inv = self.get_inventory()
-        host_type_tag = self.gen_env_host_type_tag(host_type, env)
-        return inv[host_type_tag]
+
+        if host_type is not None and \
+           env is not None:
+            # Both host type and environment were specified
+            env_host_type_tag = self.gen_env_host_type_tag(host_type, env)
+            return inv[env_host_type_tag]
+
+        if host_type is None and \
+           env is not None:
+            # Just environment was specified
+            host_type_tag = self.gen_env_tag(env)
+            return inv[host_type_tag]
+
+        if host_type is not None and \
+           env is None:
+            # Just host-type was specified
+            host_type_tag = self.gen_host_type_tag(host_type)
+            return inv[host_type_tag]
+
+        # We should never reach here!
+        raise ArgumentError("Invalid combination of parameters")
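The tags that the reworked `get_host_list()` looks up in the multi_ec2 inventory follow directly from the generator methods above. A simplified sketch (alias resolution omitted; the env and host-type values are hypothetical examples):

```python
# Simplified copies of the tag generators shown above (resolve_host_type() skipped)
def gen_env_tag(env):
    return "tag_environment_%s" % env

def gen_host_type_tag(host_type):
    return "tag_host-type_%s" % host_type

def gen_env_host_type_tag(host_type, env):
    return "tag_env-host-type_%s-%s" % (env, host_type)

print gen_env_tag('prod')                    # tag_environment_prod
print gen_host_type_tag('node')              # tag_host-type_node
print gen_env_host_type_tag('node', 'prod')  # tag_env-host-type_prod-node
```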

+ 57 - 22
bin/opssh

@@ -2,7 +2,6 @@
 # vim: expandtab:tabstop=4:shiftwidth=4
 
 import argparse
-import awsutil
 import traceback
 import sys
 import os
@@ -10,54 +9,71 @@ import re
 import tempfile
 import time
 import subprocess
+import ConfigParser
 
-DEFAULT_PSSH_PAR=200
+from openshift_ansible import awsutil
+from openshift_ansible.awsutil import ArgumentError
+
+DEFAULT_PSSH_PAR = 200
 PSSH = '/usr/bin/pssh'
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
+CONFIG_INVENTORY_OPTION = 'inventory'
 
 class Opssh(object):
     def __init__(self):
+        self.inventory = None
+        self.host_type_aliases = {}
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-        self.aws = awsutil.AwsUtil()
+
+        # Default the config path to /etc
+        self.config_path = os.path.join(os.path.sep, 'etc',  \
+                                        'openshift_ansible', \
+                                        'openshift_ansible.conf')
 
         self.parse_cli_args()
+        self.parse_config_file()
+
+        self.aws = awsutil.AwsUtil(self.inventory, self.host_type_aliases)
 
+    def run(self):
         if self.args.list_host_types:
             self.aws.print_host_types()
-            return
+            return 0
 
-        if self.args.env and \
-           self.args.host_type and \
-           self.args.command:
-            retval = self.run_pssh()
-            if retval != 0:
-                raise ValueError("pssh run failed")
+        if self.args.host_type is not None or \
+           self.args.env is not None:
+            return self.run_pssh()
 
-            return
-
-        # If it makes it here, we weren't able to determine what they wanted to do
-        raise ValueError("Invalid combination of arguments")
+        # We weren't able to determine what they wanted to do
+        raise ArgumentError("Invalid combination of arguments")
 
     def run_pssh(self):
         """Actually run the pssh command based off of the supplied options
         """
 
         # Default set of options
-        pssh_args = [PSSH, '-i', '-t', '0', '-p', str(self.args.par), '--user', self.args.user]
+        pssh_args = [PSSH, '-t', '0', '-p', str(self.args.par), '--user', self.args.user]
+
+        if self.args.inline:
+            pssh_args.append("--inline")
 
         if self.args.outdir:
-            pssh_args.append("--outdir='%s'" % self.args.outdir)
+            pssh_args.extend(["--outdir", self.args.outdir])
 
         if self.args.errdir:
-            pssh_args.append("--errdir='%s'" % self.args.errdir)
+            pssh_args.extend(["--errdir", self.args.errdir])
+
+        hosts = self.aws.get_host_list(host_type=self.args.host_type,
+                                       env=self.args.env)
 
-        hosts = self.aws.get_host_list(self.args.host_type, self.args.env)
         with tempfile.NamedTemporaryFile(prefix='opssh-', delete=True) as f:
             for h in hosts:
                 f.write(h + os.linesep)
             f.flush()
 
-            pssh_args.extend(["-h", "%s" % f.name])
-            pssh_args.append("%s" % self.args.command)
+            pssh_args.extend(["-h", f.name])
+            pssh_args.append(self.args.command)
 
             print
             print "Running: %s" % ' '.join(pssh_args)
@@ -66,6 +82,20 @@ class Opssh(object):
 
         return None
 
+    def parse_config_file(self):
+        if os.path.isfile(self.config_path):
+            config = ConfigParser.ConfigParser()
+            config.read(self.config_path)
+
+            if config.has_section(CONFIG_MAIN_SECTION) and \
+               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
+            self.host_type_aliases = {}
+            if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
+                for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
+                    value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
+                    self.host_type_aliases[alias] = value
 
     def parse_cli_args(self):
         """Setup the command line parser with the options we want
@@ -79,7 +109,7 @@ class Opssh(object):
         parser.add_argument('-e', '--env', action="store",
                        help="Which environment to use")
 
-        parser.add_argument('-t', '--host-type', action="store",
+        parser.add_argument('-t', '--host-type', action="store", default=None,
                        help="Which host type to use")
 
         parser.add_argument('-c', '--command', action='store',
@@ -88,6 +118,9 @@ class Opssh(object):
         parser.add_argument('--user', action='store', default='root',
                        help='username')
 
+        parser.add_argument('-i', '--inline', default=False, action='store_true',
+                       help='inline aggregated output and error for each server')
+
         parser.add_argument('-p', '--par', action='store', default=DEFAULT_PSSH_PAR,
                        help=('max number of parallel threads (default %s)' % DEFAULT_PSSH_PAR))
 
@@ -107,5 +140,7 @@ if __name__ == '__main__':
 
     try:
         opssh = Opssh()
-    except ValueError as e:
+        exitcode = opssh.run()
+        sys.exit(exitcode)
+    except ArgumentError as e:
         print "\nError: %s\n" % e.message
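As a rough illustration (hypothetical host file path and command), the pssh invocation that `run_pssh()` assembles ends up looking like this:

```python
# Sketch of the argument list Opssh.run_pssh() builds before handing it to subprocess
PSSH = '/usr/bin/pssh'
par, user, command = 200, 'root', 'uptime'        # defaults plus an example -c command

pssh_args = [PSSH, '-t', '0', '-p', str(par), '--user', user]
pssh_args.append('--inline')                      # only added when -i/--inline is given
pssh_args.extend(['-h', '/tmp/opssh-hostfile'])   # temp file with one inventory host per line
pssh_args.append(command)

print ' '.join(pssh_args)
# /usr/bin/pssh -t 0 -p 200 --user root --inline -h /tmp/opssh-hostfile uptime
```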

+ 25 - 3
bin/oscp

@@ -2,21 +2,34 @@
 # vim: expandtab:tabstop=4:shiftwidth=4
 
 import argparse
-import awsutil
 import traceback
 import sys
 import os
 import re
+import ConfigParser
+
+from openshift_ansible import awsutil
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_INVENTORY_OPTION = 'inventory'
 
 class Oscp(object):
     def __init__(self):
+        self.inventory = None
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+        # Default the config path to /etc
+        self.config_path = os.path.join(os.path.sep, 'etc',  \
+                                        'openshift_ansible', \
+                                        'openshift_ansible.conf')
+
         self.parse_cli_args()
+        self.parse_config_file()
 
         # parse host and user
         self.process_host()
 
-        self.aws = awsutil.AwsUtil()
+        self.aws = awsutil.AwsUtil(self.inventory)
 
         # get a dict of host inventory
         if self.args.list:
@@ -38,9 +51,18 @@ class Oscp(object):
         else:
         else:
             self.scp()
             self.scp()
 
 
+    def parse_config_file(self):
+        if os.path.isfile(self.config_path):
+            config = ConfigParser.ConfigParser()
+            config.read(self.config_path)
+
+            if config.has_section(CONFIG_MAIN_SECTION) and \
+               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
     def parse_cli_args(self):
     def parse_cli_args(self):
         parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
         parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
-        parser.add_argument('-e', '--env', 
+        parser.add_argument('-e', '--env',
                           action="store", help="Environment where this server exists.")
                           action="store", help="Environment where this server exists.")
         parser.add_argument('-d', '--debug', default=False,
         parser.add_argument('-d', '--debug', default=False,
                           action="store_true", help="debug mode")
                           action="store_true", help="debug mode")

+ 24 - 2
bin/ossh

@@ -2,18 +2,31 @@
 # vim: expandtab:tabstop=4:shiftwidth=4
 
 import argparse
-import awsutil
 import traceback
 import sys
 import os
 import re
+import ConfigParser
+
+from openshift_ansible import awsutil
+
+CONFIG_MAIN_SECTION = 'main'
+CONFIG_INVENTORY_OPTION = 'inventory'
 
 class Ossh(object):
     def __init__(self):
+        self.inventory = None
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
+
+        # Default the config path to /etc
+        self.config_path = os.path.join(os.path.sep, 'etc',  \
+                                        'openshift_ansible', \
+                                        'openshift_ansible.conf')
+
         self.parse_cli_args()
+        self.parse_config_file()
 
-        self.aws = awsutil.AwsUtil()
+        self.aws = awsutil.AwsUtil(self.inventory)
 
         # get a dict of host inventory
         if self.args.list:
@@ -37,6 +50,15 @@ class Ossh(object):
         else:
             self.ssh()
 
+    def parse_config_file(self):
+        if os.path.isfile(self.config_path):
+            config = ConfigParser.ConfigParser()
+            config.read(self.config_path)
+
+            if config.has_section(CONFIG_MAIN_SECTION) and \
+               config.has_option(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION):
+                self.inventory = config.get(CONFIG_MAIN_SECTION, CONFIG_INVENTORY_OPTION)
+
     def parse_cli_args(self):
         parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
         parser.add_argument('-e', '--env', action="store",

+ 22 - 1
bin/ossh_bash_completion

@@ -1,6 +1,7 @@
 __ossh_known_hosts(){
     if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])'
+        /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+
     fi
 }
 
@@ -16,3 +17,23 @@ _ossh()
     return 0
 }
 complete -F _ossh ossh oscp
+
+__opssh_known_hosts(){
+    if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+                /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+    fi
+}
+
+_opssh()
+{
+    local cur prev known_hosts
+    COMPREPLY=()
+    cur="${COMP_WORDS[COMP_CWORD]}"
+    prev="${COMP_WORDS[COMP_CWORD-1]}"
+    known_hosts="$(__opssh_known_hosts)"
+    COMPREPLY=( $(compgen -W "${known_hosts}" -- ${cur}))
+
+    return 0
+}
+complete -F _opssh opssh
+
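
To pick up the new _opssh completion (and the existing _ossh one), the file only needs to be sourced; a system-wide install is one option (paths are illustrative):

    # current shell only
    source bin/ossh_bash_completion
    # or for new login shells
    sudo cp bin/ossh_bash_completion /etc/bash_completion.d/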

+ 0 - 113
cluster.sh

@@ -1,113 +0,0 @@
-#!/bin/bash -eu
-
-NODES=2
-MASTERS=1
-
-# If the environment variable OO_PROVDER is defined, it used for the provider
-PROVIDER=${OO_PROVIDER:-''}
-# Otherwise, default is gce (Google Compute Engine)
-if [ "x$PROVIDER" == "x" ];then
-   PROVIDER=gce
-fi
-
-UPPER_CASE_PROVIDER=$(echo $PROVIDER | tr '[:lower:]' '[:upper:]')
-
-
-# Use OO_MASTER_PLAYBOOK/OO_NODE_PLAYBOOK environment variables for playbooks if defined,
-# otherwise use openshift default values.
-MASTER_PLAYBOOK=${OO_MASTER_PLAYBOOK:-'openshift-master'}
-NODE_PLAYBOOK=${OO_NODE_PLAYBOOK:-'openshift-node'}
-
-
-# @formatter:off
-function usage {
-    cat 1>&2 <<-EOT
-        ${0} : [create|terminate|update|list] { ${UPPER_CASE_PROVIDER} environment tag}
-
-        Supported environment tags:
-        $(grep --no-messages 'SUPPORTED_ENVS.*=' ./lib/${PROVIDER}_command.rb)
-        $([ $? -ne 0 ] && echo "No supported environment tags found for ${PROVIDER}")
-
-        Optional arguments for create:
-        [-p|--provider, -m|--masters, -n|--nodes, --master-playbook, --node-playbook]
-
-        Optional arguments for terminate|update:
-        [-p|--provider, --master-playbook, --node-playbook]
-EOT
-}
-# @formatter:on
-
-function create_cluster {
-    ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$MASTER_PLAYBOOK -c $MASTERS
-
-    ./cloud.rb "${PROVIDER}" launch -e "${ENV}" --type=$NODE_PLAYBOOK -c $NODES
-
-    update_cluster
-
-    echo -e "\nCreated ${MASTERS}/${MASTER_PLAYBOOK} masters and ${NODES}/${NODE_PLAYBOOK} nodes using ${PROVIDER} provider\n"
-}
-
-function update_cluster {
-    ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$MASTER_PLAYBOOK
-    ./cloud.rb "${PROVIDER}" config -e "${ENV}" --type=$NODE_PLAYBOOK
-}
-
-function terminate_cluster {
-    ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$MASTER_PLAYBOOK
-    ./cloud.rb "${PROVIDER}" terminate -e "${ENV}" --type=$NODE_PLAYBOOK
-}
-
-[ -f ./cloud.rb ] || (echo 1>&2 'Cannot find ./cloud.rb' && exit 1)
-
-function check_argval {
-    if [[ $1 == -* ]]; then
-        echo "Invalid value: '$1'"
-        usage
-        exit 1
-    fi
-}
-
-# Using GNU getopt to support both small and long formats
-OPTIONS=`getopt -o p:m:n:h --long provider:,masters:,nodes:,master-playbook:,node-playbook:,help \
-	        -n "$0" -- "$@"`
-eval set -- "$OPTIONS"
-
-while true; do
-    case "$1" in
-        -h|--help) (usage; exit 1) ; shift ;;
-        -p|--provider) PROVIDER="$2" ; check_argval $2 ; shift 2 ;;
-        -m|--masters) MASTERS="$2" ; check_argval $2 ; shift 2 ;;
-        -n|--nodes) NODES="$2" ; check_argval $2 ; shift 2 ;;
-        --master-playbook) MASTER_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
-        --node-playbook) NODE_PLAYBOOK="$2" ; check_argval $2 ; shift 2 ;;
-        --) shift ; break ;;
-        *) break ;;
-    esac
-done
-
-shift $((OPTIND-1))
-
-[ -z "${1:-}" ] && (usage; exit 1)
-
-case "${1}" in
-    'create')
-        [ -z "${2:-}" ] && (usage; exit 1)
-        ENV="${2}"
-        create_cluster ;;
-    'update')
-        [ -z "${2:-}" ] && (usage; exit 1)
-        ENV="${2}"
-        update_cluster ;;
-    'terminate')
-        [ -z "${2:-}" ] && (usage; exit 1)
-        ENV="${2}"
-        terminate_cluster ;;
-    'list')   ./cloud.rb "${PROVIDER}" list ;;
-    'help')   usage; exit 0 ;;
-    *)
-        echo -n 1>&2 "${1} is not a supported operation";
-        usage;
-        exit 1 ;;
-esac
-
-exit 0

+ 164 - 53
filter_plugins/oo_filters.py

@@ -1,39 +1,57 @@
-from ansible import errors, runner
-import json
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
+
+from ansible import errors
+from operator import itemgetter
 import pdb
 
 def oo_pdb(arg):
-  ''' This pops you into a pdb instance where arg is the data passed in from the filter.
+    ''' This pops you into a pdb instance where arg is the data passed in
+        from the filter.
         Ex: "{{ hostvars | oo_pdb }}"
-  '''
-  pdb.set_trace()
-  return arg
+    '''
+    pdb.set_trace()
+    return arg
 
 
 def oo_len(arg):
-  ''' This returns the length of the argument
+    ''' This returns the length of the argument
         Ex: "{{ hostvars | oo_len }}"
-  '''
-  return len(arg)
+    '''
+    return len(arg)
 
 def get_attr(data, attribute=None):
-  ''' This looks up dictionary attributes of the form a.b.c and returns the value.
+    ''' This looks up dictionary attributes of the form a.b.c and returns
+        the value.
         Ex: data = {'a': {'b': {'c': 5}}}
             attribute = "a.b.c"
             returns 5
-  '''
+    '''
+    if not attribute:
+        raise errors.AnsibleFilterError("|failed expects attribute to be set")
 
 
-  if not attribute:
-    raise errors.AnsibleFilterError("|failed expects attribute to be set")
+    ptr = data
+    for attr in attribute.split('.'):
+        ptr = ptr[attr]
 
 
-  ptr = data
-  for attr in attribute.split('.'):
-    ptr = ptr[attr]
+    return ptr
 
 
-  return ptr
+def oo_flatten(data):
+    ''' This filter plugin will flatten a list of lists
+    '''
+    if not issubclass(type(data), list):
+        raise errors.AnsibleFilterError("|failed expects to flatten a List")
 
 
-def oo_collect(data, attribute=None, filters={}):
-  ''' This takes a list of dict and collects all attributes specified into a list
-      If filter is specified then we will include all items that match _ALL_ of filters.
+    return [item for sublist in data for item in sublist]
+
+
+def oo_collect(data, attribute=None, filters=None):
+    ''' This takes a list of dict and collects all attributes specified into a
+        list. If filter is specified then we will include all items that match
+        _ALL_ of filters.
         Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
                      {'a':2, 'z': 'z'},        # True, return
                      {'a':3, 'z': 'z'},        # True, return
@@ -42,44 +60,137 @@ def oo_collect(data, attribute=None, filters={}):
             attribute = 'a'
             filters   = {'z': 'z'}
             returns [1, 2, 3]
-  '''
+    '''
+    if not issubclass(type(data), list):
+        raise errors.AnsibleFilterError("|failed expects to filter on a List")
 
 
-  if not issubclass(type(data), list):
-    raise errors.AnsibleFilterError("|failed expects to filter on a List")
+    if not attribute:
+        raise errors.AnsibleFilterError("|failed expects attribute to be set")
 
 
-  if not attribute:
-    raise errors.AnsibleFilterError("|failed expects attribute to be set")
+    if filters is not None:
+        if not issubclass(type(filters), dict):
+            raise errors.AnsibleFilterError("|fialed expects filter to be a"
+                                            " dict")
+        retval = [get_attr(d, attribute) for d in data if (
+            all([d[key] == filters[key] for key in filters]))]
+    else:
+        retval = [get_attr(d, attribute) for d in data]
 
 
-  if filters:
-    retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
-  else:
-    retval = [get_attr(d, attribute) for d in data]
-
-  return retval
+    return retval
 
 
 def oo_select_keys(data, keys):
-  ''' This returns a list, which contains the value portions for the keys
+    ''' This returns a list, which contains the value portions for the keys
         Ex: data = { 'a':1, 'b':2, 'c':3 }
             keys = ['a', 'c']
             returns [1, 3]
-  '''
-
-  if not issubclass(type(data), dict):
-    raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
-
-  if not issubclass(type(keys), list):
-    raise errors.AnsibleFilterError("|failed expects first param is a list")
-
-  # Gather up the values for the list of keys passed in
-  retval = [data[key] for key in keys]
-
-  return retval
-
-class FilterModule (object):
-  def filters(self):
-    return {
-      "oo_select_keys": oo_select_keys,
-      "oo_collect": oo_collect,
-      "oo_len": oo_len,
-      "oo_pdb": oo_pdb
-    }
+    '''
+
+    if not issubclass(type(data), dict):
+        raise errors.AnsibleFilterError("|failed expects to filter on a dict")
+
+    if not issubclass(type(keys), list):
+        raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+    # Gather up the values for the list of keys passed in
+    retval = [data[key] for key in keys]
+
+    return retval
+
+def oo_prepend_strings_in_list(data, prepend):
+    ''' This takes a list of strings and prepends a string to each item in the
+        list
+        Ex: data = ['cart', 'tree']
+            prepend = 'apple-'
+            returns ['apple-cart', 'apple-tree']
+    '''
+    if not issubclass(type(data), list):
+        raise errors.AnsibleFilterError("|failed expects first param is a list")
+    if not all(isinstance(x, basestring) for x in data):
+        raise errors.AnsibleFilterError("|failed expects first param is a list"
+                                        " of strings")
+    retval = [prepend + s for s in data]
+    return retval
+
+def oo_ami_selector(data, image_name):
+    ''' This takes a list of amis and an image name and attempts to return
+        the latest ami.
+    '''
+    if not issubclass(type(data), list):
+        raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+    if not data:
+        return None
+    else:
+        if image_name is None or not image_name.endswith('_*'):
+            ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
+            return ami['ami_id']
+        else:
+            ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
+            ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
+            return ami['ami_id']
+
+def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
+    ''' This takes a dictionary of volume definitions and returns a valid ec2
+        volume definition based on the host_type and the values in the
+        dictionary.
+        The dictionary should look similar to this:
+            { 'master':
+                { 'root':
+                    { 'volume_size': 10, 'device_type': 'gp2',
+                      'iops': 500
+                    }
+                },
+              'node':
+                { 'root':
+                    { 'volume_size': 10, 'device_type': 'io1',
+                      'iops': 1000
+                    },
+                  'docker':
+                    { 'volume_size': 40, 'device_type': 'gp2',
+                      'iops': 500, 'ephemeral': 'true'
+                    }
+                }
+            }
+    '''
+    if not issubclass(type(data), dict):
+        raise errors.AnsibleFilterError("|failed expects first param is a dict")
+    if host_type not in ['master', 'node']:
+        raise errors.AnsibleFilterError("|failed expects either master or node"
+                                        " host type")
+
+    root_vol = data[host_type]['root']
+    root_vol['device_name'] = '/dev/sda1'
+    root_vol['delete_on_termination'] = True
+    if root_vol['device_type'] != 'io1':
+        root_vol.pop('iops', None)
+    if host_type == 'node':
+        docker_vol = data[host_type]['docker']
+        docker_vol['device_name'] = '/dev/xvdb'
+        docker_vol['delete_on_termination'] = True
+        if docker_vol['device_type'] != 'io1':
+            docker_vol.pop('iops', None)
+        if docker_ephemeral:
+            docker_vol.pop('device_type', None)
+            docker_vol.pop('delete_on_termination', None)
+            docker_vol['ephemeral'] = 'ephemeral0'
+        return [root_vol, docker_vol]
+    return [root_vol]
+
+# disabling pylint checks for too-few-public-methods and no-self-use since we
+# need to expose a FilterModule object that has a filters method that returns
+# a mapping of filter names to methods.
+# pylint: disable=too-few-public-methods, no-self-use
+class FilterModule(object):
+    ''' FilterModule '''
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {
+            "oo_select_keys": oo_select_keys,
+            "oo_collect": oo_collect,
+            "oo_flatten": oo_flatten,
+            "oo_len": oo_len,
+            "oo_pdb": oo_pdb,
+            "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
+            "oo_ami_selector": oo_ami_selector,
+            "oo_ec2_volume_definition": oo_ec2_volume_definition
+        }
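
A quick, hedged smoke test of the new filters from a repository checkout (assumes python2 and ansible are installed so the module imports cleanly; expected output is [1, 2, 3] and ['apple-cart', 'apple-tree']):

    python2 -c 'import sys; sys.path.insert(0, "filter_plugins"); import oo_filters; print oo_filters.oo_flatten([[1, 2], [3]]); print oo_filters.oo_prepend_strings_in_list(["cart", "tree"], "apple-")'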

+ 390 - 0
git/.pylintrc

@@ -0,0 +1,390 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=no
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Deprecated. It was used to include message's id in output. Use --msg-template
+# instead.
+#include-ids=no
+
+# Deprecated. It was used to include symbolic ids of messages in output. Use
+# --msg-template instead.
+#symbols=no
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect, the resulting
+# AST will be different than the one from reality.
+optimize-ast=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time. See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter,input
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=__.*__
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis
+ignored-modules=
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it working
+# install python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
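
The rcfile is written for the Jenkins hook below, but it works for ad-hoc runs as well; for example (target file chosen arbitrarily):

    python2 -m pylint --rcfile git/.pylintrc inventory/multi_ec2.py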

+ 45 - 0
git/parent.rb

@@ -0,0 +1,45 @@
+#!/usr/bin/env ruby
+#
+#
+#
+
+if __FILE__ == $0
+  # If we aren't on master we don't need to parent check
+  branch = 'prod'
+  exit(0) if ARGV[0] !~ /#{branch}/
+  commit_id = ARGV[1]
+  %x[/usr/bin/git checkout #{branch}]
+  %x[/usr/bin/git merge #{commit_id}]
+
+  count = 0
+  #lines = %x[/usr/bin/git rev-list --left-right stg...master].split("\n")
+  lines = %x[/usr/bin/git rev-list --left-right remotes/origin/stg...#{branch}].split("\n")
+  lines.each do |commit|
+    # next if they are in stage
+    next if commit =~ /^</
+    # remove the first char '>'
+    commit = commit[1..-1]
+    # check if any remote branches contain $commit
+    results = %x[/usr/bin/git branch -q -r --contains #{commit} 2>/dev/null ]
+    # if this comes back empty, nothing contains it, we can skip it as
+    # we have probably created the merge commit here locally
+    next if results.empty?
+
+    # The results generally contain origin/pr/246/merge and origin/pr/246/head
+    # this is the pull request which would contain the commit in question.
+    #
+    # If the results do not contain origin/stg then stage does not contain
+    # the commit in question.  Therefore we need to alert!
+    unless results =~ /origin\/stg/
+      puts "\nFAILED: (These commits are not in stage.)\n"
+      puts "\t#{commit}"
+      count += 1
+    end
+  end
+
+  # Exit with count of commits in #{branch} but not stg
+  exit(count)
+end
+
+__END__
+

+ 14 - 0
git/pylint.sh

@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+
+OLDREV=$1
+NEWREV=$2
+TRG_BRANCH=$3
+
+PYTHON=/var/lib/jenkins/python27/bin/python
+
+/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | \
+ grep ".py$" | \
+ xargs -r -I{} ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc  {}
+
+exit $?
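
The script hard-codes the Jenkins interpreter path and takes OLDREV NEWREV TARGET_BRANCH as arguments, with WORKSPACE pointing at the checkout. A local approximation of what it does (the interpreter choice here is an assumption):

    WORKSPACE=$(pwd)
    git diff --name-only HEAD~1 HEAD --diff-filter=ACM | grep ".py$" | \
        xargs -r -I{} python2 -m pylint --rcfile ${WORKSPACE}/git/.pylintrc {}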

+ 72 - 0
git/yaml_validation.rb

@@ -0,0 +1,72 @@
+#!/usr/bin/env ruby
+#
+#
+#
+require 'yaml'
+require 'tmpdir'
+
+class YamlValidate
+  def self.yaml_file?(filename)
+    return filename.end_with?('.yaml') || filename.end_with?('.yml')
+  end
+
+  def self.short_yaml_ext?(filename)
+    return filename.end_with?(".yml")
+  end
+
+  def self.valid_yaml?(filename)
+    YAML::load_file(filename)
+
+    return true
+  end
+end
+
+class GitCommit
+  attr_accessor :oldrev, :newrev, :refname, :tmp
+  def initialize(oldrev, newrev, refname)
+    @oldrev = oldrev
+    @newrev = newrev
+    @refname = refname
+    @tmp = Dir.mktmpdir(@newrev)
+  end
+
+  def get_file_changes()
+    files = %x[/usr/bin/git diff --name-only #{@oldrev} #{@newrev} --diff-filter=ACM].split("\n")
+
+    # if files is empty we will get a full checkout.  This happens on
+    # a git rm file.  If there are no changes then we need to skip the archive
+    return [] if files.empty?
+
+    # We only want to take the files that changed.  Archive will do that when passed
+    # the filenames.  It will export these to a tmp dir
+    system("/usr/bin/git archive #{@newrev} #{files.join(" ")} | tar x -C #{@tmp}")
+    return Dir.glob("#{@tmp}/**/*").delete_if { |file| File.directory?(file) }
+  end
+end
+
+if __FILE__ == $0
+  while data = STDIN.gets
+    oldrev, newrev, refname = data.split
+    gc = GitCommit.new(oldrev, newrev, refname)
+
+    results = []
+    gc.get_file_changes().each do |file|
+      begin
+        puts "++++++ Received:  #{file}"
+
+        #raise "Yaml file extensions must be .yaml not .yml" if YamlValidate.short_yaml_ext? file
+
+        # skip readme, other files, etc
+        next unless YamlValidate.yaml_file?(file)
+
+        results << YamlValidate.valid_yaml?(file)
+      rescue Exception => ex
+        puts "\n#{ex.message}\n\n"
+        results << false
+      end
+    end
+
+    #puts "RESULTS\n#{results.inspect}\n"
+    exit 1 if results.include?(false)
+  end
+end
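
yaml_validation.rb reads oldrev newrev refname triples on stdin, the calling convention of a server-side git hook, so it can be exercised by hand from a checkout (the refname is only illustrative):

    echo "HEAD~1 HEAD refs/heads/prod" | ruby git/yaml_validation.rb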

inventory/aws/ec2.ini → inventory/aws/hosts/ec2.ini


inventory/aws/ec2.py → inventory/aws/hosts/ec2.py


+ 1 - 0
inventory/aws/hosts/hosts

@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2

+ 34 - 0
inventory/byo/hosts

@@ -0,0 +1,34 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
+# host group for masters
+[masters]
+ose3-master-ansible.test.example.com
+
+# host group for nodes
+[nodes]
+ose3-node[1:2]-ansible.test.example.com
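
An inventory like this is meant to be fed straight to ansible-playbook; assuming the byo config playbook added in this merge lives at playbooks/byo/config.yml, a run would look roughly like:

    # adjust the master/node host names above before running
    ansible-playbook -i inventory/byo/hosts playbooks/byo/config.yml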

inventory/gce/gce.py → inventory/gce/hosts/gce.py


+ 1 - 0
inventory/gce/hosts/hosts

@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2

+ 1 - 0
inventory/libvirt/hosts/hosts

@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2

+ 20 - 0
inventory/libvirt/hosts/libvirt.ini

@@ -0,0 +1,20 @@
+# Ansible libvirt external inventory script settings
+#
+
+[libvirt]
+
+uri = qemu:///system
+
+# API calls to libvirt can be slow. For this reason, we cache the results of an API
+# call. Set this to the path you want cache files to be written to. Two files
+# will be written to this directory:
+#   - ansible-libvirt.cache
+#   - ansible-libvirt.index
+cache_path = /tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+cache_max_age = 900
+
+
+

+ 179 - 0
inventory/libvirt/hosts/libvirt_generic.py

@@ -0,0 +1,179 @@
+#!/usr/bin/env python2
+
+"""
+libvirt external inventory script
+=================================
+
+Ansible has a feature where instead of reading from /etc/ansible/hosts
+as a text file, it can query external programs to obtain the list
+of hosts, groups the hosts are in, and even variables to assign to each host.
+
+To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
+This, more or less, allows you to keep one central database containing
+info about all of your managed instances.
+
+"""
+
+# (c) 2015, Jason DeTiberus <jdetiber@redhat.com>
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+import argparse
+import ConfigParser
+import os
+import re
+import sys
+from time import time
+import libvirt
+import xml.etree.ElementTree as ET
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+
+class LibvirtInventory(object):
+
+    def __init__(self):
+        self.inventory = dict()  # A list of groups and the hosts in that group
+        self.cache = dict()  # Details about hosts in the inventory
+
+        # Read settings and parse CLI arguments
+        self.read_settings()
+        self.parse_cli_args()
+
+        if self.args.host:
+            print self.json_format_dict(self.get_host_info(), self.args.pretty)
+        elif self.args.list:
+            print self.json_format_dict(self.get_inventory(), self.args.pretty)
+        else:  # default action with no options
+            print self.json_format_dict(self.get_inventory(), self.args.pretty)
+
+    def read_settings(self):
+        config = ConfigParser.SafeConfigParser()
+        config.read(
+            os.path.dirname(os.path.realpath(__file__)) + '/libvirt.ini'
+        )
+        self.libvirt_uri = config.get('libvirt', 'uri')
+
+    def parse_cli_args(self):
+        parser = argparse.ArgumentParser(
+            description='Produce an Ansible Inventory file based on libvirt'
+        )
+        parser.add_argument(
+            '--list',
+            action='store_true',
+            default=True,
+            help='List instances (default: True)'
+        )
+        parser.add_argument(
+            '--host',
+            action='store',
+            help='Get all the variables about a specific instance'
+        )
+        parser.add_argument(
+            '--pretty',
+            action='store_true',
+            default=False,
+            help='Pretty format (default: False)'
+        )
+        self.args = parser.parse_args()
+
+    def get_host_info(self):
+        inventory = self.get_inventory()
+        if self.args.host in inventory['_meta']['hostvars']:
+            return inventory['_meta']['hostvars'][self.args.host]
+
+    def get_inventory(self):
+        inventory = dict(_meta=dict(hostvars=dict()))
+
+        conn = libvirt.openReadOnly(self.libvirt_uri)
+        if conn is None:
+            print "Failed to open connection to %s" % libvirt_uri
+            sys.exit(1)
+
+        domains = conn.listAllDomains()
+        if domains is None:
+            print "Failed to list domains for connection %s" % libvirt_uri
+            sys.exit(1)
+
+        arp_entries = self.parse_arp_entries()
+
+        for domain in domains:
+            hostvars = dict(libvirt_name=domain.name(),
+                            libvirt_id=domain.ID(),
+                            libvirt_uuid=domain.UUIDString())
+            domain_name = domain.name()
+
+            # TODO: add support for guests that are not in a running state
+            state, _ = domain.state()
+            # 1 is the state for a running guest
+            if state != 1:
+                continue
+
+            hostvars['libvirt_status'] = 'running'
+
+            root = ET.fromstring(domain.XMLDesc())
+            ns = {'ansible': 'https://github.com/ansible/ansible'}
+            for tag_elem in root.findall('./metadata/ansible:tags/ansible:tag', ns):
+                tag = tag_elem.text
+                self.push(inventory, "tag_%s" % tag, domain_name)
+                self.push(hostvars, 'libvirt_tags', tag)
+
+            # TODO: support more than one network interface, also support
+            # interface types other than 'network'
+            interface = root.find("./devices/interface[@type='network']")
+            if interface is not None:
+                mac_elem = interface.find('mac')
+                if mac_elem is not None:
+                    mac = mac_elem.get('address')
+                    if mac in arp_entries:
+                        ip_address = arp_entries[mac]['ip_address']
+                        hostvars['ansible_ssh_host'] = ip_address
+                        hostvars['libvirt_ip_address'] = ip_address
+
+            inventory['_meta']['hostvars'][domain_name] = hostvars
+
+        return inventory
+
+    def parse_arp_entries(self):
+        arp_entries = dict()
+        with open('/proc/net/arp', 'r') as f:
+            # throw away the header
+            f.readline()
+
+            for line in f:
+                ip_address, _, _, mac, _, device = line.strip().split()
+                arp_entries[mac] = dict(ip_address=ip_address, device=device)
+
+        return arp_entries
+
+    def push(self, my_dict, key, element):
+        if key in my_dict:
+            my_dict[key].append(element)
+        else:
+            my_dict[key] = [element]
+
+    def json_format_dict(self, data, pretty=False):
+        if pretty:
+            return json.dumps(data, sort_keys=True, indent=2)
+        else:
+            return json.dumps(data)
+
+LibvirtInventory()
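
The script speaks the usual dynamic-inventory protocol, so a manual check against a local libvirt daemon (requires the libvirt python bindings and a reachable qemu:///system URI) is simply:

    python2 inventory/libvirt/hosts/libvirt_generic.py --list --pretty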

+ 65 - 45
inventory/multi_ec2.py

@@ -1,37 +1,58 @@
 #!/usr/bin/env python2
+'''
+    Fetch and combine multiple ec2 account settings into a single
+    json hash.
+'''
 # vim: expandtab:tabstop=4:shiftwidth=4
 
 from time import time
 import argparse
 import yaml
 import os
-import sys
-import pdb
 import subprocess
 import json
-import pprint
 
 
+CONFIG_FILE_NAME = 'multi_ec2.yaml'
+
 class MultiEc2(object):
+    '''
+       MultiEc2 class:
+            Opens a yaml config file and reads aws credentials.
+            Stores a json hash of resources in result.
+    '''
 
     def __init__(self):
+        self.args = None
         self.config = None
         self.all_ec2_results = {}
         self.result = {}
         self.cache_path = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-        self.config_file = os.path.join(self.file_path,"multi_ec2.yaml")
+
+        same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
+        etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
+
+        # Prefer a file in the same directory, fall back to a file in etc
+        if os.path.isfile(same_dir_config_file):
+            self.config_file = same_dir_config_file
+        elif os.path.isfile(etc_dir_config_file):
+            self.config_file = etc_dir_config_file
+        else:
+            self.config_file = None # expect env vars
+
         self.parse_cli_args()
 
         # load yaml
-        if os.path.isfile(self.config_file):
+        if self.config_file and os.path.isfile(self.config_file):
             self.config = self.load_yaml_config()
-        elif os.environ.has_key("AWS_ACCESS_KEY_ID") and os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
+        elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
+             os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
             self.config = {}
             self.config['accounts'] = [
                 {
                     'name': 'default',
-                    'provider': 'aws/ec2.py',
+                    'provider': 'aws/hosts/ec2.py',
                     'env_vars': {
                         'AWS_ACCESS_KEY_ID':     os.environ["AWS_ACCESS_KEY_ID"],
                         'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"],
@@ -43,13 +64,9 @@ class MultiEc2(object):
         else:
             raise RuntimeError("Could not find valid ec2 credentials in the environment.")
 
 
-        if self.args.cache_only:
-            # get data from disk
-            result = self.get_inventory_from_cache()
-
-            if not result:
-                self.get_inventory()
-                self.write_to_cache()
+        if self.args.refresh_cache:
+            self.get_inventory()
+            self.write_to_cache()
         # if its a host query, fetch and do not cache
         elif self.args.host:
             self.get_inventory()
@@ -61,7 +78,7 @@ class MultiEc2(object):
             # get data from disk
             self.get_inventory_from_cache()
 
 
-    def load_yaml_config(self,conf_file=None):
+    def load_yaml_config(self, conf_file=None):
         """Load a yaml config file with credentials to query the
         """Load a yaml config file with credentials to query the
         respective cloud for inventory.
         respective cloud for inventory.
         """
         """
@@ -75,7 +92,7 @@ class MultiEc2(object):
 
 
         return config
 
 
-    def get_provider_tags(self,provider, env={}):
+    def get_provider_tags(self, provider, env=None):
         """Call <provider> and query all of the tags that are usuable
         """Call <provider> and query all of the tags that are usuable
         by ansible.  If environment is empty use the default env.
         by ansible.  If environment is empty use the default env.
         """
         """
@@ -140,7 +157,8 @@ class MultiEc2(object):
                     self.all_ec2_results[result['name']] = json.loads(result['out'])
             values = self.all_ec2_results.values()
             values.insert(0, self.result)
-            [MultiEc2.merge_destructively(self.result, x) for x in  values]
+            for result in  values:
+                MultiEc2.merge_destructively(self.result, result)
         else:
             # For any 0 result, return it
             count = 0
@@ -152,30 +170,30 @@ class MultiEc2(object):
                     raise RuntimeError("Found > 1 results for --host %s. \
                     raise RuntimeError("Found > 1 results for --host %s. \
                                        This is an invalid state." % self.args.host)
                                        This is an invalid state." % self.args.host)
     @staticmethod
     @staticmethod
-    def merge_destructively(a, b):
-        "merges b into a"
-        for key in b:
-            if key in a:
-                if isinstance(a[key], dict) and isinstance(b[key], dict):
-                    MultiEc2.merge_destructively(a[key], b[key])
-                elif a[key] == b[key]:
+    def merge_destructively(input_a, input_b):
+        "merges b into input_a"
+        for key in input_b:
+            if key in input_a:
+                if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
+                    MultiEc2.merge_destructively(input_a[key], input_b[key])
+                elif input_a[key] == input_b[key]:
                     pass # same leaf value
                 # both lists so add each element in b to a if it does ! exist
-                elif isinstance(a[key], list) and isinstance(b[key],list):
-                    for x in b[key]:
-                        if x not in a[key]:
-                            a[key].append(x)
+                elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
+                    for result in input_b[key]:
+                        if result not in input_a[key]:
+                            input_a[key].append(result)
                 # a is a list and not b
-                elif isinstance(a[key], list):
-                    if b[key] not in a[key]:
-                        a[key].append(b[key])
-                elif isinstance(b[key], list):
-                    a[key] = [a[key]] + [k for k in b[key] if k != a[key]]
+                elif isinstance(input_a[key], list):
+                    if input_b[key] not in input_a[key]:
+                        input_a[key].append(input_b[key])
+                elif isinstance(input_b[key], list):
+                    input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]]
                 else:
-                    a[key] = [a[key],b[key]]
+                    input_a[key] = [input_a[key], input_b[key]]
             else:
-                a[key] = b[key]
-        return a
+                input_a[key] = input_b[key]
+        return input_a
 
 
     def is_cache_valid(self):
         ''' Determines if the cache files have expired, or if it is still valid '''
@@ -191,19 +209,20 @@ class MultiEc2(object):
     def parse_cli_args(self):
         ''' Command line argument processing '''
 
 
-        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on a provider')
-        parser.add_argument('--cache-only', action='store_true', default=False,
-                           help='Fetch cached only instances (default: False)')
+        parser = argparse.ArgumentParser(
+            description='Produce an Ansible Inventory file based on a provider')
+        parser.add_argument('--refresh-cache', action='store_true', default=False,
+                            help='Refresh the inventory cache (default: False)')
         parser.add_argument('--list', action='store_true', default=True,
-                           help='List instances (default: True)')
+                            help='List instances (default: True)')
         parser.add_argument('--host', action='store', default=False,
-                           help='Get all the variables about a specific instance')
+                            help='Get all the variables about a specific instance')
         self.args = parser.parse_args()
 
     def write_to_cache(self):
         ''' Writes data in JSON format to a file '''
 
 
-        json_data = self.json_format_dict(self.result, True)
+        json_data = MultiEc2.json_format_dict(self.result, True)
         with open(self.cache_path, 'w') as cache:
             cache.write(json_data)
 
@@ -219,7 +238,8 @@ class MultiEc2(object):
 
         return True
 
-    def json_format_dict(self, data, pretty=False):
+    @classmethod
+    def json_format_dict(cls, data, pretty=False):
         ''' Converts a dict to a JSON object and dumps it as a formatted
         string '''
 
@@ -229,9 +249,9 @@ class MultiEc2(object):
             return json.dumps(data)
 
     def result_str(self):
+        '''Return cache string stored in self.result'''
         return self.json_format_dict(self.result, True)
 
 
 
 
 if __name__ == "__main__":
 if __name__ == "__main__":
-    mi = MultiEc2()
-    print mi.result_str()
+    print MultiEc2().result_str()
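
With --cache-only gone, a refresh is now requested explicitly and ordinary --list calls are served from the cache file. A hedged manual check (the host name is a placeholder):

    ./inventory/multi_ec2.py --refresh-cache
    ./inventory/multi_ec2.py --host some-ec2-host-name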

+ 2 - 2
inventory/multi_ec2.yaml.example

@@ -1,13 +1,13 @@
 # multi ec2 inventory configs
 accounts:
   - name: aws1
-    provider: aws/ec2.py
+    provider: aws/hosts/ec2.py
     env_vars:
       AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
       AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 
   - name: aws2
-    provider: aws/ec2.py
+    provider: aws/hosts/ec2.py
     env_vars:
       AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
       AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

+ 50 - 0
inventory/openshift-ansible-inventory.spec

@@ -0,0 +1,50 @@
+Summary:       OpenShift Ansible Inventories
+Name:          openshift-ansible-inventory
+Version:       0.0.2
+Release:       1%{?dist}
+License:       ASL 2.0
+URL:           https://github.com/openshift/openshift-ansible
+Source0:       %{name}-%{version}.tar.gz
+Requires:      python2
+BuildRequires: python2-devel
+BuildArch:     noarch
+
+%description
+Ansible Inventories used with the openshift-ansible scripts and playbooks.
+
+%prep
+%setup -q
+
+%build
+
+%install
+mkdir -p %{buildroot}/etc/ansible
+mkdir -p %{buildroot}/usr/share/ansible/inventory
+mkdir -p %{buildroot}/usr/share/ansible/inventory/aws
+mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
+
+cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
+cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
+cp -p aws/ec2.py aws/ec2.ini %{buildroot}/usr/share/ansible/inventory/aws
+cp -p gce/gce.py %{buildroot}/usr/share/ansible/inventory/gce
+
+%files
+%config(noreplace) /etc/ansible/*
+%dir /usr/share/ansible/inventory
+/usr/share/ansible/inventory/multi_ec2.py*
+/usr/share/ansible/inventory/aws/ec2.py*
+%config(noreplace) /usr/share/ansible/inventory/aws/ec2.ini
+/usr/share/ansible/inventory/gce/gce.py*
+
+%changelog
+* Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
+- added the ability to have a config file in /etc/openshift_ansible to
+  multi_ec2.py. (twiest@redhat.com)
+- Merge pull request #97 from jwhonce/wip/cluster (jhonce@redhat.com)
+- gce inventory/playbook updates for node registration changes
+  (jdetiber@redhat.com)
+- Various fixes (jdetiber@redhat.com)
+
+* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
+- new package built with tito
+

+ 1 - 0
playbooks/adhoc/noc/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 41 - 0
playbooks/adhoc/noc/get_zabbix_problems.yml

@@ -0,0 +1,41 @@
+---
+- name: 'Get current hosts who have triggers that are alerting by trigger description'
+  hosts: localhost
+  gather_facts: no
+  roles:
+    - os_zabbix
+  post_tasks:
+    - assert:
+        that: oo_desc is defined
+
+    - zbxapi:
+        server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
+        zbx_class: Trigger
+        action: get
+        params:
+          only_true: true
+          output: extend
+          selectHosts: extend
+          searchWildCardsEnabled: 1
+          search:
+            description: "{{ oo_desc }}"
+      register: problems
+
+    - debug: var=problems
+
+    - set_fact:
+        problem_hosts: "{{ problems.results | oo_collect(attribute='hosts') | oo_flatten | oo_collect(attribute='host') | difference(['aggregates']) }}"
+
+    - debug: var=problem_hosts
+
+    - add_host:
+        name: "{{ item }}"
+        groups: problem_hosts_group
+      with_items: problem_hosts
+
+- name: "Run on problem hosts"
+  hosts: problem_hosts_group
+  gather_facts: no
+  tasks:
+    - command: "{{ oo_cmd }}"
+      when: oo_cmd is defined

+ 1 - 0
playbooks/adhoc/noc/roles

@@ -0,0 +1 @@
+../../../roles

+ 1 - 1
playbooks/aws/ansible-tower/launch.yml

@@ -6,7 +6,7 @@
 
 
   vars:
     inst_region: us-east-1
-    rhel7_ami: ami-a24e30ca
+    rhel7_ami: ami-906240f8
     user_data_file: user_data.txt

   vars_files:

+ 36 - 0
playbooks/aws/openshift-cluster/config.yml

@@ -0,0 +1,36 @@
+---
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+      groups: oo_first_master
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ ec2_private_ip_address }}"
+    openshift_public_hostname: "{{ ec2_ip_address }}"

+ 1 - 0
playbooks/aws/openshift-cluster/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 30 - 0
playbooks/aws/openshift-cluster/launch.yml

@@ -0,0 +1,30 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
+  tasks:
+  - fail:
+      msg: Deployment type not supported for aws provider yet
+    when: deployment_type == 'enterprise'
+
+  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ master_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+
+  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ node_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+
+- include: update.yml
+
+- include: list.yml

+ 302 - 0
playbooks/aws/openshift-cluster/library/ec2_ami_find.py

@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#pylint: skip-file
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_find
+version_added: 2.0
+short_description: Searches for AMIs to obtain the AMI ID and other information
+description:
+  - Returns list of matching AMIs with AMI ID, along with other useful information
+  - Can search AMIs with different owners
+  - Can search by matching tag(s), by AMI name and/or other criteria
+  - Results can be sorted and sliced
+author: Tom Bamford
+notes:
+  - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
+  - See the example below for a suggestion of how to search by distro/release.
+options:
+  region:
+    description:
+      - The AWS region to use.
+    required: true
+    aliases: [ 'aws_region', 'ec2_region' ]
+  owner:
+    description:
+      - Search AMIs owned by the specified owner
+      - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
+      - If not specified, all EC2 AMIs in the specified region will be searched.
+      - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
+    required: false
+    default: null
+  ami_id:
+    description:
+      - An AMI ID to match.
+    default: null
+    required: false
+  ami_tags:
+    description:
+      - A hash/dictionary of tags to match for the AMI.
+    default: null
+    required: false
+  architecture:
+    description:
+      - An architecture type to match (e.g. x86_64).
+    default: null
+    required: false
+  hypervisor:
+    description:
+      - A hypervisor type to match (e.g. xen).
+    default: null
+    required: false
+  is_public:
+    description:
+      - Whether or not the image(s) are public.
+    choices: ['yes', 'no']
+    default: null
+    required: false
+  name:
+    description:
+      - An AMI name to match.
+    default: null
+    required: false
+  platform:
+    description:
+      - Platform type to match.
+    default: null
+    required: false
+  sort:
+    description:
+      - Optional attribute with which to sort the results.
+      - If specifying 'tag', the 'tag_name' parameter is required.
+    choices: ['name', 'description', 'tag']
+    default: null
+    required: false
+  sort_tag:
+    description:
+      - Tag name with which to sort results.
+      - Required when specifying 'sort=tag'.
+    default: null
+    required: false
+  sort_order:
+    description:
+      - Order in which to sort results.
+      - Only used when the 'sort' parameter is specified.
+    choices: ['ascending', 'descending']
+    default: 'ascending'
+    required: false
+  sort_start:
+    description:
+      - Which result to start with (when sorting).
+      - Corresponds to Python slice notation.
+    default: null
+    required: false
+  sort_end:
+    description:
+      - Which result to end with (when sorting).
+      - Corresponds to Python slice notation.
+    default: null
+    required: false
+  state:
+    description:
+      - AMI state to match.
+    default: 'available'
+    required: false
+  virtualization_type:
+    description:
+      - Virtualization type to match (e.g. hvm).
+    default: null
+    required: false
+  no_result_action:
+    description:
+      - What to do when no results are found.
+      - "'success' reports success and returns an empty array"
+      - "'fail' causes the module to report failure"
+    choices: ['success', 'fail']
+    default: 'success'
+    required: false
+requirements:
+  - boto
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Search for the AMI tagged "project:website"
+- ec2_ami_find:
+    owner: self
+    ami_tags:
+      project: website
+    no_result_action: fail
+  register: ami_find
+
+# Search for the latest Ubuntu 14.04 AMI
+- ec2_ami_find:
+    name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
+    owner: 099720109477
+    sort: name
+    sort_order: descending
+    sort_end: 1
+  register: ami_find
+
+# Launch an EC2 instance
+- ec2:
+    image: "{{ ami_find.results[0].ami_id }}"
+    instance_type: m3.medium
+    key_name: mykey
+    wait: yes
+'''
+
+try:
+    import boto.ec2
+    HAS_BOTO=True
+except ImportError:
+    HAS_BOTO=False
+
+import json
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+            region = dict(required=True,
+                aliases = ['aws_region', 'ec2_region']),
+            owner = dict(required=False, default=None),
+            ami_id = dict(required=False),
+            ami_tags = dict(required=False, type='dict',
+                aliases = ['search_tags', 'image_tags']),
+            architecture = dict(required=False),
+            hypervisor = dict(required=False),
+            is_public = dict(required=False),
+            name = dict(required=False),
+            platform = dict(required=False),
+            sort = dict(required=False, default=None,
+                choices=['name', 'description', 'tag']),
+            sort_tag = dict(required=False),
+            sort_order = dict(required=False, default='ascending',
+                choices=['ascending', 'descending']),
+            sort_start = dict(required=False),
+            sort_end = dict(required=False),
+            state = dict(required=False, default='available'),
+            virtualization_type = dict(required=False),
+            no_result_action = dict(required=False, default='success',
+                choices = ['success', 'fail']),
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+    )
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto required for this module, install via pip or your package manager')
+
+    ami_id = module.params.get('ami_id')
+    ami_tags = module.params.get('ami_tags')
+    architecture = module.params.get('architecture')
+    hypervisor = module.params.get('hypervisor')
+    is_public = module.params.get('is_public')
+    name = module.params.get('name')
+    owner = module.params.get('owner')
+    platform = module.params.get('platform')
+    sort = module.params.get('sort')
+    sort_tag = module.params.get('sort_tag')
+    sort_order = module.params.get('sort_order')
+    sort_start = module.params.get('sort_start')
+    sort_end = module.params.get('sort_end')
+    state = module.params.get('state')
+    virtualization_type = module.params.get('virtualization_type')
+    no_result_action = module.params.get('no_result_action')
+
+    filter = {'state': state}
+
+    if ami_id:
+        filter['image_id'] = ami_id
+    if ami_tags:
+        for tag in ami_tags:
+            filter['tag:'+tag] = ami_tags[tag]
+    if architecture:
+        filter['architecture'] = architecture
+    if hypervisor:
+        filter['hypervisor'] = hypervisor
+    if is_public:
+        filter['is_public'] = is_public
+    if name:
+        filter['name'] = name
+    if platform:
+        filter['platform'] = platform
+    if virtualization_type:
+        filter['virtualization_type'] = virtualization_type
+
+    ec2 = ec2_connect(module)
+
+    images_result = ec2.get_all_images(owners=owner, filters=filter)
+
+    if no_result_action == 'fail' and len(images_result) == 0:
+        module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
+
+    results = []
+    for image in images_result:
+        data = {
+            'ami_id': image.id,
+            'architecture': image.architecture,
+            'description': image.description,
+            'is_public': image.is_public,
+            'name': image.name,
+            'owner_id': image.owner_id,
+            'platform': image.platform,
+            'root_device_name': image.root_device_name,
+            'root_device_type': image.root_device_type,
+            'state': image.state,
+            'tags': image.tags,
+            'virtualization_type': image.virtualization_type,
+        }
+
+        if image.kernel_id:
+            data['kernel_id'] = image.kernel_id
+        if image.ramdisk_id:
+            data['ramdisk_id'] = image.ramdisk_id
+
+        results.append(data)
+
+    if sort == 'tag':
+        if not sort_tag:
+            module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
+        results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
+    elif sort:
+        results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
+
+    try:
+        if sort and sort_start and sort_end:
+            results = results[int(sort_start):int(sort_end)]
+        elif sort and sort_start:
+            results = results[int(sort_start):]
+        elif sort and sort_end:
+            results = results[:int(sort_end)]
+    except TypeError:
+        module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
+
+    module.exit_json(results=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
+
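The sort/sort_start/sort_end handling above is ordinary Python sorting followed by list slicing, exactly as the DOCUMENTATION notes ("Corresponds to Python slice notation"). A standalone sketch of that behaviour, with sample AMI records invented for illustration:

    # sort a list of AMI dicts the way ec2_ami_find does, then slice the result
    results = [
        {'name': 'openshift-rhel7_2015-03-01', 'ami_id': 'ami-aaaa1111'},
        {'name': 'openshift-rhel7_2015-04-01', 'ami_id': 'ami-bbbb2222'},
        {'name': 'openshift-rhel7_2015-02-01', 'ami_id': 'ami-cccc3333'},
    ]

    sort, sort_order = 'name', 'descending'
    sort_end = 1                            # keep only the first (newest) entry

    results.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))
    results = results[:sort_end]            # Python slice notation

    print(results[0]['ami_id'])             # ami-bbbb2222 with this sample data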

+ 24 - 0
playbooks/aws/openshift-cluster/list.yml

@@ -0,0 +1,24 @@
+---
+- name: Generate oo_list_hosts group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - set_fact: scratch_group=tag_env_{{ cluster_id }}
+    when: cluster_id != ''
+  - set_fact: scratch_group=all
+    when: cluster_id == ''
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_list_hosts
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+
+- name: List Hosts
+  hosts: oo_list_hosts
+  gather_facts: no
+  tasks:
+  - debug:
+      msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}"

+ 1 - 0
playbooks/aws/openshift-cluster/roles

@@ -0,0 +1 @@
+../../../roles

+ 132 - 0
playbooks/aws/openshift-cluster/tasks/launch_instances.yml

@@ -0,0 +1,132 @@
+---
+- set_fact:
+    created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
+    docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
+    env: "{{ cluster }}"
+    env_host_type: "{{ cluster }}-openshift-{{ type }}"
+    host_type: "{{ type }}"
+
+- set_fact:
+    ec2_region: "{{ lookup('env', 'ec2_region')
+                    | default(deployment_vars[deployment_type].region, true) }}"
+  when: ec2_region is not defined
+- set_fact:
+    ec2_image_name: "{{ lookup('env', 'ec2_image_name')
+                        | default(deployment_vars[deployment_type].image_name, true) }}"
+  when: ec2_image_name is not defined and ec2_image is not defined
+- set_fact:
+    ec2_image: "{{ lookup('env', 'ec2_image')
+                   | default(deployment_vars[deployment_type].image, true) }}"
+  when: ec2_image is not defined and not ec2_image_name
+- set_fact:
+    ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
+                    | default(deployment_vars[deployment_type].type, true) }}"
+  when: ec2_instance_type is not defined
+- set_fact:
+    ec2_keypair: "{{ lookup('env', 'ec2_keypair')
+                    | default(deployment_vars[deployment_type].keypair, true) }}"
+  when: ec2_keypair is not defined
+- set_fact:
+    ec2_vpc_subnet: "{{ lookup('env', 'ec2_vpc_subnet')
+                    | default(deployment_vars[deployment_type].vpc_subnet, true) }}"
+  when: ec2_vpc_subnet is not defined
+- set_fact:
+    ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
+                    | default(deployment_vars[deployment_type].assign_public_ip, true) }}"
+  when: ec2_assign_public_ip is not defined
+- set_fact:
+    ec2_security_groups: "{{ lookup('env', 'ec2_security_groups')
+                    | default(deployment_vars[deployment_type].security_groups, true) }}"
+  when: ec2_security_groups is not defined
+
+- name: Find amis for deployment_type
+  ec2_ami_find:
+    region: "{{ ec2_region }}"
+    ami_id: "{{ ec2_image | default(omit, true) }}"
+    name: "{{ ec2_image_name | default(omit, true) }}"
+  register: ami_result
+
+- fail: msg="Could not find requested ami"
+  when: not ami_result.results
+
+- set_fact:
+    latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
+    user_data: "{{ lookup('template', '../templates/user_data.j2') if type == 'node' else None | default('omit') }}"
+    volume_defs:
+      master:
+        root:
+          volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
+          device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
+      node:
+        root:
+          volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(25, true) }}"
+          device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
+        docker:
+          volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}"
+          device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
+
+- set_fact:
+    volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}"
+
+- name: Launch instance(s)
+  ec2:
+    state: present
+    region: "{{ ec2_region }}"
+    keypair: "{{ ec2_keypair }}"
+    group: "{{ ec2_security_groups }}"
+    instance_type: "{{ ec2_instance_type }}"
+    image: "{{ latest_ami }}"
+    count: "{{ instances | oo_len }}"
+    vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
+    assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
+    user_data: "{{ user_data }}"
+    wait: yes
+    instance_tags:
+      created-by: "{{ created_by }}"
+      env: "{{ env }}"
+      host-type: "{{ host_type }}"
+      env-host-type: "{{ env_host_type }}"
+    volumes: "{{ volumes }}"
+  register: ec2
+
+- name: Add Name tag to instances
+  ec2_tag: resource={{ item.1.id }} region={{ ec2_region }} state=present
+  with_together:
+  - instances
+  - ec2.instances
+  args:
+    tags:
+      Name: "{{ item.0 }}"
+
+- set_fact:
+    instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+
+- name: Add new instances groups and variables
+  add_host:
+    hostname: "{{ item.0 }}"
+    ansible_ssh_host: "{{ item.1.dns_name }}"
+    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    groups: "{{ instance_groups }}"
+    ec2_private_ip_address: "{{ item.1.private_ip }}"
+    ec2_ip_address: "{{ item.1.public_ip }}"
+  with_together:
+  - instances
+  - ec2.instances
+
+- name: Wait for ssh
+  wait_for: "port=22 host={{ item.dns_name }}"
+  with_items: ec2.instances
+
+- name: Wait for user setup
+  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
+  register: result
+  until: result.rc == 0
+  retries: 20
+  delay: 10
+  with_together:
+  - instances
+  - ec2.instances
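For reference, the instance_groups fact set above mirrors how the EC2 dynamic inventory typically names groups built from tags (tag_<key>_<value>). A quick illustration of the same naming in Python, with example values chosen purely for illustration:

    # reproduce the group names launch_instances.yml adds new hosts to
    created_by, env, host_type = 'jdoe', 'mycluster', 'master'
    env_host_type = '{0}-openshift-{1}'.format(env, host_type)

    instance_groups = [
        'tag_created-by_' + created_by,
        'tag_env_' + env,
        'tag_host-type_' + host_type,
        'tag_env-host-type_' + env_host_type,
    ]
    print(', '.join(instance_groups))
    # tag_created-by_jdoe, tag_env_mycluster, tag_host-type_master, tag_env-host-type_mycluster-openshift-master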

+ 29 - 0
playbooks/aws/openshift-cluster/templates/user_data.j2

@@ -0,0 +1,29 @@
+#cloud-config
+yum_repos:
+  jdetiber-copr:
+    name: Copr repo for origin owned by jdetiber
+    baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/epel-7-$basearch/
+    skip_if_unavailable: true
+    gpgcheck: true
+    gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/pubkey.gpg
+    enabled: true
+
+packages:
+- xfsprogs # can be dropped after docker-storage-setup properly requires it: https://github.com/projectatomic/docker-storage-setup/pull/8
+- docker-storage-setup
+
+mounts:
+- [ xvdb ]
+- [ ephemeral0 ]
+
+write_files:
+- content: |
+    DEVS=/dev/xvdb
+    VG=docker_vg
+  path: /etc/sysconfig/docker-storage-setup
+  owner: root:root
+  permissions: '0644'
+
+runcmd:
+- systemctl daemon-reload
+- systemctl enable lvm2-lvmetad.service docker-storage-setup.service

+ 16 - 0
playbooks/aws/openshift-cluster/terminate.yml

@@ -0,0 +1,16 @@
+---
+- name: Terminate instance(s)
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - set_fact: scratch_group=tag_env_{{ cluster_id }}
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_terminate
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+
+- include: ../terminate.yml

+ 18 - 0
playbooks/aws/openshift-cluster/update.yml

@@ -0,0 +1,18 @@
+---
+- name: Populate oo_hosts_to_update group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_hosts_to_update
+    add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_update
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: config.yml

+ 1 - 0
playbooks/aws/openshift-cluster/vars.defaults.yml

@@ -0,0 +1 @@
+---

+ 9 - 0
playbooks/aws/openshift-cluster/vars.online.int.yml

@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes

+ 9 - 0
playbooks/aws/openshift-cluster/vars.online.prod.yml

@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes

+ 9 - 0
playbooks/aws/openshift-cluster/vars.online.stage.yml

@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes

+ 38 - 0
playbooks/aws/openshift-cluster/vars.yml

@@ -0,0 +1,38 @@
+---
+deployment_vars:
+  origin:
+    # fedora, since centos requires marketplace
+    image: ami-acd999c4
+    image_name:
+    region: us-east-1
+    ssh_user: fedora
+    sudo: yes
+    keypair: libra
+    type: m3.large
+    security_groups: [ 'public' ]
+    vpc_subnet:
+    assign_public_ip:
+  online:
+    # private ami
+    image: ami-7a9e9812
+    image_name: openshift-rhel7_*
+    region: us-east-1
+    ssh_user: root
+    sudo: no
+    keypair: libra
+    type: m3.large
+    security_groups: [ 'public' ]
+    vpc_subnet:
+    assign_public_ip:
+  enterprise:
+    # rhel-7.1, requires cloud access subscription
+    image: ami-10663b78
+    image_name:
+    region: us-east-1
+    ssh_user: ec2-user
+    sudo: yes
+    keypair: libra
+    type: m3.large
+    security_groups: [ 'public' ]
+    vpc_subnet:
+    assign_public_ip:

+ 14 - 37
playbooks/aws/openshift-master/config.yml

@@ -1,42 +1,19 @@
 ---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: Populate oo_masters_to_config host group
   hosts: localhost
   gather_facts: no
   tasks:
-  - name: "Evaluate oo_host_group_exp if it's set"
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
-    with_items: "{{ oo_host_group_exp | default('') }}"
-    when: oo_host_group_exp is defined
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: root
+    with_items: oo_host_group_exp | default([])
 
 
-- name: "Gather facts for nodes in {{ oo_env }}"
-  hosts: "tag_env-host-type_{{ oo_env }}-openshift-node"
-  connection: ssh
-  user: root
-
-- name: "Set Origin specific facts on localhost (for later use)"
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Setting openshift_node_ips fact on localhost
-      set_fact:
-        openshift_node_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-node'])
-            | oo_collect(attribute='ansible_default_ipv4.address') }}"
-      when: groups['tag_env-host-type_' + oo_env + '-openshift-node'] is defined
-
-- name: "Configure instances"
-  hosts: oo_hosts_to_config
-  connection: ssh
-  user: root
-  vars_files:
-    - vars.yml
-  roles:
-    - repos
-    - {
-        role: openshift_master,
-        openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
-        openshift_env: "{{ oo_env }}"
-        openshift_public_ip: "{{ ec2_ip_address }}"
-      }
-    - pods
-    - os_env_extras
+- include: ../../common/openshift-master/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ ec2_private_ip_address }}"
+    openshift_public_hostname: "{{ ec2_ip_address }}"

+ 10 - 9
playbooks/aws/openshift-master/launch.yml

@@ -4,14 +4,12 @@
   connection: local
   gather_facts: no

+# TODO: modify atomic_ami based on deployment_type
   vars:
     inst_region: us-east-1
     atomic_ami: ami-86781fee
     user_data_file: user_data.txt

-  vars_files:
-    - vars.yml
-
   tasks:
     - name: Launch instances
       ec2:
@@ -40,21 +38,24 @@
           Name: "{{ item.0 }}"

     - name: Add other tags to instances
-      ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+      ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
       with_items: ec2.instances
       args:
         tags: "{{ oo_new_inst_tags }}"

-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+    - name: Add new instances public IPs to oo_masters_to_config
+      add_host:
+        hostname: "{{ item.0 }}"
+        ansible_ssh_host: "{{ item.1.dns_name }}"
+        groupname: oo_masters_to_config
+        ec2_private_ip_address: "{{ item.1.private_ip }}"
+        ec2_ip_address: "{{ item.1.public_ip }}"
       with_together:
         - oo_new_inst_names
         - ec2.instances

-    - debug: var=ec2
-
     - name: Wait for ssh
-      wait_for: "port=22 host={{ item.dns_name }}"
+      wait_for: port=22 host={{ item.dns_name }}
       with_items: ec2.instances

     - name: Wait for root user setup

+ 2 - 0
playbooks/aws/openshift-master/terminate.yml

@@ -0,0 +1,2 @@
+---
+- include: ../terminate.yml

+ 0 - 2
playbooks/aws/openshift-master/vars.yml

@@ -1,2 +0,0 @@
----
-openshift_debug_level: 4

+ 19 - 43
playbooks/aws/openshift-node/config.yml

@@ -1,49 +1,25 @@
 ---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: Populate oo_nodes_to_config and oo_first_master host groups
   hosts: localhost
   gather_facts: no
   tasks:
-  - name: Evaluate oo_host_group_exp
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
-    with_items: "{{ oo_host_group_exp | default('') }}"
-    when: oo_host_group_exp is defined
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: root
+    with_items: oo_host_group_exp | default([])
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+      groups: oo_first_master
+      ansible_ssh_user: root
 
 
-- name: "Gather facts for masters in {{ oo_env }}"
-  hosts: "tag_env-host-type_{{ oo_env }}-openshift-master"
-  connection: ssh
-  user: root
 
 
-- name: "Set OO sepcific facts on localhost (for later use)"
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Setting openshift_master_ips fact on localhost
-      set_fact:
-        openshift_master_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type_' + oo_env + '-openshift-master'])
-            | oo_collect(attribute='ansible_default_ipv4.address') }}"
-      when: groups['tag_env-host-type_' + oo_env + '-openshift-master'] is defined
-    - name: Setting openshift_master_public_ips fact on localhost
-      set_fact:
-        openshift_master_public_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
-            | oo_collect(attribute='ec2_ip_address') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
-
-- name: "Configure instances"
-  hosts: oo_hosts_to_config
-  connection: ssh
-  user: root
-  vars_files:
-    - vars.yml
-  roles:
-    - repos
-    - docker
-    - {
-        role: openshift_node,
-        openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
-        openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}",
-        openshift_env: "{{ oo_env }}"
-        openshift_public_ip: "{{ ec2_ip_address }}"
-      }
-    - os_env_extras
+- include: ../../common/openshift-node/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ ec2_private_ip_address }}"
+    openshift_public_hostname: "{{ ec2_ip_address }}"

+ 14 - 11
playbooks/aws/openshift-node/launch.yml

@@ -4,14 +4,12 @@
   connection: local
   gather_facts: no

+# TODO: modify atomic_ami based on deployment_type
   vars:
     inst_region: us-east-1
     atomic_ami: ami-86781fee
     user_data_file: user_data.txt

-  vars_files:
-    - vars.yml
-
   tasks:
     - name: Launch instances
       ec2:
@@ -27,11 +25,13 @@
       register: ec2

     - name: Add new instances public IPs to the atomic proxy host group
-      add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
+      add_host:
+        hostname: "{{ item.public_ip }}"
+        groupname: new_ec2_instances
       with_items: ec2.instances

     - name: Add Name and environment tags to instances
-      ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
+      ec2_tag: resource={{ item.1.id }} region={{ inst_region }} state=present
       with_together:
         - oo_new_inst_names
         - ec2.instances
@@ -40,21 +40,24 @@
           Name: "{{ item.0 }}"

     - name: Add other tags to instances
-      ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+      ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
       with_items: ec2.instances
       args:
         tags: "{{ oo_new_inst_tags }}"

-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.0 }} ansible_ssh_host={{ item.1.dns_name }} groupname=oo_hosts_to_config"
+    - name: Add new instances public IPs to oo_nodes_to_config
+      add_host:
+        hostname: "{{ item.0 }}"
+        ansible_ssh_host: "{{ item.1.dns_name }}"
+        groupname: oo_nodes_to_config
+        ec2_private_ip_address: "{{ item.1.private_ip }}"
+        ec2_ip_address: "{{ item.1.public_ip }}"
       with_together:
         - oo_new_inst_names
         - ec2.instances

-    - debug: var=ec2
-
     - name: Wait for ssh
-      wait_for: "port=22 host={{ item.dns_name }}"
+      wait_for: port=22 host={{ item.dns_name }}
       with_items: ec2.instances

     - name: Wait for root user setup

+ 2 - 0
playbooks/aws/openshift-node/terminate.yml

@@ -0,0 +1,2 @@
+---
+- include: ../terminate.yml

+ 0 - 2
playbooks/aws/openshift-node/vars.yml

@@ -1,2 +0,0 @@
----
-openshift_debug_level: 4

+ 64 - 0
playbooks/aws/terminate.yml

@@ -0,0 +1,64 @@
+---
+- name: Populate oo_hosts_to_terminate host group
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - name: Evaluate oo_hosts_to_terminate
+      add_host: name={{ item }} groups=oo_hosts_to_terminate
+      with_items: oo_host_group_exp | default([])
+
+- name: Gather dynamic inventory variables for hosts to terminate
+  hosts: oo_hosts_to_terminate
+  gather_facts: no
+
+- name: Terminate instances
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars:
+    host_vars: "{{ hostvars
+        | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
+  tasks:
+    - name: Remove tags from instances
+      ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
+      args:
+        tags:
+          env: "{{ item['ec2_tag_env'] }}"
+          host-type: "{{ item['ec2_tag_host-type'] }}"
+          env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
+      with_items: host_vars
+      when: "'oo_hosts_to_terminate' in groups"
+
+    - name: Terminate instances
+      ec2:
+        state: absent
+        instance_ids: ["{{ item.ec2_id }}"]
+        region: "{{ item.ec2_region }}"
+      ignore_errors: yes
+      register: ec2_term
+      with_items: host_vars
+      when: "'oo_hosts_to_terminate' in groups"
+
+    # Fail if any of the instances failed to terminate with an error other
+    # than 403 Forbidden
+    - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
+      when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+      with_items: ec2_term.results
+
+    - name: Stop instance if termination failed
+      ec2:
+        state: stopped
+        instance_ids: ["{{ item.item.ec2_id }}"]
+        region: "{{ item.item.ec2_region }}"
+      register: ec2_stop
+      when: "'oo_hosts_to_terminate' in groups and item.failed"
+      with_items: ec2_term.results
+
+    - name: Rename stopped instances
+      ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+      args:
+        tags:
+          Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+      with_items: ec2_stop.results
+      when: "'oo_hosts_to_terminate' in groups"
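The terminate play above degrades gracefully: instances the credentials are not allowed to terminate (403 Forbidden) are stopped and renamed with a "-terminate" suffix instead of failing the run. A rough equivalent of that fallback in plain boto, to make the flow explicit (region and instance id are placeholders):

    import boto.ec2
    from boto.exception import EC2ResponseError

    conn = boto.ec2.connect_to_region('us-east-1')
    instance_id = 'i-0123456789abcdef0'        # placeholder

    try:
        conn.terminate_instances(instance_ids=[instance_id])
    except EC2ResponseError as err:
        if err.status != 403:
            raise                              # anything but 403 is a real failure
        # not permitted to terminate: stop it and mark it for later cleanup
        conn.stop_instances(instance_ids=[instance_id])
        conn.create_tags([instance_id], {'Name': 'old-name-terminate'})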

+ 6 - 0
playbooks/byo/config.yml

@@ -0,0 +1,6 @@
+---
+- name: Run the openshift-master config playbook
+  include: openshift-master/config.yml
+
+- name: Run the openshift-node config playbook
+  include: openshift-node/config.yml

+ 1 - 0
playbooks/byo/filter_plugins

@@ -0,0 +1 @@
+../../filter_plugins

+ 15 - 0
playbooks/byo/openshift-master/config.yml

@@ -0,0 +1,15 @@
+---
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+    with_items: groups['masters']
+
+- include: ../../common/openshift-master/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"

+ 1 - 0
playbooks/byo/openshift-master/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/byo/openshift-master/roles

@@ -0,0 +1 @@
+../../../roles

+ 21 - 0
playbooks/byo/openshift-node/config.yml

@@ -0,0 +1,21 @@
+---
+- name: Populate oo_nodes_to_config and oo_first_master host groups
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+    with_items: groups.nodes
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups.masters[0] }}"
+      groups: oo_first_master
+
+
+- include: ../../common/openshift-node/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"

+ 1 - 0
playbooks/byo/openshift-node/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/byo/openshift-node/roles

@@ -0,0 +1 @@
+../../../roles

+ 10 - 0
playbooks/byo/openshift_facts.yml

@@ -0,0 +1,10 @@
+---
+- name: Gather OpenShift facts
+  hosts: all
+  gather_facts: no
+  roles:
+  - openshift_facts
+  tasks:
+  - openshift_facts:
+    register: result
+  - debug: var=result

+ 1 - 0
playbooks/byo/roles

@@ -0,0 +1 @@
+../../roles

+ 4 - 0
playbooks/common/openshift-cluster/config.yml

@@ -0,0 +1,4 @@
+---
+- include: ../openshift-master/config.yml
+
+- include: ../openshift-node/config.yml

+ 1 - 0
playbooks/common/openshift-cluster/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/common/openshift-cluster/roles

@@ -0,0 +1 @@
+../../../roles

+ 11 - 0
playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml

@@ -0,0 +1,11 @@
+---
+- set_fact: k8s_type="master"
+
+- name: Generate master instance name(s)
+  set_fact:
+    scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+  register: master_names_output
+  with_sequence: start=1 end={{ num_masters }}
+
+- set_fact:
+    master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"

+ 11 - 0
playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml

@@ -0,0 +1,11 @@
+---
+- set_fact: k8s_type="node"
+
+- name: Generate node instance name(s)
+  set_fact:
+    scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+  register: node_names_output
+  with_sequence: start=1 end={{ num_nodes }}
+
+- set_fact:
+    node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
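The Jinja2 expression "{{ '%05x' | format(1048576 | random) }}" used in these tasks simply appends a random five-digit hex suffix to "<cluster_id>-<k8s_type>-". The same thing in plain Python, as a quick sanity check (example values only):

    import random

    cluster_id, k8s_type = 'mycluster', 'node'    # example values
    suffix = '%05x' % random.randrange(1048576)   # 1048576 == 16**5, so five hex digits
    print('{0}-{1}-{2}'.format(cluster_id, k8s_type, suffix))
    # e.g. mycluster-node-0f3a7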

+ 7 - 0
playbooks/common/openshift-cluster/update_repos_and_packages.yml

@@ -0,0 +1,7 @@
+---
+- hosts: oo_hosts_to_update
+  vars:
+    openshift_deployment_type: "{{ deployment_type }}"
+  roles:
+  - openshift_repos
+  - os_update_latest

+ 19 - 0
playbooks/common/openshift-master/config.yml

@@ -0,0 +1,19 @@
+---
+- name: Configure master instances
+  hosts: oo_masters_to_config
+  vars:
+    openshift_sdn_master_url: https://{{ openshift.common.hostname }}:4001
+  roles:
+  - openshift_master
+  - { role: openshift_sdn_master, when: openshift.common.use_openshift_sdn | bool }
+  tasks:
+  - name: Create group for deployment type
+    group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
+    changed_when: False
+
+# Additional instance config for online deployments
+- name: Additional instance config
+  hosts: oo_masters_deployment_type_online
+  roles:
+  - pods
+  - os_env_extras

+ 1 - 0
playbooks/common/openshift-master/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/common/openshift-master/roles

@@ -0,0 +1 @@
+../../../roles/

+ 127 - 0
playbooks/common/openshift-node/config.yml

@@ -0,0 +1,127 @@
+---
+- name: Gather and set facts for node hosts
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  # Since the master is registering the nodes before they are configured, we
+  # need to make sure to set the node properties beforehand if we do not want
+  # the defaults
+  - openshift_facts:
+      role: "{{ item.role }}"
+      local_facts: "{{ item.local_facts }}"
+    with_items:
+      - role: common
+        local_facts:
+          hostname: "{{ openshift_hostname | default(None) }}"
+          public_hostname: "{{ openshift_public_hostname | default(None) }}"
+      - role: node
+        local_facts:
+          external_id: "{{ openshift_node_external_id | default(None) }}"
+          resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
+          resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
+          pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
+          labels: "{{ openshift_node_labels | default(None) }}"
+          annotations: "{{ openshift_node_annotations | default(None) }}"
+          deployment_type: "{{ openshift_deployment_type }}"
+
+
+- name: Create temp directory for syncing certs
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Create local temp directory for syncing certs
+    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: mktemp
+    changed_when: False
+
+
+- name: Register nodes
+  hosts: oo_first_master
+  vars:
+    openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
+    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+  roles:
+  - openshift_register_nodes
+  tasks:
+  # TODO: update so that we only sync necessary configs/directories, currently
+  # we sync for all nodes in oo_nodes_to_config.  We will need to inspect the
+  # configs on the nodes to make the determination on whether to sync or not.
+  - name: Create the temp directory on the master
+    file:
+      path: "{{ sync_tmpdir }}"
+      owner: "{{ ansible_ssh_user }}"
+      mode: 0700
+      state: directory
+    changed_when: False
+
+  - name: Create a tarball of the node config directories
+    command: tar -czvf {{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz ./
+    args:
+      chdir: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
+    with_items: openshift_nodes
+    changed_when: False
+
+  - name: Retrieve the node config tarballs from the master
+    fetch:
+      src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz"
+      dest: "{{ sync_tmpdir }}/"
+      flat: yes
+      fail_on_missing: yes
+      validate_checksum: yes
+    with_items: openshift_nodes
+    changed_when: False
+
+
+- name: Configure node instances
+  hosts: oo_nodes_to_config
+  gather_facts: no
+  vars:
+    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+    openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001"
+  pre_tasks:
+  - name: Ensure certificate directory exists
+    file:
+      path: "{{ openshift_node_cert_dir }}"
+      state: directory
+
+  # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+  # possibly test service started time against certificate/config file
+  # timestamps in openshift-node or openshift-sdn-node to trigger notify
+  - name: Unarchive the tarball on the node
+    unarchive:
+      src: "{{ sync_tmpdir }}/{{ openshift.common.hostname }}.tgz"
+      dest: "{{ openshift_node_cert_dir }}"
+  roles:
+  - openshift_node
+  - { role: openshift_sdn_node, when: openshift.common.use_openshift_sdn | bool }
+  tasks:
+  - name: Create group for deployment type
+    group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
+    changed_when: False
+
+- name: Delete the temporary directory on the master
+  hosts: oo_first_master
+  gather_facts: no
+  vars:
+    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+  tasks:
+  - file: name={{ sync_tmpdir }} state=absent
+    changed_when: False
+
+
+- name: Delete temporary directory on localhost
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - file: name={{ mktemp.stdout }} state=absent
+    changed_when: False
+
+
+# Additional config for online type deployments
+- name: Additional instance config
+  hosts: oo_nodes_deployment_type_online
+  gather_facts: no
+  roles:
+  - os_env_extras
+  - os_env_extras_node

+ 1 - 0
playbooks/common/openshift-node/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/common/openshift-node/roles

@@ -0,0 +1 @@
+../../../roles/

+ 37 - 0
playbooks/gce/openshift-cluster/config.yml

@@ -0,0 +1,37 @@
+---
+# TODO: fix firewall related bug with GCE and origin, since GCE is overriding
+# /etc/sysconfig/iptables
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+      groups: oo_first_master
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ gce_private_ip }}"

+ 1 - 0
playbooks/gce/openshift-cluster/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 28 - 0
playbooks/gce/openshift-cluster/launch.yml

@@ -0,0 +1,28 @@
+---
+- name: Launch instance(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - fail: msg="Deployment type not supported for gce provider yet"
+    when: deployment_type == 'enterprise'
+
+  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ master_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+
+  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ node_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+
+- include: update.yml
+
+- include: list.yml

+ 24 - 0
playbooks/gce/openshift-cluster/list.yml

@@ -0,0 +1,24 @@
+---
+- name: Generate oo_list_hosts group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - set_fact: scratch_group=tag_env-{{ cluster_id }}
+    when: cluster_id != ''
+  - set_fact: scratch_group=all
+    when: cluster_id == ''
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_list_hosts
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+- name: List Hosts
+  hosts: oo_list_hosts
+  gather_facts: no
+  tasks:
+  - debug:
+      msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"

+ 1 - 0
playbooks/gce/openshift-cluster/roles

@@ -0,0 +1 @@
+../../../roles

+ 41 - 0
playbooks/gce/openshift-cluster/tasks/launch_instances.yml

@@ -0,0 +1,41 @@
+---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update to
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+- name: Launch instance(s)
+  gce:
+    instance_names: "{{ instances }}"
+    machine_type: "{{ lookup('env', 'gce_machine_type') | default('n1-standard-1', true) }}"
+    image: "{{ lookup('env', 'gce_machine_image') | default(deployment_vars[deployment_type].image, true) }}"
+    service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+    pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+    project_id: "{{ lookup('env', 'gce_project_id') }}"
+    tags:
+      - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
+      - env-{{ cluster }}
+      - host-type-{{ type }}
+      - env-host-type-{{ cluster }}-openshift-{{ type }}
+  register: gce
+
+- name: Add new instances to groups and set variables needed
+  add_host:
+    hostname: "{{ item.name }}"
+    ansible_ssh_host: "{{ item.public_ip }}"
+    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+    ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
+    gce_public_ip: "{{ item.public_ip }}"
+    gce_private_ip: "{{ item.private_ip }}"
+  with_items: gce.instance_data
+
+- name: Wait for ssh
+  wait_for: port=22 host={{ item.public_ip }}
+  with_items: gce.instance_data
+
+- name: Wait for user setup
+  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
+  register: result
+  until: result.rc == 0
+  retries: 20
+  delay: 10
+  with_items: gce.instance_data

+ 34 - 0
playbooks/gce/openshift-cluster/terminate.yml

@@ -0,0 +1,34 @@
+---
+- name: Terminate instance(s)
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_terminate
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_terminate
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+- include: ../openshift-node/terminate.yml
+  vars:
+    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+
+- include: ../openshift-master/terminate.yml
+  vars:
+    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"

+ 18 - 0
playbooks/gce/openshift-cluster/update.yml

@@ -0,0 +1,18 @@
+---
+- name: Populate oo_hosts_to_update group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_hosts_to_update
+    add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_update
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: config.yml

+ 15 - 0
playbooks/gce/openshift-cluster/vars.yml

@@ -0,0 +1,15 @@
+---
+deployment_vars:
+  origin:
+    image: centos-7
+    ssh_user:
+    sudo: yes
+  online:
+    image: libra-rhel7
+    ssh_user: root
+    sudo: no
+  enterprise:
+    image: rhel-7
+    ssh_user:
+    sudo: yes
+

+ 13 - 37
playbooks/gce/openshift-master/config.yml

@@ -1,42 +1,18 @@
 ---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: Populate oo_masters_to_config host group
   hosts: localhost
   gather_facts: no
   tasks:
-  - name: "Evaluate oo_host_group_exp if it's set"
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
-    with_items: "{{ oo_host_group_exp | default('') }}"
-    when: oo_host_group_exp is defined
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: root
+    with_items: oo_host_group_exp | default([])
 
 
-- name: "Gather facts for nodes in {{ oo_env }}"
-  hosts: "tag_env-host-type-{{ oo_env }}-openshift-node"
-  connection: ssh
-  user: root
-
-- name: "Set Origin specific facts on localhost (for later use)"
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Setting openshift_node_ips fact on localhost
-      set_fact:
-        openshift_node_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-node'])
-            | oo_collect(attribute='ansible_default_ipv4.address') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-node'] is defined
-
-- name: "Configure instances"
-  hosts: oo_hosts_to_config
-  connection: ssh
-  user: root
-  vars_files:
-    - vars.yml
-  roles:
-    - repos
-    - {
-        role: openshift_master,
-        openshift_node_ips: "{{ hostvars['localhost'].openshift_node_ips | default(['']) }}",
-        openshift_public_ip: "{{ gce_public_ip }}",
-        openshift_env: "{{ oo_env }}",
-      }
-    - pods
-    - os_env_extras
+- include: ../../common/openshift-master/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ gce_private_ip }}"
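
Note: openshift_hostname is fed from gce_private_ip, which the reworked launch.yml below records on each new master via add_host (gce_private_ip: "{{ item.private_ip }}"). A minimal check that the hostvar is in place before the common config playbook runs, offered only as a sketch and not part of the diff:

  - hosts: oo_masters_to_config
    gather_facts: no
    tasks:
      # gce_private_ip was attached to each master by the add_host task in
      # playbooks/gce/openshift-master/launch.yml
      - debug:
          msg: "openshift_hostname will be {{ gce_private_ip }}"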

+ 12 - 8
playbooks/gce/openshift-master/launch.yml

@@ -1,17 +1,19 @@
 ---
+# TODO: when we are ready to go to ansible 1.9+ support only, we can update
+# the gce task to use the disk_auto_delete parameter to avoid having to delete
+# the disk as a separate step on termination
+
 - name: Launch instance(s)
   hosts: localhost
   connection: local
   gather_facts: no
 
 
+# TODO: modify image based on deployment_type
   vars:
     inst_names: "{{ oo_new_inst_names }}"
     machine_type: n1-standard-1
     image: libra-rhel7
 
 
-  vars_files:
-      - vars.yml
-
   tasks:
     - name: Launch instances
       gce:
@@ -24,16 +26,18 @@
         tags: "{{ oo_new_inst_tags }}"
       register: gce
 
 
-    - name: Add new instances public IPs to oo_hosts_to_config
-      add_host: "hostname={{ item.name }} ansible_ssh_host={{ item.public_ip }} groupname=oo_hosts_to_config"
+    - name: Add new instances public IPs to oo_masters_to_config
+      add_host:
+        hostname: "{{ item.name }}"
+        ansible_ssh_host: "{{ item.public_ip }}"
+        groupname: oo_masters_to_config
+        gce_private_ip: "{{ item.private_ip }}"
       with_items: gce.instance_data
 
 
     - name: Wait for ssh
-      wait_for: "port=22 host={{ item.public_ip }}"
+      wait_for: port=22 host={{ item.public_ip }}
       with_items: gce.instance_data
 
 
-    - debug: var=gce
-
     - name: Wait for root user setup
       command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
       register: result
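
Note: the two TODOs above describe follow-ups once ansible 1.9+ is the baseline: the gce module's disk_auto_delete parameter would make the separate gce_pd cleanup in terminate.yml unnecessary, and the hard-coded libra-rhel7 image could be driven from deployment_vars, which the cluster-level vars.yml added in this pull request already keys by deployment_type. A rough, abridged sketch of what that launch task might look like, offered as an assumption rather than part of this change (credential parameters omitted):

    - name: Launch instances (sketch for ansible >= 1.9)
      gce:
        instance_names: "{{ inst_names }}"
        machine_type: "{{ machine_type }}"
        image: "{{ deployment_vars[deployment_type].image }}"  # instead of a fixed libra-rhel7
        disk_auto_delete: yes   # assumed 1.9+ parameter; boot disk is deleted with the instance
        tags: "{{ oo_new_inst_tags }}"
      register: gce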

+ 11 - 17
playbooks/gce/openshift-master/terminate.yml

@@ -1,20 +1,16 @@
-- name: "populate oo_hosts_to_terminate host group if needed"
+---
+- name: Populate oo_masters_to_terminate host group if needed
   hosts: localhost
   gather_facts: no
   tasks:
-    - debug: var=oo_host_group_exp
+    - name: Evaluate oo_masters_to_terminate
+      add_host: name={{ item }} groups=oo_masters_to_terminate
+      with_items: oo_host_group_exp | default([])
 
 
-    - name: Evaluate oo_host_group_exp if it's set
-      add_host: "name={{ item }} groups=oo_hosts_to_terminate"
-      with_items: "{{ oo_host_group_exp | default('') }}"
-      when: oo_host_group_exp is defined
-
-    - debug: msg="{{ groups['oo_hosts_to_terminate'] }}"
-
-
-- name: Terminate instances
+- name: Terminate master instances
   hosts: localhost
   connection: local
+  gather_facts: no
   tasks:
     - name: Terminate master instances
       gce:
@@ -22,11 +18,10 @@
         pem_file: "{{ gce_pem_file }}"
         project_id: "{{ gce_project_id }}"
         state: 'absent'
-        instance_names: "{{ groups['oo_hosts_to_terminate'] }}"
-        disks: "{{ groups['oo_hosts_to_terminate'] }}"
+        instance_names: "{{ groups['oo_masters_to_terminate'] }}"
+        disks: "{{ groups['oo_masters_to_terminate'] }}"
       register: gce
-
-    - debug: var=gce
+      when: "'oo_masters_to_terminate' in groups"
 
 
     - name: Remove disks of instances
       gce_pd:
@@ -37,5 +32,4 @@
         zone: "{{ gce.zone }}"
         state: absent
       with_items: gce.instance_names
-
-
+      when: "'oo_masters_to_terminate' in groups"

+ 0 - 2
playbooks/gce/openshift-master/vars.yml

@@ -1,2 +0,0 @@
----
-openshift_debug_level: 4

+ 18 - 43
playbooks/gce/openshift-node/config.yml

@@ -1,49 +1,24 @@
 ---
-- name: "populate oo_hosts_to_config host group if needed"
+- name: Populate oo_nodes_to_config and oo_first_master host groups
   hosts: localhost
   gather_facts: no
   tasks:
-  - name: Evaluate oo_host_group_exp
-    add_host: "name={{ item }} groups=oo_hosts_to_config"
-    with_items: "{{ oo_host_group_exp | default('') }}"
-    when: oo_host_group_exp is defined
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: root
+    with_items: oo_host_group_exp | default([])
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+      groups: oo_first_master
+      ansible_ssh_user: root
 
 
-- name: "Gather facts for masters in {{ oo_env }}"
-  hosts: "tag_env-host-type-{{ oo_env }}-openshift-master"
-  connection: ssh
-  user: root
 
 
-- name: "Set OO sepcific facts on localhost (for later use)"
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Setting openshift_master_ips fact on localhost
-      set_fact:
-        openshift_master_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
-            | oo_collect(attribute='ansible_default_ipv4.address') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
-    - name: Setting openshift_master_public_ips fact on localhost
-      set_fact:
-        openshift_master_public_ips: "{{ hostvars
-            | oo_select_keys(groups['tag_env-host-type-' + oo_env + '-openshift-master'])
-            | oo_collect(attribute='gce_public_ip') }}"
-      when: groups['tag_env-host-type-' + oo_env + '-openshift-master'] is defined
-
-- name: "Configure instances"
-  hosts: oo_hosts_to_config
-  connection: ssh
-  user: root
-  vars_files:
-    - vars.yml
-  roles:
-    - repos
-    - docker
-    - {
-        role: openshift_node,
-        openshift_master_ips: "{{ hostvars['localhost'].openshift_master_ips | default(['']) }}",
-        openshift_master_public_ips: "{{ hostvars['localhost'].openshift_master_public_ips | default(['']) }}",
-        openshift_public_ip: "{{ gce_public_ip }}",
-        openshift_env: "{{ oo_env }}",
-      }
-    - os_env_extras
+- include: ../../common/openshift-node/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ gce_private_ip }}"

+ 0 - 0
playbooks/gce/openshift-node/launch.yml


Some files were not shown because too many files changed in this diff