
Merge pull request #187 from openshift/master

Merge master into INT
Wesley Hearn 10 years ago
commit 196d37e2ff
100 changed files with 2619 additions and 1157 deletions
  1. README_AWS.md (+20 -1)
  2. README_OSE.md (+145 -46)
  3. README_libvirt.md (+58 -20)
  4. bin/cluster (+71 -20)
  5. filter_plugins/oo_filters.py (+104 -21)
  6. git/.pylintrc (+390 -0)
  7. git/parent.rb (+45 -0)
  8. git/pylint.sh (+14 -0)
  9. git/yaml_validation.rb (+72 -0)
  10. inventory/aws/group_vars/all (+0 -2)
  11. inventory/aws/hosts/ec2.ini (+0 -0)
  12. inventory/aws/hosts/ec2.py (+0 -0)
  13. inventory/aws/hosts/hosts (+1 -0)
  14. inventory/byo/group_vars/all (+0 -28)
  15. inventory/byo/hosts (+25 -1)
  16. inventory/gce/group_vars/all (+0 -2)
  17. inventory/gce/hosts/gce.py (+0 -0)
  18. inventory/gce/hosts/hosts (+1 -0)
  19. inventory/libvirt/group_vars/all (+0 -2)
  20. inventory/libvirt/hosts (+0 -2)
  21. inventory/libvirt/hosts/hosts (+1 -0)
  22. inventory/libvirt/hosts/libvirt.ini (+20 -0)
  23. inventory/libvirt/hosts/libvirt_generic.py (+179 -0)
  24. inventory/multi_ec2.py (+51 -44)
  25. inventory/multi_ec2.yaml.example (+2 -2)
  26. playbooks/aws/openshift-cluster/config.yml (+36 -0)
  27. playbooks/aws/openshift-cluster/launch.yml (+21 -53)
  28. playbooks/aws/openshift-cluster/launch_instances.yml (+0 -63)
  29. playbooks/aws/openshift-cluster/library/ec2_ami_find.py (+302 -0)
  30. playbooks/aws/openshift-cluster/list.yml (+11 -4)
  31. playbooks/aws/openshift-cluster/tasks/launch_instances.yml (+132 -0)
  32. playbooks/aws/openshift-cluster/templates/user_data.j2 (+29 -0)
  33. playbooks/aws/openshift-cluster/terminate.yml (+11 -9)
  34. playbooks/aws/openshift-cluster/update.yml (+15 -10)
  35. playbooks/aws/openshift-cluster/vars.defaults.yml (+1 -0)
  36. playbooks/aws/openshift-cluster/vars.online.int.yml (+9 -0)
  37. playbooks/aws/openshift-cluster/vars.online.prod.yml (+9 -0)
  38. playbooks/aws/openshift-cluster/vars.online.stage.yml (+9 -0)
  39. playbooks/aws/openshift-cluster/vars.yml (+37 -0)
  40. playbooks/aws/openshift-master/config.yml (+11 -16)
  41. playbooks/aws/openshift-master/launch.yml (+3 -5)
  42. playbooks/aws/openshift-master/terminate.yml (+1 -51)
  43. playbooks/aws/openshift-master/vars.yml (+0 -3)
  44. playbooks/aws/openshift-node/config.yml (+14 -96)
  45. playbooks/aws/openshift-node/launch.yml (+4 -6)
  46. playbooks/aws/openshift-node/terminate.yml (+1 -51)
  47. playbooks/aws/openshift-node/vars.yml (+0 -3)
  48. playbooks/aws/terminate.yml (+64 -0)
  49. playbooks/byo/openshift-master/config.yml (+13 -7)
  50. playbooks/byo/openshift-node/config.yml (+16 -74)
  51. playbooks/byo/openshift_facts.yml (+10 -0)
  52. playbooks/common/openshift-cluster/config.yml (+4 -0)
  53. playbooks/common/openshift-cluster/filter_plugins (+0 -0)
  54. playbooks/common/openshift-cluster/roles (+0 -0)
  55. playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml (+11 -0)
  56. playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml (+11 -0)
  57. playbooks/common/openshift-cluster/update_repos_and_packages.yml (+7 -0)
  58. playbooks/common/openshift-master/config.yml (+19 -0)
  59. playbooks/common/openshift-master/filter_plugins (+0 -0)
  60. playbooks/common/openshift-master/roles (+1 -0)
  61. playbooks/common/openshift-node/config.yml (+127 -0)
  62. playbooks/common/openshift-node/filter_plugins (+1 -0)
  63. playbooks/common/openshift-node/roles (+1 -0)
  64. playbooks/gce/openshift-cluster/config.yml (+37 -0)
  65. playbooks/gce/openshift-cluster/launch.yml (+19 -53)
  66. playbooks/gce/openshift-cluster/list.yml (+11 -4)
  67. playbooks/gce/openshift-cluster/launch_instances.yml (+11 -14)
  68. playbooks/gce/openshift-cluster/terminate.yml (+18 -4)
  69. playbooks/gce/openshift-cluster/update.yml (+15 -10)
  70. playbooks/gce/openshift-cluster/vars.yml (+14 -0)
  71. playbooks/gce/openshift-master/config.yml (+11 -13)
  72. playbooks/gce/openshift-master/launch.yml (+2 -4)
  73. playbooks/gce/openshift-master/terminate.yml (+5 -6)
  74. playbooks/gce/openshift-master/vars.yml (+0 -3)
  75. playbooks/gce/openshift-node/config.yml (+15 -91)
  76. playbooks/gce/openshift-node/launch.yml (+2 -4)
  77. playbooks/gce/openshift-node/terminate.yml (+5 -6)
  78. playbooks/gce/openshift-node/vars.yml (+0 -3)
  79. playbooks/libvirt/openshift-cluster/config.yml (+38 -0)
  80. playbooks/libvirt/openshift-cluster/launch.yml (+26 -55)
  81. playbooks/libvirt/openshift-cluster/list.yml (+15 -35)
  82. playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml (+6 -0)
  83. playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml (+27 -0)
  84. playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml (+23 -0)
  85. playbooks/libvirt/openshift-cluster/launch_instances.yml (+34 -29)
  86. playbooks/libvirt/templates/domain.xml (+10 -4)
  87. playbooks/libvirt/openshift-cluster/templates/meta-data (+3 -0)
  88. playbooks/libvirt/openshift-cluster/templates/network.xml (+23 -0)
  89. playbooks/libvirt/openshift-cluster/templates/user-data (+23 -0)
  90. playbooks/libvirt/openshift-cluster/terminate.yml (+36 -33)
  91. playbooks/libvirt/openshift-cluster/update.yml (+18 -0)
  92. playbooks/libvirt/openshift-cluster/vars.yml (+32 -6)
  93. playbooks/libvirt/openshift-master/config.yml (+0 -21)
  94. playbooks/libvirt/openshift-master/roles (+0 -1)
  95. playbooks/libvirt/openshift-master/vars.yml (+0 -1)
  96. playbooks/libvirt/openshift-node/config.yml (+0 -102)
  97. playbooks/libvirt/openshift-node/vars.yml (+0 -1)
  98. playbooks/libvirt/templates/meta-data (+0 -2)
  99. playbooks/libvirt/templates/user-data (+0 -10)
  100. roles/openshift_common/tasks/main.yml (+0 -0)

+ 20 - 1
README_AWS.md

@@ -40,11 +40,25 @@ Alternatively, you can configure your ssh-agent to hold the credentials to conne
 By default, a cluster is launched with the following configuration:

 - Instance type: m3.large
-- AMI: ami-307b3658
+- AMI: ami-307b3658 (for online deployments, ami-acd999c4 for origin deployments and ami-10663b78 for enterprise deployments)
 - Region: us-east-1
 - Keypair name: libra
 - Security group: public

+Master specific defaults:
+- Master root volume size: 10 (in GiBs)
+- Master root volume type: gp2
+- Master root volume iops: 500 (only applicable when volume type is io1)
+
+Node specific defaults:
+- Node root volume size: 10 (in GiBs)
+- Node root volume type: gp2
+- Node root volume iops: 500 (only applicable when volume type is io1)
+- Docker volume size: 25 (in GiBs)
+- Docker volume ephemeral: true (Whether the docker volume is ephemeral)
+- Docker volume type: gp2 (only applicable if ephemeral is false)
+- Docker volume iops: 500 (only applicable when volume type is io1)
+
 If needed, these values can be changed by setting environment variables on your system.

 - export ec2_instance_type='m3.large'
@@ -52,6 +66,11 @@ If needed, these values can be changed by setting environment variables on your
 - export ec2_region='us-east-1'
 - export ec2_keypair='libra'
 - export ec2_security_group='public'
+- export os_master_root_vol_size='20'
+- export os_master_root_vol_type='standard'
+- export os_node_root_vol_size='15'
+- export os_docker_vol_size='50'
+- export os_docker_vol_ephemeral='false'
 
 
 Install Dependencies
 --------------------
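
README_AWS.md now documents per-role volume defaults and the `os_*` environment variables that override them. As a minimal sketch only (not part of this PR; the resolver function below is hypothetical), the override precedence amounts to "environment variable if set, otherwise the documented default":

```python
import os

# Defaults as documented above in README_AWS.md; names match the export examples.
DEFAULTS = {
    'ec2_instance_type': 'm3.large',
    'os_master_root_vol_size': '10',
    'os_master_root_vol_type': 'gp2',
    'os_node_root_vol_size': '10',
    'os_docker_vol_size': '25',
    'os_docker_vol_ephemeral': 'true',
}

def resolve(name):
    """Environment variable wins; otherwise fall back to the documented default."""
    return os.environ.get(name, DEFAULTS[name])

os.environ['os_docker_vol_size'] = '50'    # equivalent to: export os_docker_vol_size='50'
print(resolve('os_docker_vol_size'))       # 50
print(resolve('os_master_root_vol_type'))  # gp2
```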

+ 145 - 46
README_OSE.md

@@ -7,15 +7,17 @@
 * [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups)
 * [Running the ansible playbooks](#running-the-ansible-playbooks)
 * [Post-ansible steps](#post-ansible-steps)
+* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
 
 
 ## Requirements
 * ansible
-  * Tested using ansible-1.8.2-1.fc20.noarch, but should work with version 1.8+
+  * Tested using ansible-1.8.4-1.fc20.noarch, but should work with version 1.8+
+  * There is currently a known issue with ansible-1.9.0; you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842
   * Available in Fedora channels
   * Available for EL with EPEL and Optional channel
 * One or more RHEL 7.1 VMs
-* ssh key based auth for the root user needs to be pre-configured from the host
-  running ansible to the remote hosts
+* Either ssh key based auth for the root user or ssh key based auth for a user
+  with sudo access (no password)
 * A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/

   ```sh
@@ -48,9 +50,6 @@ subscription-manager repos \
 ```
 * Configuration of router is not automated yet
 * Configuration of docker-registry is not automated yet
-* End-to-end testing has not been completed yet using this module
-* root user is used for all ansible actions; eventually we will support using
-  a non-root user with sudo.
 
 
 ## Configuring the host inventory
 [Ansible docs](http://docs.ansible.com/intro_inventory.html)
@@ -64,6 +63,38 @@ option to ansible-playbook.
 ```ini
 # This is an example of a bring your own (byo) host inventory

+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
+'baseurl':
+'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
+'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name':
+'OpenShift Origin COPR', 'baseurl':
+'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/',
+'enabled': 1, 'gpgcheck': 1, gpgkey:
+'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
 # host group for masters
 [masters]
 ose3-master.example.com
@@ -76,51 +107,13 @@ ose3-node[1:2].example.com
 The hostnames above should resolve both from the hosts themselves and
 the host where ansible is running (if different).

-## Creating the default variables for the hosts and host groups
-[Ansible docs](http://docs.ansible.com/intro_inventory.html#id9)
-
-#### Group vars for all hosts
-/etc/ansible/group_vars/all:
-```yaml
----
-# Assume that we want to use the root as the ssh user for all hosts
-ansible_ssh_user: root
-
-# Default debug level for all OpenShift hosts
-openshift_debug_level: 4
-
-# Set the OpenShift deployment type for all hosts
-openshift_deployment_type: enterprise
-
-# Override the default registry for development
-openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
-
-# To use the latest OpenShift Enterprise Errata puddle:
-#openshift_additional_repos:
-#- id: ose-devel
-#  name: ose-devel
-#  baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os
-#  enabled: 1
-#  gpgcheck: 0
-# To use the latest OpenShift Enterprise Whitelist puddle:
-openshift_additional_repos:
-- id: ose-devel
-  name: ose-devel
-  baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os
-  enabled: 1
-  gpgcheck: 0
-
-```
-
 ## Running the ansible playbooks
 From the openshift-ansible checkout run:
 ```sh
 ansible-playbook playbooks/byo/config.yml
 ```
-**Note:** this assumes that the host inventory is /etc/ansible/hosts and the
-group_vars are defined in /etc/ansible/group_vars, if using a different
-inventory file (and a group_vars directory that is in the same directory as
-the directory as the inventory) use the -i option for ansible-playbook.
+**Note:** this assumes that the host inventory is /etc/ansible/hosts; if using a different
+inventory file, use the -i option for ansible-playbook.
 
 
 ## Post-ansible steps
 #### Create the default router
@@ -140,3 +133,109 @@ openshift ex registry --create=true \
   --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}' \
   --mount-host=/var/lib/openshift/docker-registry
 ```
+
+## Overriding detected ip addresses and hostnames
+Some deployments will require that the user override the detected hostnames
+and ip addresses for the hosts. To see what the default values will be you can
+run the openshift_facts playbook:
+```sh
+ansible-playbook playbooks/byo/openshift_facts.yml
+```
+The output will be similar to:
+```
+ok: [10.3.9.45] => {
+    "result": {
+        "ansible_facts": {
+            "openshift": {
+                "common": {
+                    "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+                    "ip": "172.16.4.79",
+                    "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+                    "public_ip": "10.3.9.45",
+                    "use_openshift_sdn": true
+                },
+                "provider": {
+                  ... <snip> ...
+                }
+            }
+        },
+        "changed": false,
+        "invocation": {
+            "module_args": "",
+            "module_name": "openshift_facts"
+        }
+    }
+}
+ok: [10.3.9.42] => {
+    "result": {
+        "ansible_facts": {
+            "openshift": {
+                "common": {
+                    "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+                    "ip": "172.16.4.75",
+                    "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+                    "public_ip": "10.3.9.42",
+                    "use_openshift_sdn": true
+                },
+                "provider": {
+                  ...<snip>...
+                }
+            }
+        },
+        "changed": false,
+        "invocation": {
+            "module_args": "",
+            "module_name": "openshift_facts"
+        }
+    }
+}
+ok: [10.3.9.36] => {
+    "result": {
+        "ansible_facts": {
+            "openshift": {
+                "common": {
+                    "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+                    "ip": "172.16.4.73",
+                    "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+                    "public_ip": "10.3.9.36",
+                    "use_openshift_sdn": true
+                },
+                "provider": {
+                    ...<snip>...
+                }
+            }
+        },
+        "changed": false,
+        "invocation": {
+            "module_args": "",
+            "module_name": "openshift_facts"
+        }
+    }
+}
+```
+Now we want to verify that the detected common settings are what we expect
+them to be (if not, we can override them).
+
+* hostname
+  * Should resolve to the internal ip from the instances themselves.
+  * openshift_hostname will override.
+* ip
+  * Should be the internal ip of the instance.
+  * openshift_ip will override.
+* public_hostname
+  * Should resolve to the external ip from hosts outside of the cloud provider.
+  * openshift_public_hostname will override.
+* public_ip
+  * Should be the externally accessible ip associated with the instance
+  * openshift_public_ip will override
+* use_openshift_sdn
+  * Should be true unless the cloud is GCE.
+  * openshift_use_openshift_sdn overrides
+
+To override the defaults, you can set the variables in your inventory:
+```
+...snip...
+[masters]
+ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com
+...snip...
+```

+ 58 - 20
README_libvirt.md

@@ -1,4 +1,3 @@
-
 LIBVIRT Setup instructions
 ==========================

@@ -9,19 +8,21 @@ This makes `libvirt` useful to develop, test and debug Openshift and openshift-a
 Install dependencies
 --------------------

-1. Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
-2. Install [ebtables](http://ebtables.netfilter.org/)
-3. Install [qemu](http://wiki.qemu.org/Main_Page)
-4. Install [libvirt](http://libvirt.org/)
-5. Enable and start the libvirt daemon, e.g:
-   * ``systemctl enable libvirtd``
-   * ``systemctl start libvirtd``
-6. [Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
-7. Check that your `$HOME` is accessible to the qemu user²
+1.	Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
+2.	Install [ebtables](http://ebtables.netfilter.org/)
+3.	Install [qemu](http://wiki.qemu.org/Main_Page)
+4.	Install [libvirt](http://libvirt.org/)
+5.	Enable and start the libvirt daemon, e.g:
+	-	`systemctl enable libvirtd`
+	-	`systemctl start libvirtd`
+6.	[Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
+7.	Check that your `$HOME` is accessible to the qemu user²
+8.	Configure dns resolution on the host³
 
 
 #### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.

 You can test it with the following command:
+
 ```
 virsh -c qemu:///system pool-list
 ```
@@ -67,12 +68,7 @@ If your `$HOME` is world readable, everything is fine. If your `$HOME` is privat
 error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
 ```

-In order to fix that issue, you have several possibilities:
-* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
-  * backed by a filesystem with a lot of free disk space
-  * writable by your user;
-  * accessible by the qemu user.
-* Grant the qemu user access to the storage pool.
+In order to fix that issue, you have several possibilities:* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory: * backed by a filesystem with a lot of free disk space * writable by your user; * accessible by the qemu user.* Grant the qemu user access to the storage pool.
 
 
 On Arch:

@@ -80,13 +76,55 @@ On Arch:
 setfacl -m g:kvm:--x ~
 ```

-Test the setup
+#### ³ Enabling DNS resolution to your guest VMs with NetworkManager
+
+-	Verify NetworkManager is configured to use dnsmasq:
+
+```sh
+$ sudo vi /etc/NetworkManager/NetworkManager.conf
+[main]
+dns=dnsmasq
+```
+
+-	Configure dnsmasq to use the Virtual Network router for example.com:
+
+```sh
+sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf server=/example.com/192.168.55.1
+```
+
+Test The Setup
 --------------

+1.	cd openshift-ansible/
+2.	Try to list all instances (Passing an empty string as the cluster_id argument will result in all libvirt instances being listed)
+
+```
+  bin/cluster list libvirt ''
 ```
-cd openshift-ansible
 
 
-bin/cluster create -m 1 -n 3 libvirt lenaic
+Creating a cluster
+------------------
+
+1.	To create a cluster with one master and two nodes
 
 
-bin/cluster terminate libvirt lenaic
+```
+  bin/cluster create libvirt lenaic
+```
+
+Updating a cluster
+------------------
+
+1.	To update the cluster
+
+```
+  bin/cluster update libvirt lenaic
+```
+
+Terminating a cluster
+---------------------
+
+1.	To terminate the cluster
+
+```
+  bin/cluster terminate libvirt lenaic
 ```

+ 71 - 20
bin/cluster

@@ -22,13 +22,28 @@ class Cluster(object):
                 '-o ControlPersist=600s '
             )

+    def get_deployment_type(self, args):
+        """
+        Get the deployment_type based on the environment variables and the
+        command line arguments
+        :param args: command line arguments provided by the user
+        :return: string representing the deployment type
+        """
+        deployment_type = 'origin'
+        if args.deployment_type:
+            deployment_type = args.deployment_type
+        elif 'OS_DEPLOYMENT_TYPE' in os.environ:
+            deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
+        return deployment_type
+
     def create(self, args):
         """
         Create an OpenShift cluster for given provider
         :param args: command line arguments provided by user
         :return: exit status from run command
         """
-        env = {'cluster_id': args.cluster_id}
+        env = {'cluster_id': args.cluster_id,
+               'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)

@@ -43,7 +58,8 @@ class Cluster(object):
         :param args: command line arguments provided by user
         :return: exit status from run command
         """
-        env = {'cluster_id': args.cluster_id}
+        env = {'cluster_id': args.cluster_id,
+               'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)

@@ -55,19 +71,34 @@ class Cluster(object):
         :param args: command line arguments provided by user
         :return: exit status from run command
         """
-        env = {'cluster_id': args.cluster_id}
+        env = {'cluster_id': args.cluster_id,
+               'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)

         return self.action(args, inventory, env, playbook)

+    def config(self, args):
+        """
+        Configure or reconfigure OpenShift across clustered VMs
+        :param args: command line arguments provided by user
+        :return: exit status from run command
+        """
+        env = {'cluster_id': args.cluster_id,
+               'deployment_type': self.get_deployment_type(args)}
+        playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider)
+        inventory = self.setup_provider(args.provider)
+
+        return self.action(args, inventory, env, playbook)
+
     def update(self, args):
         """
         Update to latest OpenShift across clustered VMs
         :param args: command line arguments provided by user
         :return: exit status from run command
         """
-        env = {'cluster_id': args.cluster_id}
+        env = {'cluster_id': args.cluster_id,
+               'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)

@@ -81,19 +112,19 @@ class Cluster(object):
         """
         config = ConfigParser.ConfigParser()
         if 'gce' == provider:
-            config.readfp(open('inventory/gce/gce.ini'))
+            config.readfp(open('inventory/gce/hosts/gce.ini'))
 
 
             for key in config.options('gce'):
                 os.environ[key] = config.get('gce', key)

-            inventory = '-i inventory/gce/gce.py'
+            inventory = '-i inventory/gce/hosts'
         elif 'aws' == provider:
-            config.readfp(open('inventory/aws/ec2.ini'))
+            config.readfp(open('inventory/aws/hosts/ec2.ini'))
 
 
             for key in config.options('ec2'):
                 os.environ[key] = config.get('ec2', key)

-            inventory = '-i inventory/aws/ec2.py'
+            inventory = '-i inventory/aws/hosts'
         elif 'libvirt' == provider:
             inventory = '-i inventory/libvirt/hosts'
         else:
@@ -145,29 +176,49 @@ if __name__ == '__main__':
     parser = argparse.ArgumentParser(
         description='Python wrapper to ensure proper environment for OpenShift ansible playbooks',
     )
-    parser.add_argument('-v', '--verbose', action='count', help='Multiple -v options increase the verbosity')
+    parser.add_argument('-v', '--verbose', action='count',
+                        help='Multiple -v options increase the verbosity')
     parser.add_argument('--version', action='version', version='%(prog)s 0.2')

     meta_parser = argparse.ArgumentParser(add_help=False)
     meta_parser.add_argument('provider', choices=providers, help='provider')
     meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
-
-    action_parser = parser.add_subparsers(dest='action', title='actions', description='Choose from valid actions')
-
-    create_parser = action_parser.add_parser('create', help='Create a cluster', parents=[meta_parser])
-    create_parser.add_argument('-m', '--masters', default=1, type=int, help='number of masters to create in cluster')
-    create_parser.add_argument('-n', '--nodes', default=2, type=int, help='number of nodes to create in cluster')
+    meta_parser.add_argument('-t', '--deployment-type',
+                             choices=['origin', 'online', 'enterprise'],
+                             help='Deployment type. (default: origin)')
+
+    action_parser = parser.add_subparsers(dest='action', title='actions',
+                                          description='Choose from valid actions')
+
+    create_parser = action_parser.add_parser('create', help='Create a cluster',
+                                             parents=[meta_parser])
+    create_parser.add_argument('-m', '--masters', default=1, type=int,
+                               help='number of masters to create in cluster')
+    create_parser.add_argument('-n', '--nodes', default=2, type=int,
+                               help='number of nodes to create in cluster')
     create_parser.set_defaults(func=cluster.create)

-    terminate_parser = action_parser.add_parser('terminate', help='Destroy a cluster', parents=[meta_parser])
-    terminate_parser.add_argument('-f', '--force', action='store_true', help='Destroy cluster without confirmation')
+    config_parser = action_parser.add_parser('config',
+                                             help='Configure or reconfigure a cluster',
+                                             parents=[meta_parser])
+    config_parser.set_defaults(func=cluster.config)
+
+    terminate_parser = action_parser.add_parser('terminate',
+                                                help='Destroy a cluster',
+                                                parents=[meta_parser])
+    terminate_parser.add_argument('-f', '--force', action='store_true',
+                                  help='Destroy cluster without confirmation')
     terminate_parser.set_defaults(func=cluster.terminate)

-    update_parser = action_parser.add_parser('update', help='Update OpenShift across cluster', parents=[meta_parser])
-    update_parser.add_argument('-f', '--force', action='store_true', help='Update cluster without confirmation')
+    update_parser = action_parser.add_parser('update',
+                                             help='Update OpenShift across cluster',
+                                             parents=[meta_parser])
+    update_parser.add_argument('-f', '--force', action='store_true',
+                               help='Update cluster without confirmation')
     update_parser.set_defaults(func=cluster.update)

-    list_parser = action_parser.add_parser('list', help='List VMs in cluster', parents=[meta_parser])
+    list_parser = action_parser.add_parser('list', help='List VMs in cluster',
+                                           parents=[meta_parser])
     list_parser.set_defaults(func=cluster.list)

     args = parser.parse_args()
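
The new `get_deployment_type` helper added to `bin/cluster` above gives the `-t/--deployment-type` flag precedence over the `OS_DEPLOYMENT_TYPE` environment variable, falling back to `origin`. A minimal standalone sketch of that precedence (it mirrors the method in the diff; the small parser here is illustrative only, not the full CLI):

```python
import argparse
import os

def get_deployment_type(args):
    """Resolve the deployment type: the -t/--deployment-type flag wins,
    then the OS_DEPLOYMENT_TYPE environment variable, then 'origin'."""
    deployment_type = 'origin'
    if args.deployment_type:
        deployment_type = args.deployment_type
    elif 'OS_DEPLOYMENT_TYPE' in os.environ:
        deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
    return deployment_type

parser = argparse.ArgumentParser()
parser.add_argument('-t', '--deployment-type',
                    choices=['origin', 'online', 'enterprise'])

# The command line flag takes precedence over the environment variable.
os.environ['OS_DEPLOYMENT_TYPE'] = 'online'
print(get_deployment_type(parser.parse_args([])))                    # online
print(get_deployment_type(parser.parse_args(['-t', 'enterprise'])))  # enterprise
```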

+ 104 - 21
filter_plugins/oo_filters.py

@@ -1,13 +1,17 @@
 #!/usr/bin/python
 # -*- coding: utf-8 -*-
 # vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
 
 
-from ansible import errors, runner
-import json
+from ansible import errors
+from operator import itemgetter
 import pdb

 def oo_pdb(arg):
-    ''' This pops you into a pdb instance where arg is the data passed in from the filter.
+    ''' This pops you into a pdb instance where arg is the data passed in
+        from the filter.
         Ex: "{{ hostvars | oo_pdb }}"
     '''
     pdb.set_trace()
@@ -20,7 +24,8 @@ def oo_len(arg):
     return len(arg)

 def get_attr(data, attribute=None):
-    ''' This looks up dictionary attributes of the form a.b.c and returns the value.
+    ''' This looks up dictionary attributes of the form a.b.c and returns
+        the value.
         Ex: data = {'a': {'b': {'c': 5}}}
             attribute = "a.b.c"
             returns 5
@@ -40,12 +45,13 @@ def oo_flatten(data):
     if not issubclass(type(data), list):
         raise errors.AnsibleFilterError("|failed expects to flatten a List")

-    return [ item for sublist in data for item in sublist ]
+    return [item for sublist in data for item in sublist]
 
 
 
 
-def oo_collect(data, attribute=None, filters={}):
-    ''' This takes a list of dict and collects all attributes specified into a list
-        If filter is specified then we will include all items that match _ALL_ of filters.
+def oo_collect(data, attribute=None, filters=None):
+    ''' This takes a list of dict and collects all attributes specified into a
+        list If filter is specified then we will include all items that match
+        _ALL_ of filters.
         Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
                      {'a':2, 'z': 'z'},        # True, return
                      {'a':3, 'z': 'z'},        # True, return
@@ -55,15 +61,18 @@ def oo_collect(data, attribute=None, filters={}):
             filters   = {'z': 'z'}
             returns [1, 2, 3]
     '''
-
     if not issubclass(type(data), list):
         raise errors.AnsibleFilterError("|failed expects to filter on a List")

     if not attribute:
         raise errors.AnsibleFilterError("|failed expects attribute to be set")

-    if filters:
-        retval = [get_attr(d, attribute) for d in data if all([ d[key] == filters[key] for key in filters ]) ]
+    if filters is not None:
+        if not issubclass(type(filters), dict):
+            raise errors.AnsibleFilterError("|fialed expects filter to be a"
+                                            " dict")
+        retval = [get_attr(d, attribute) for d in data if (
+            all([d[key] == filters[key] for key in filters]))]
     else:
         retval = [get_attr(d, attribute) for d in data]

@@ -77,7 +86,7 @@ def oo_select_keys(data, keys):
     '''

     if not issubclass(type(data), dict):
-        raise errors.AnsibleFilterError("|failed expects to filter on a Dictionary")
+        raise errors.AnsibleFilterError("|failed expects to filter on a dict")
 
 
     if not issubclass(type(keys), list):
         raise errors.AnsibleFilterError("|failed expects first param is a list")
@@ -97,17 +106,91 @@ def oo_prepend_strings_in_list(data, prepend):
     if not issubclass(type(data), list):
         raise errors.AnsibleFilterError("|failed expects first param is a list")
     if not all(isinstance(x, basestring) for x in data):
-        raise errors.AnsibleFilterError("|failed expects first param is a list of strings")
+        raise errors.AnsibleFilterError("|failed expects first param is a list"
+                                        " of strings")
     retval = [prepend + s for s in data]
     return retval

-class FilterModule (object):
+def oo_ami_selector(data, image_name):
+    ''' This takes a list of amis and an image name and attempts to return
+        the latest ami.
+    '''
+    if not issubclass(type(data), list):
+        raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+    if not data:
+        return None
+    else:
+        if image_name is None or not image_name.endswith('_*'):
+            ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
+            return ami['ami_id']
+        else:
+            ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
+            ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
+            return ami['ami_id']
+
+def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
+    ''' This takes a dictionary of volume definitions and returns a valid ec2
+        volume definition based on the host_type and the values in the
+        dictionary.
+        The dictionary should look similar to this:
+            { 'master':
+                { 'root':
+                    { 'volume_size': 10, 'device_type': 'gp2',
+                      'iops': 500
+                    }
+                },
+              'node':
+                { 'root':
+                    { 'volume_size': 10, 'device_type': 'io1',
+                      'iops': 1000
+                    },
+                  'docker':
+                    { 'volume_size': 40, 'device_type': 'gp2',
+                      'iops': 500, 'ephemeral': 'true'
+                    }
+                }
+            }
+    '''
+    if not issubclass(type(data), dict):
+        raise errors.AnsibleFilterError("|failed expects first param is a dict")
+    if host_type not in ['master', 'node']:
+        raise errors.AnsibleFilterError("|failed expects either master or node"
+                                        " host type")
+
+    root_vol = data[host_type]['root']
+    root_vol['device_name'] = '/dev/sda1'
+    root_vol['delete_on_termination'] = True
+    if root_vol['device_type'] != 'io1':
+        root_vol.pop('iops', None)
+    if host_type == 'node':
+        docker_vol = data[host_type]['docker']
+        docker_vol['device_name'] = '/dev/xvdb'
+        docker_vol['delete_on_termination'] = True
+        if docker_vol['device_type'] != 'io1':
+            docker_vol.pop('iops', None)
+        if docker_ephemeral:
+            docker_vol.pop('device_type', None)
+            docker_vol.pop('delete_on_termination', None)
+            docker_vol['ephemeral'] = 'ephemeral0'
+        return [root_vol, docker_vol]
+    return [root_vol]
+
+# disabling pylint checks for too-few-public-methods and no-self-use since we
+# need to expose a FilterModule object that has a filters method that returns
+# a mapping of filter names to methods.
+# pylint: disable=too-few-public-methods, no-self-use
+class FilterModule(object):
+    ''' FilterModule '''
     def filters(self):
     def filters(self):
+        ''' returns a mapping of filters to methods '''
         return {
-                "oo_collect": oo_collect,
-                "oo_flatten": oo_flatten,
-                "oo_len": oo_len,
-                "oo_pdb": oo_pdb,
-                "oo_prepend_strings_in_list": oo_prepend_strings_in_list
-                }
+            "oo_select_keys": oo_select_keys,
+            "oo_collect": oo_collect,
+            "oo_flatten": oo_flatten,
+            "oo_len": oo_len,
+            "oo_pdb": oo_pdb,
+            "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
+            "oo_ami_selector": oo_ami_selector,
+            "oo_ec2_volume_definition": oo_ec2_volume_definition
+        }
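
The new `oo_ec2_volume_definition` filter expands the nested volume dictionary shown in its docstring into the block-device list consumed by the EC2 launch tasks. A minimal illustration of that transformation for a node host follows; the input values are taken from the docstring above, the playbook expression in the comment is a hypothetical usage, and the expected output is what the filter's logic produces when the docker volume is not ephemeral:

```python
# Input, as in the oo_ec2_volume_definition docstring above (node host type).
volume_defs = {
    'node': {
        'root':   {'volume_size': 10, 'device_type': 'io1', 'iops': 1000},
        'docker': {'volume_size': 40, 'device_type': 'gp2', 'iops': 500,
                   'ephemeral': 'true'},
    }
}

# In a playbook this could be used as a Jinja2 filter, e.g.:
#   volumes: "{{ volume_defs | oo_ec2_volume_definition('node', docker_ephemeral=False) }}"
# For a node whose docker volume is not ephemeral, the filter's logic yields:
expected = [
    {'volume_size': 10, 'device_type': 'io1', 'iops': 1000,   # io1 keeps iops
     'device_name': '/dev/sda1', 'delete_on_termination': True},
    {'volume_size': 40, 'device_type': 'gp2',                 # iops dropped: not io1
     'ephemeral': 'true',
     'device_name': '/dev/xvdb', 'delete_on_termination': True},
]
print(expected)
```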

+ 390 - 0
git/.pylintrc

@@ -0,0 +1,390 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Profiled execution.
+profile=no
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=no
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Deprecated. It was used to include message's id in output. Use --msg-template
+# instead.
+#include-ids=no
+
+# Deprecated. It was used to include symbolic ids of messages in output. Use
+# --msg-template instead.
+#symbols=no
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loading into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect, the resulting
+# AST will be different than the one from reality.
+optimize-ast=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time. See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once).You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use"--disable=all --enable=classes
+# --disable=W"
+disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636
+
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=no
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (RP0004).
+comment=no
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter,input
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=__.*__
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis
+ignored-modules=
+
+# List of classes names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set).
+ignored-classes=SQLObject
+
+# When zope mode is activated, add a predefined set of Zope acquired attributes
+# to generated-members.
+zope=no
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E0201 when accessed. Python regular
+# expressions are accepted.
+generated-members=REQUEST,acl_users,aq_parent
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it working
+# install python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception

+ 45 - 0
git/parent.rb

@@ -0,0 +1,45 @@
+#!/usr/bin/env ruby
+#
+#
+#
+
+if __FILE__ == $0
+  # If we aren't on master we don't need to parent check
+  branch = 'prod'
+  exit(0) if ARGV[0] !~ /#{branch}/
+  commit_id = ARGV[1]
+  %x[/usr/bin/git checkout #{branch}]
+  %x[/usr/bin/git merge #{commit_id}]
+
+  count = 0
+  #lines = %x[/usr/bin/git rev-list --left-right stg...master].split("\n")
+  lines = %x[/usr/bin/git rev-list --left-right remotes/origin/stg...#{branch}].split("\n")
+  lines.each do |commit|
+    # next if they are in stage
+    next if commit =~ /^</
+    # remove the first char '>'
+    commit = commit[1..-1]
+    # check if any remote branches contain $commit
+    results = %x[/usr/bin/git branch -q -r --contains #{commit} 2>/dev/null ]
+    # if this comes back empty, nothing contains it, we can skip it as
+    # we have probably created the merge commit here locally
+    next if results.empty?
+
+    # The results generally contain origin/pr/246/merge and origin/pr/246/head
+    # this is the pull request which would contain the commit in question.
+    #
+    # If the results do not contain origin/stg then stage does not contain
+    # the commit in question.  Therefore we need to alert!
+    unless results =~ /origin\/stg/
+      puts "\nFAILED: (These commits are not in stage.)\n"
+      puts "\t#{commit}"
+      count += 1
+    end
+  end
+
+  # Exit with count of commits in #{branch} but not stg
+  exit(count)
+end
+
+__END__
+

+ 14 - 0
git/pylint.sh

@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+
+OLDREV=$1
+NEWREV=$2
+TRG_BRANCH=$3
+
+PYTHON=/var/lib/jenkins/python27/bin/python
+
+/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | \
+ grep ".py$" | \
+ xargs -r -I{} ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc  {}
+
+exit $?

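A usage sketch for the new lint wrapper, assuming it runs inside a Jenkins job where WORKSPACE points at the repository checkout (the revisions are placeholders):

    export WORKSPACE=/var/lib/jenkins/workspace/openshift-ansible
    ./git/pylint.sh origin/master HEAD master    # lints only the changed *.py files with git/.pylintrc
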
+ 72 - 0
git/yaml_validation.rb

@@ -0,0 +1,72 @@
+#!/usr/bin/env ruby
+#
+#
+#
+require 'yaml'
+require 'tmpdir'
+
+class YamlValidate
+  def self.yaml_file?(filename)
+    return filename.end_with?('.yaml') || filename.end_with?('.yml')
+  end
+
+  def self.short_yaml_ext?(filename)
+    return filename.end_with?(".yml")
+  end
+
+  def self.valid_yaml?(filename)
+    YAML::load_file(filename)
+
+    return true
+  end
+end
+
+class GitCommit
+  attr_accessor :oldrev, :newrev, :refname, :tmp
+  def initialize(oldrev, newrev, refname)
+    @oldrev = oldrev
+    @newrev = newrev
+    @refname = refname
+    @tmp = Dir.mktmpdir(@newrev)
+  end
+
+  def get_file_changes()
+    files = %x[/usr/bin/git diff --name-only #{@oldrev} #{@newrev} --diff-filter=ACM].split("\n")
+
+    # if files is empty we will get a full checkout.  This happens on
+    # a git rm file.  If there are no changes then we need to skip the archive
+    return [] if files.empty?
+
+    # We only want to take the files that changed.  Archive will do that when passed
+    # the filenames.  It will export these to a tmp dir
+    system("/usr/bin/git archive #{@newrev} #{files.join(" ")} | tar x -C #{@tmp}")
+    return Dir.glob("#{@tmp}/**/*").delete_if { |file| File.directory?(file) }
+  end
+end
+
+if __FILE__ == $0
+  while data = STDIN.gets
+    oldrev, newrev, refname = data.split
+    gc = GitCommit.new(oldrev, newrev, refname)
+
+    results = []
+    gc.get_file_changes().each do |file|
+      begin
+        puts "++++++ Received:  #{file}"
+
+        #raise "Yaml file extensions must be .yaml not .yml" if YamlValidate.short_yaml_ext? file
+
+        # skip readme, other files, etc
+        next unless YamlValidate.yaml_file?(file)
+
+        results << YamlValidate.valid_yaml?(file)
+      rescue Exception => ex
+        puts "\n#{ex.message}\n\n"
+        results << false
+      end
+    end
+
+    #puts "RESULTS\n#{results.inspect}\n"
+    exit 1 if results.include?(false)
+  end
+end

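The validator reads the standard pre-receive hook input, one "<oldrev> <newrev> <refname>" triple per line on stdin; a hand-run sketch with placeholder revisions:

    echo "0f1e2d3c 4b5a6978 refs/heads/master" | ruby git/yaml_validation.rb
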
+ 0 - 2
inventory/aws/group_vars/all

@@ -1,2 +0,0 @@
----
-ansible_ssh_user: root

inventory/aws/ec2.ini → inventory/aws/hosts/ec2.ini


inventory/aws/ec2.py → inventory/aws/hosts/ec2.py


+ 1 - 0
inventory/aws/hosts/hosts

@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2

+ 0 - 28
inventory/byo/group_vars/all

@@ -1,28 +0,0 @@
----
-# lets assume that we want to use the root as the ssh user for all hosts
-ansible_ssh_user: root
-
-# default debug level for all OpenShift hosts
-openshift_debug_level: 4
-
-# set the OpenShift deployment type for all hosts
-openshift_deployment_type: enterprise
-
-# Override the default registry for development
-openshift_registry_url: docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
-
-# Use latest Errata puddle as an additional repo:
-#openshift_additional_repos:
-#- id: ose-devel
-#  name: ose-devel
-#  baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os
-#  enabled: 1
-#  gpgcheck: 0
-
-# Use latest Whitelist puddle as an additional repo:
-openshift_additional_repos:
-- id: ose-devel
-  name: ose-devel
-  baseurl: http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os
-  enabled: 1
-  gpgcheck: 0

+ 25 - 1
inventory/byo/hosts

@@ -1,5 +1,30 @@
 # This is an example of a bring your own (byo) host inventory
 
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+openshift_registry_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
 # host group for masters
 [masters]
 ose3-master-ansible.test.example.com
@@ -7,4 +32,3 @@ ose3-master-ansible.test.example.com
 # host group for nodes
 [nodes]
 ose3-node[1:2]-ansible.test.example.com
-

+ 0 - 2
inventory/gce/group_vars/all

@@ -1,2 +0,0 @@
----
-ansible_ssh_user: root

inventory/gce/gce.py → inventory/gce/hosts/gce.py


+ 1 - 0
inventory/gce/hosts/hosts

@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2

+ 0 - 2
inventory/libvirt/group_vars/all

@@ -1,2 +0,0 @@
----
-ansible_ssh_user: root

+ 0 - 2
inventory/libvirt/hosts

@@ -1,2 +0,0 @@
-# Eventually we'll add the GCE, AWS, etc dynamic inventories, but for now...
-localhost ansible_python_interpreter=/usr/bin/python2

+ 1 - 0
inventory/libvirt/hosts/hosts

@@ -0,0 +1 @@
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2

+ 20 - 0
inventory/libvirt/hosts/libvirt.ini

@@ -0,0 +1,20 @@
+# Ansible libvirt external inventory script settings
+#
+
+[libvirt]
+
+uri = qemu:///system
+
+# API calls to libvirt can be slow. For this reason, we cache the results of an API
+# call. Set this to the path you want cache files to be written to. Two files
+# will be written to this directory:
+#   - ansible-libvirt.cache
+#   - ansible-libvirt.index
+cache_path = /tmp
+
+# The number of seconds a cache file is considered valid. After this many
+# seconds, a new API call will be made, and the cache file will be updated.
+cache_max_age = 900
+
+
+

+ 179 - 0
inventory/libvirt/hosts/libvirt_generic.py

@@ -0,0 +1,179 @@
+#!/usr/bin/env python2
+
+"""
+libvirt external inventory script
+=================================
+
+Ansible has a feature where instead of reading from /etc/ansible/hosts
+as a text file, it can query external programs to obtain the list
+of hosts, groups the hosts are in, and even variables to assign to each host.
+
+To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
+This, more or less, allows you to keep one central database containing
+info about all of your managed instances.
+
+"""
+
+# (c) 2015, Jason DeTiberus <jdetiber@redhat.com>
+#
+# This file is part of Ansible,
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+######################################################################
+
+import argparse
+import ConfigParser
+import os
+import re
+import sys
+from time import time
+import libvirt
+import xml.etree.ElementTree as ET
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+
+class LibvirtInventory(object):
+
+    def __init__(self):
+        self.inventory = dict()  # A list of groups and the hosts in that group
+        self.cache = dict()  # Details about hosts in the inventory
+
+        # Read settings and parse CLI arguments
+        self.read_settings()
+        self.parse_cli_args()
+
+        if self.args.host:
+            print self.json_format_dict(self.get_host_info(), self.args.pretty)
+        elif self.args.list:
+            print self.json_format_dict(self.get_inventory(), self.args.pretty)
+        else:  # default action with no options
+            print self.json_format_dict(self.get_inventory(), self.args.pretty)
+
+    def read_settings(self):
+        config = ConfigParser.SafeConfigParser()
+        config.read(
+            os.path.dirname(os.path.realpath(__file__)) + '/libvirt.ini'
+        )
+        self.libvirt_uri = config.get('libvirt', 'uri')
+
+    def parse_cli_args(self):
+        parser = argparse.ArgumentParser(
+            description='Produce an Ansible Inventory file based on libvirt'
+        )
+        parser.add_argument(
+            '--list',
+            action='store_true',
+            default=True,
+            help='List instances (default: True)'
+        )
+        parser.add_argument(
+            '--host',
+            action='store',
+            help='Get all the variables about a specific instance'
+        )
+        parser.add_argument(
+            '--pretty',
+            action='store_true',
+            default=False,
+            help='Pretty format (default: False)'
+        )
+        self.args = parser.parse_args()
+
+    def get_host_info(self):
+        inventory = self.get_inventory()
+        if self.args.host in inventory['_meta']['hostvars']:
+            return inventory['_meta']['hostvars'][self.args.host]
+
+    def get_inventory(self):
+        inventory = dict(_meta=dict(hostvars=dict()))
+
+        conn = libvirt.openReadOnly(self.libvirt_uri)
+        if conn is None:
+            print "Failed to open connection to %s" % libvirt_uri
+            sys.exit(1)
+
+        domains = conn.listAllDomains()
+        if domains is None:
+            print "Failed to list domains for connection %s" % libvirt_uri
+            sys.exit(1)
+
+        arp_entries = self.parse_arp_entries()
+
+        for domain in domains:
+            hostvars = dict(libvirt_name=domain.name(),
+                            libvirt_id=domain.ID(),
+                            libvirt_uuid=domain.UUIDString())
+            domain_name = domain.name()
+
+            # TODO: add support for guests that are not in a running state
+            state, _ = domain.state()
+            # 1 is the state for a running guest
+            if state != 1:
+                continue
+
+            hostvars['libvirt_status'] = 'running'
+
+            root = ET.fromstring(domain.XMLDesc())
+            ns = {'ansible': 'https://github.com/ansible/ansible'}
+            for tag_elem in root.findall('./metadata/ansible:tags/ansible:tag', ns):
+                tag = tag_elem.text
+                self.push(inventory, "tag_%s" % tag, domain_name)
+                self.push(hostvars, 'libvirt_tags', tag)
+
+            # TODO: support more than one network interface, also support
+            # interface types other than 'network'
+            interface = root.find("./devices/interface[@type='network']")
+            if interface is not None:
+                mac_elem = interface.find('mac')
+                if mac_elem is not None:
+                    mac = mac_elem.get('address')
+                    if mac in arp_entries:
+                        ip_address = arp_entries[mac]['ip_address']
+                        hostvars['ansible_ssh_host'] = ip_address
+                        hostvars['libvirt_ip_address'] = ip_address
+
+            inventory['_meta']['hostvars'][domain_name] = hostvars
+
+        return inventory
+
+    def parse_arp_entries(self):
+        arp_entries = dict()
+        with open('/proc/net/arp', 'r') as f:
+            # throw away the header
+            f.readline()
+
+            for line in f:
+                ip_address, _, _, mac, _, device = line.strip().split()
+                arp_entries[mac] = dict(ip_address=ip_address, device=device)
+
+        return arp_entries
+
+    def push(self, my_dict, key, element):
+        if key in my_dict:
+            my_dict[key].append(element)
+        else:
+            my_dict[key] = [element]
+
+    def json_format_dict(self, data, pretty=False):
+        if pretty:
+            return json.dumps(data, sort_keys=True, indent=2)
+        else:
+            return json.dumps(data)
+
+LibvirtInventory()

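Like the other dynamic inventories, this script speaks the Ansible --list/--host protocol; a quick smoke test against the qemu:///system URI configured in libvirt.ini might look like this (the domain name is a placeholder):

    ./inventory/libvirt/hosts/libvirt_generic.py --list --pretty    # {"_meta": {"hostvars": {...}}, "tag_<name>": [...]}
    ./inventory/libvirt/hosts/libvirt_generic.py --host mydomain    # vars for one running guest, e.g. ansible_ssh_host
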
+ 51 - 44
inventory/multi_ec2.py

@@ -1,22 +1,29 @@
 #!/usr/bin/env python2
+'''
+    Fetch and combine multiple ec2 account settings into a single
+    json hash.
+'''
 # vim: expandtab:tabstop=4:shiftwidth=4
 
 from time import time
 import argparse
 import yaml
 import os
-import sys
-import pdb
 import subprocess
 import json
-import pprint
 
 
 CONFIG_FILE_NAME = 'multi_ec2.yaml'
 
 class MultiEc2(object):
+    '''
+       MultiEc2 class:
+            Opens a yaml config file and reads aws credentials.
+            Stores a json hash of resources in result.
+    '''
 
     def __init__(self):
+        self.args = None
         self.config = None
         self.all_ec2_results = {}
         self.result = {}
@@ -24,7 +31,7 @@ class MultiEc2(object):
         self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
 
         same_dir_config_file = os.path.join(self.file_path, CONFIG_FILE_NAME)
-        etc_dir_config_file = os.path.join(os.path.sep, 'etc','ansible', CONFIG_FILE_NAME)
+        etc_dir_config_file = os.path.join(os.path.sep, 'etc', 'ansible', CONFIG_FILE_NAME)
 
         # Prefer a file in the same directory, fall back to a file in etc
         if os.path.isfile(same_dir_config_file):
@@ -39,12 +46,13 @@ class MultiEc2(object):
         # load yaml
         if self.config_file and os.path.isfile(self.config_file):
             self.config = self.load_yaml_config()
-        elif os.environ.has_key("AWS_ACCESS_KEY_ID") and os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
+        elif os.environ.has_key("AWS_ACCESS_KEY_ID") and \
+             os.environ.has_key("AWS_SECRET_ACCESS_KEY"):
             self.config = {}
             self.config['accounts'] = [
                 {
                     'name': 'default',
-                    'provider': 'aws/ec2.py',
+                    'provider': 'aws/hosts/ec2.py',
                     'env_vars': {
                         'AWS_ACCESS_KEY_ID':     os.environ["AWS_ACCESS_KEY_ID"],
                         'AWS_SECRET_ACCESS_KEY': os.environ["AWS_SECRET_ACCESS_KEY"],
@@ -56,13 +64,9 @@ class MultiEc2(object):
         else:
             raise RuntimeError("Could not find valid ec2 credentials in the environment.")
 
-        if self.args.cache_only:
-            # get data from disk
-            result = self.get_inventory_from_cache()
-
-            if not result:
-                self.get_inventory()
-                self.write_to_cache()
+        if self.args.refresh_cache:
+            self.get_inventory()
+            self.write_to_cache()
         # if its a host query, fetch and do not cache
         elif self.args.host:
             self.get_inventory()
@@ -74,7 +78,7 @@ class MultiEc2(object):
             # get data from disk
             self.get_inventory_from_cache()
 
-    def load_yaml_config(self,conf_file=None):
+    def load_yaml_config(self, conf_file=None):
         """Load a yaml config file with credentials to query the
         respective cloud for inventory.
         """
@@ -88,7 +92,7 @@ class MultiEc2(object):
 
         return config
 
-    def get_provider_tags(self,provider, env={}):
+    def get_provider_tags(self, provider, env=None):
         """Call <provider> and query all of the tags that are usable
         by ansible.  If environment is empty use the default env.
         """
@@ -153,7 +157,8 @@ class MultiEc2(object):
                     self.all_ec2_results[result['name']] = json.loads(result['out'])
             values = self.all_ec2_results.values()
             values.insert(0, self.result)
-            [MultiEc2.merge_destructively(self.result, x) for x in  values]
+            for result in  values:
+                MultiEc2.merge_destructively(self.result, result)
         else:
             # For any 0 result, return it
             count = 0
@@ -165,30 +170,30 @@ class MultiEc2(object):
                     raise RuntimeError("Found > 1 results for --host %s. \
                     raise RuntimeError("Found > 1 results for --host %s. \
                                        This is an invalid state." % self.args.host)
                                        This is an invalid state." % self.args.host)
     @staticmethod
     @staticmethod
-    def merge_destructively(a, b):
-        "merges b into a"
-        for key in b:
-            if key in a:
-                if isinstance(a[key], dict) and isinstance(b[key], dict):
-                    MultiEc2.merge_destructively(a[key], b[key])
-                elif a[key] == b[key]:
+    def merge_destructively(input_a, input_b):
+        "merges b into input_a"
+        for key in input_b:
+            if key in input_a:
+                if isinstance(input_a[key], dict) and isinstance(input_b[key], dict):
+                    MultiEc2.merge_destructively(input_a[key], input_b[key])
+                elif input_a[key] == input_b[key]:
                     pass # same leaf value
                     pass # same leaf value
                 # both lists so add each element in b to a if it does ! exist
                 # both lists so add each element in b to a if it does ! exist
-                elif isinstance(a[key], list) and isinstance(b[key],list):
-                    for x in b[key]:
-                        if x not in a[key]:
-                            a[key].append(x)
+                elif isinstance(input_a[key], list) and isinstance(input_b[key], list):
+                    for result in input_b[key]:
+                        if result not in input_a[key]:
+                            input_a[key].input_append(result)
                 # a is a list and not b
                 # a is a list and not b
-                elif isinstance(a[key], list):
-                    if b[key] not in a[key]:
-                        a[key].append(b[key])
-                elif isinstance(b[key], list):
-                    a[key] = [a[key]] + [k for k in b[key] if k != a[key]]
+                elif isinstance(input_a[key], list):
+                    if input_b[key] not in input_a[key]:
+                        input_a[key].append(input_b[key])
+                elif isinstance(input_b[key], list):
+                    input_a[key] = [input_a[key]] + [k for k in input_b[key] if k != input_a[key]]
                 else:
                 else:
-                    a[key] = [a[key],b[key]]
+                    input_a[key] = [input_a[key], input_b[key]]
             else:
             else:
-                a[key] = b[key]
-        return a
+                input_a[key] = input_b[key]
+        return input_a
 
 
     def is_cache_valid(self):
     def is_cache_valid(self):
         ''' Determines if the cache files have expired, or if it is still valid '''
         ''' Determines if the cache files have expired, or if it is still valid '''
@@ -204,19 +209,20 @@ class MultiEc2(object):
     def parse_cli_args(self):
         ''' Command line argument processing '''
 
-        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on a provider')
-        parser.add_argument('--cache-only', action='store_true', default=False,
-                           help='Fetch cached only instances (default: False)')
+        parser = argparse.ArgumentParser(
+            description='Produce an Ansible Inventory file based on a provider')
+        parser.add_argument('--refresh-cache', action='store_true', default=False,
+                            help='Refresh the inventory cache (default: False)')
         parser.add_argument('--list', action='store_true', default=True,
-                           help='List instances (default: True)')
+                            help='List instances (default: True)')
         parser.add_argument('--host', action='store', default=False,
-                           help='Get all the variables about a specific instance')
+                            help='Get all the variables about a specific instance')
         self.args = parser.parse_args()
 
     def write_to_cache(self):
         ''' Writes data in JSON format to a file '''
 
-        json_data = self.json_format_dict(self.result, True)
+        json_data = MultiEc2.json_format_dict(self.result, True)
         with open(self.cache_path, 'w') as cache:
             cache.write(json_data)
 
@@ -232,7 +238,8 @@ class MultiEc2(object):
 
         return True
 
-    def json_format_dict(self, data, pretty=False):
+    @classmethod
+    def json_format_dict(cls, data, pretty=False):
         ''' Converts a dict to a JSON object and dumps it as a formatted
         string '''
 
@@ -242,9 +249,9 @@ class MultiEc2(object):
             return json.dumps(data)
 
     def result_str(self):
+        '''Return cache string stored in self.result'''
         return self.json_format_dict(self.result, True)
 
 
 if __name__ == "__main__":
-    mi = MultiEc2()
-    print mi.result_str()
+    print MultiEc2().result_str()

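A usage sketch for the renamed cache flag, assuming a multi_ec2.yaml next to the script or AWS_* credentials in the environment:

    ./inventory/multi_ec2.py --refresh-cache      # query each configured account and rewrite the cache
    ./inventory/multi_ec2.py --list               # emit the merged inventory
    ./inventory/multi_ec2.py --host ip-10-0-0-1   # vars for a single instance (placeholder name), bypassing the cache
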
+ 2 - 2
inventory/multi_ec2.yaml.example

@@ -1,13 +1,13 @@
 # multi ec2 inventory configs
 accounts:
   - name: aws1
-    provider: aws/ec2.py
+    provider: aws/hosts/ec2.py
     env_vars:
       AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
       AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 
   - name: aws2
-    provider: aws/ec2.py
+    provider: aws/hosts/ec2.py
     env_vars:
       AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
       AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX

+ 36 - 0
playbooks/aws/openshift-cluster/config.yml

@@ -0,0 +1,36 @@
+---
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
+      groups: oo_first_master
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ ec2_private_ip_address }}"
+    openshift_public_hostname: "{{ ec2_ip_address }}"

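A sketch of running this playbook directly against an already-launched cluster, assuming the EC2 dynamic inventory above and placeholder values for the variables it keys on:

    ansible-playbook -i inventory/multi_ec2.py playbooks/aws/openshift-cluster/config.yml \
        -e cluster_id=mycluster -e deployment_type=origin
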
+ 21 - 53
playbooks/aws/openshift-cluster/launch.yml

@@ -4,59 +4,27 @@
   connection: local
   gather_facts: no
   vars_files:
-      - vars.yml
+  - vars.yml
+  - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
   tasks:
-    - set_fact: k8s_type="master"
-
-    - name: Generate master instance names(s)
-      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
-      register: master_names_output
-      with_sequence: start=1 end={{ num_masters }}
-
-    # These set_fact's cannot be combined
-    - set_fact:
-        master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
-    - set_fact:
-        master_names: "{{ master_names_string.strip().split(' ') }}"
-
-    - include: launch_instances.yml
-      vars:
-        instances: "{{ master_names }}"
-        cluster: "{{ cluster_id }}"
-        type: "{{ k8s_type }}"
-
-    - set_fact: k8s_type="node"
-
-    - name: Generate node instance names(s)
-      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
-      register: node_names_output
-      with_sequence: start=1 end={{ num_nodes }}
-
-    # These set_fact's cannot be combined
-    - set_fact:
-        node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
-    - set_fact:
-        node_names: "{{ node_names_string.strip().split(' ') }}"
-
-    - include: launch_instances.yml
-      vars:
-        instances: "{{ node_names }}"
-        cluster: "{{ cluster_id }}"
-        type: "{{ k8s_type }}"
-
-- hosts: "tag_env_{{ cluster_id }}"
-  roles:
-  - openshift_repos
-  - os_update_latest
-
-- include: ../openshift-master/config.yml
-  vars:
-    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
-
-- include: ../openshift-node/config.yml
-  vars:
-    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+  - fail:
+      msg: Deployment type not supported for aws provider yet
+    when: deployment_type == 'enterprise'
+
+  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ master_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+
+  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ node_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+
+- include: update.yml
 
 - include: list.yml

+ 0 - 63
playbooks/aws/openshift-cluster/launch_instances.yml

@@ -1,63 +0,0 @@
----
-- set_fact:
-    machine_type: "{{ lookup('env', 'ec2_instance_type')|default('m3.large', true) }}"
-    machine_image: "{{ lookup('env', 'ec2_ami')|default('ami-307b3658', true) }}"
-    machine_region: "{{ lookup('env', 'ec2_region')|default('us-east-1', true) }}"
-    machine_keypair: "{{ lookup('env', 'ec2_keypair')|default('libra', true) }}"
-    created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
-    security_group: "{{ lookup('env', 'ec2_security_group')|default('public', true) }}"
-    env: "{{ cluster }}"
-    host_type: "{{ type }}"
-    env_host_type: "{{ cluster }}-openshift-{{ type }}"
-
-- name: Launch instance(s)
-  ec2:
-    state: present
-    region: "{{ machine_region }}"
-    keypair: "{{ machine_keypair }}"
-    group: "{{ security_group }}"
-    instance_type: "{{ machine_type }}"
-    image: "{{ machine_image }}"
-    count: "{{ instances | oo_len }}"
-    wait: yes
-    instance_tags:
-      created-by: "{{ created_by }}"
-      env: "{{ env }}"
-      host-type: "{{ host_type }}"
-      env-host-type: "{{ env_host_type }}"
-  register: ec2
-
-- name: Add Name tag to instances
-  ec2_tag: resource={{ item.1.id }} region={{ machine_region }} state=present
-  with_together:
-  - instances
-  - ec2.instances
-  args:
-    tags:
-      Name: "{{ item.0 }}"
-
-- set_fact:
-    instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
-
-- name: Add new instances groups and variables
-  add_host:
-    hostname: "{{ item.0 }}"
-    ansible_ssh_host: "{{ item.1.dns_name }}"
-    groups: "{{ instance_groups }}"
-    ec2_private_ip_address: "{{ item.1.private_ip }}"
-    ec2_ip_address: "{{ item.1.public_ip }}"
-  with_together:
-  - instances
-  - ec2.instances
-
-- name: Wait for ssh
-  wait_for: "port=22 host={{ item.dns_name }}"
-  with_items: ec2.instances
-
-- name: Wait for root user setup
-  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
-  register: result
-  until: result.rc == 0
-  retries: 20
-  delay: 10
-  with_items: ec2.instances

+ 302 - 0
playbooks/aws/openshift-cluster/library/ec2_ami_find.py

@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#pylint: skip-file
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_find
+version_added: 2.0
+short_description: Searches for AMIs to obtain the AMI ID and other information
+description:
+  - Returns list of matching AMIs with AMI ID, along with other useful information
+  - Can search AMIs with different owners
+  - Can search by matching tag(s), by AMI name and/or other criteria
+  - Results can be sorted and sliced
+author: Tom Bamford
+notes:
+  - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
+  - See the example below for a suggestion of how to search by distro/release.
+options:
+  region:
+    description:
+      - The AWS region to use.
+    required: true
+    aliases: [ 'aws_region', 'ec2_region' ]
+  owner:
+    description:
+      - Search AMIs owned by the specified owner
+      - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
+      - If not specified, all EC2 AMIs in the specified region will be searched.
+      - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
+    required: false
+    default: null
+  ami_id:
+    description:
+      - An AMI ID to match.
+    default: null
+    required: false
+  ami_tags:
+    description:
+      - A hash/dictionary of tags to match for the AMI.
+    default: null
+    required: false
+  architecture:
+    description:
+      - An architecture type to match (e.g. x86_64).
+    default: null
+    required: false
+  hypervisor:
+    description:
+      - A hypervisor type to match (e.g. xen).
+    default: null
+    required: false
+  is_public:
+    description:
+      - Whether or not the image(s) are public.
+    choices: ['yes', 'no']
+    default: null
+    required: false
+  name:
+    description:
+      - An AMI name to match.
+    default: null
+    required: false
+  platform:
+    description:
+      - Platform type to match.
+    default: null
+    required: false
+  sort:
+    description:
+      - Optional attribute with which to sort the results.
+      - If specifying 'tag', the 'sort_tag' parameter is required.
+    choices: ['name', 'description', 'tag']
+    default: null
+    required: false
+  sort_tag:
+    description:
+      - Tag name with which to sort results.
+      - Required when specifying 'sort=tag'.
+    default: null
+    required: false
+  sort_order:
+    description:
+      - Order in which to sort results.
+      - Only used when the 'sort' parameter is specified.
+    choices: ['ascending', 'descending']
+    default: 'ascending'
+    required: false
+  sort_start:
+    description:
+      - Which result to start with (when sorting).
+      - Corresponds to Python slice notation.
+    default: null
+    required: false
+  sort_end:
+    description:
+      - Which result to end with (when sorting).
+      - Corresponds to Python slice notation.
+    default: null
+    required: false
+  state:
+    description:
+      - AMI state to match.
+    default: 'available'
+    required: false
+  virtualization_type:
+    description:
+      - Virtualization type to match (e.g. hvm).
+    default: null
+    required: false
+  no_result_action:
+    description:
+      - What to do when no results are found.
+      - "'success' reports success and returns an empty array"
+      - "'fail' causes the module to report failure"
+    choices: ['success', 'fail']
+    default: 'success'
+    required: false
+requirements:
+  - boto
+
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Search for the AMI tagged "project:website"
+- ec2_ami_find:
+    owner: self
+    ami_tags:
+      project: website
+    no_result_action: fail
+  register: ami_find
+
+# Search for the latest Ubuntu 14.04 AMI
+- ec2_ami_find:
+    name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"
+    owner: 099720109477
+    sort: name
+    sort_order: descending
+    sort_end: 1
+  register: ami_find
+
+# Launch an EC2 instance
+- ec2:
+    image: "{{ ami_search.results[0].ami_id }}"
+    instance_type: m3.medium
+    key_name: mykey
+    wait: yes
+'''
+
+try:
+    import boto.ec2
+    HAS_BOTO=True
+except ImportError:
+    HAS_BOTO=False
+
+import json
+
+def main():
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(dict(
+            region = dict(required=True,
+                aliases = ['aws_region', 'ec2_region']),
+            owner = dict(required=False, default=None),
+            ami_id = dict(required=False),
+            ami_tags = dict(required=False, type='dict',
+                aliases = ['search_tags', 'image_tags']),
+            architecture = dict(required=False),
+            hypervisor = dict(required=False),
+            is_public = dict(required=False),
+            name = dict(required=False),
+            platform = dict(required=False),
+            sort = dict(required=False, default=None,
+                choices=['name', 'description', 'tag']),
+            sort_tag = dict(required=False),
+            sort_order = dict(required=False, default='ascending',
+                choices=['ascending', 'descending']),
+            sort_start = dict(required=False),
+            sort_end = dict(required=False),
+            state = dict(required=False, default='available'),
+            virtualization_type = dict(required=False),
+            no_result_action = dict(required=False, default='success',
+                choices = ['success', 'fail']),
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+    )
+
+    if not HAS_BOTO:
+        module.fail_json(msg='boto required for this module, install via pip or your package manager')
+
+    ami_id = module.params.get('ami_id')
+    ami_tags = module.params.get('ami_tags')
+    architecture = module.params.get('architecture')
+    hypervisor = module.params.get('hypervisor')
+    is_public = module.params.get('is_public')
+    name = module.params.get('name')
+    owner = module.params.get('owner')
+    platform = module.params.get('platform')
+    sort = module.params.get('sort')
+    sort_tag = module.params.get('sort_tag')
+    sort_order = module.params.get('sort_order')
+    sort_start = module.params.get('sort_start')
+    sort_end = module.params.get('sort_end')
+    state = module.params.get('state')
+    virtualization_type = module.params.get('virtualization_type')
+    no_result_action = module.params.get('no_result_action')
+
+    filter = {'state': state}
+
+    if ami_id:
+        filter['image_id'] = ami_id
+    if ami_tags:
+        for tag in ami_tags:
+            filter['tag:'+tag] = ami_tags[tag]
+    if architecture:
+        filter['architecture'] = architecture
+    if hypervisor:
+        filter['hypervisor'] = hypervisor
+    if is_public:
+        filter['is_public'] = is_public
+    if name:
+        filter['name'] = name
+    if platform:
+        filter['platform'] = platform
+    if virtualization_type:
+        filter['virtualization_type'] = virtualization_type
+
+    ec2 = ec2_connect(module)
+
+    images_result = ec2.get_all_images(owners=owner, filters=filter)
+
+    if no_result_action == 'fail' and len(images_result) == 0:
+        module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter))
+
+    results = []
+    for image in images_result:
+        data = {
+            'ami_id': image.id,
+            'architecture': image.architecture,
+            'description': image.description,
+            'is_public': image.is_public,
+            'name': image.name,
+            'owner_id': image.owner_id,
+            'platform': image.platform,
+            'root_device_name': image.root_device_name,
+            'root_device_type': image.root_device_type,
+            'state': image.state,
+            'tags': image.tags,
+            'virtualization_type': image.virtualization_type,
+        }
+
+        if image.kernel_id:
+            data['kernel_id'] = image.kernel_id
+        if image.ramdisk_id:
+            data['ramdisk_id'] = image.ramdisk_id
+
+        results.append(data)
+
+    if sort == 'tag':
+        if not sort_tag:
+            module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'")
+        results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending'))
+    elif sort:
+        results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending'))
+
+    try:
+        if sort and sort_start and sort_end:
+            results = results[int(sort_start):int(sort_end)]
+        elif sort and sort_start:
+            results = results[int(sort_start):]
+        elif sort and sort_end:
+            results = results[:int(sort_end)]
+    except TypeError:
+        module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")
+
+    module.exit_json(results=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+    main()
+

+ 11 - 4
playbooks/aws/openshift-cluster/list.yml

@@ -2,16 +2,23 @@
 - name: Generate oo_list_hosts group
   hosts: localhost
   gather_facts: no
+  vars_files:
+  - vars.yml
   tasks:
   - set_fact: scratch_group=tag_env_{{ cluster_id }}
     when: cluster_id != ''
   - set_fact: scratch_group=all
-    when: scratch_group is not defined
-  - add_host: name={{ item }} groups=oo_list_hosts
-    with_items: groups[scratch_group] | difference(['localhost'])
+    when: cluster_id == ''
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_list_hosts
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost'])
 
 - name: List Hosts
   hosts: oo_list_hosts
   gather_facts: no
   tasks:
-  - debug: msg="public:{{hostvars[inventory_hostname].ec2_ip_address}} private:{{hostvars[inventory_hostname].ec2_private_ip_address}}"
+  - debug:
+      msg: "public ip:{{ hostvars[inventory_hostname].ec2_ip_address }} private ip:{{ hostvars[inventory_hostname].ec2_private_ip_address }}"

+ 132 - 0
playbooks/aws/openshift-cluster/tasks/launch_instances.yml

@@ -0,0 +1,132 @@
+---
+- set_fact:
+    created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
+    docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
+    env: "{{ cluster }}"
+    env_host_type: "{{ cluster }}-openshift-{{ type }}"
+    host_type: "{{ type }}"
+
+- set_fact:
+    ec2_region: "{{ lookup('env', 'ec2_region')
+                    | default(deployment_vars[deployment_type].region, true) }}"
+  when: ec2_region is not defined
+- set_fact:
+    ec2_image_name: "{{ lookup('env', 'ec2_image_name')
+                        | default(deployment_vars[deployment_type].image_name, true) }}"
+  when: ec2_image_name is not defined and ec2_image is not defined
+- set_fact:
+    ec2_image: "{{ lookup('env', 'ec2_image')
+                   | default(deployment_vars[deployment_type].image, true) }}"
+  when: ec2_image is not defined and not ec2_image_name
+- set_fact:
+    ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
+                    | default(deployment_vars[deployment_type].type, true) }}"
+  when: ec2_instance_type is not defined
+- set_fact:
+    ec2_keypair: "{{ lookup('env', 'ec2_keypair')
+                    | default(deployment_vars[deployment_type].keypair, true) }}"
+  when: ec2_keypair is not defined
+- set_fact:
+    ec2_vpc_subnet: "{{ lookup('env', 'ec2_vpc_subnet')
+                    | default(deployment_vars[deployment_type].vpc_subnet, true) }}"
+  when: ec2_vpc_subnet is not defined
+- set_fact:
+    ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
+                    | default(deployment_vars[deployment_type].assign_public_ip, true) }}"
+  when: ec2_assign_public_ip is not defined
+- set_fact:
+    ec2_security_groups: "{{ lookup('env', 'ec2_security_groups')
+                    | default(deployment_vars[deployment_type].security_groups, true) }}"
+  when: ec2_security_groups is not defined
+
+- name: Find amis for deployment_type
+  ec2_ami_find:
+    region: "{{ ec2_region }}"
+    ami_id: "{{ ec2_image | default(omit, true) }}"
+    name: "{{ ec2_image_name | default(omit, true) }}"
+  register: ami_result
+
+- fail: msg="Could not find requested ami"
+  when: not ami_result.results
+
+- set_fact:
+    latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
+    user_data: "{{ lookup('template', '../templates/user_data.j2') if type == 'node' else None | default('omit') }}"
+    volume_defs:
+      master:
+        root:
+          volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
+          device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
+      node:
+        root:
+          volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(25, true) }}"
+          device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
+        docker:
+          volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}"
+          device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
+
+- set_fact:
+    volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}"
+
+- name: Launch instance(s)
+  ec2:
+    state: present
+    region: "{{ ec2_region }}"
+    keypair: "{{ ec2_keypair }}"
+    group: "{{ ec2_security_groups }}"
+    instance_type: "{{ ec2_instance_type }}"
+    image: "{{ latest_ami }}"
+    count: "{{ instances | oo_len }}"
+    vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
+    assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
+    user_data: "{{ user_data }}"
+    wait: yes
+    instance_tags:
+      created-by: "{{ created_by }}"
+      env: "{{ env }}"
+      host-type: "{{ host_type }}"
+      env-host-type: "{{ env_host_type }}"
+    volumes: "{{ volumes }}"
+  register: ec2
+
+- name: Add Name tag to instances
+  ec2_tag: resource={{ item.1.id }} region={{ ec2_region }} state=present
+  with_together:
+  - instances
+  - ec2.instances
+  args:
+    tags:
+      Name: "{{ item.0 }}"
+
+- set_fact:
+    instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+
+- name: Add new instances groups and variables
+  add_host:
+    hostname: "{{ item.0 }}"
+    ansible_ssh_host: "{{ item.1.dns_name }}"
+    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    groups: "{{ instance_groups }}"
+    ec2_private_ip_address: "{{ item.1.private_ip }}"
+    ec2_ip_address: "{{ item.1.public_ip }}"
+  with_together:
+  - instances
+  - ec2.instances
+
+- name: Wait for ssh
+  wait_for: "port=22 host={{ item.dns_name }}"
+  with_items: ec2.instances
+
+- name: Wait for user setup
+  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup"
+  register: result
+  until: result.rc == 0
+  retries: 20
+  delay: 10
+  with_together:
+  - instances
+  - ec2.instances

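Because every set_fact above falls back to an environment lookup, individual launches can be tuned without editing vars.yml; an override sketch with example values:

    export ec2_region=us-west-2           # overrides deployment_vars[deployment_type].region
    export ec2_instance_type=m3.xlarge    # overrides the per-deployment instance type
    export os_docker_vol_size=64          # grows the docker volume defined in volume_defs
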
+ 29 - 0
playbooks/aws/openshift-cluster/templates/user_data.j2

@@ -0,0 +1,29 @@
+#cloud-config
+yum_repos:
+  jdetiber-copr:
+    name: Copr repo for origin owned by jdetiber
+    baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/epel-7-$basearch/
+    skip_if_unavailable: true
+    gpgcheck: true
+    gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/pubkey.gpg
+    enabled: true
+
+packages:
+- xfsprogs # can be dropped after docker-storage-setup properly requires it: https://github.com/projectatomic/docker-storage-setup/pull/8
+- docker-storage-setup
+
+mounts:
+- [ xvdb ]
+- [ ephemeral0 ]
+
+write_files:
+- content: |
+    DEVS=/dev/xvdb
+    VG=docker_vg
+  path: /etc/sysconfig/docker-storage-setup
+  owner: root:root
+  permissions: '0644'
+
+runcmd:
+- systemctl daemon-reload
+- systemctl enable lvm2-lvmetad.service docker-storage-setup.service

+ 11 - 9
playbooks/aws/openshift-cluster/terminate.yml

@@ -1,14 +1,16 @@
 ---
 - name: Terminate instance(s)
   hosts: localhost
-
+  gather_facts: no
   vars_files:
-    - vars.yml
-
-- include: ../openshift-node/terminate.yml
-  vars:
-    oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]'
+  - vars.yml
+  tasks:
+  - set_fact: scratch_group=tag_env_{{ cluster_id }}
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_terminate
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost'])
 
-- include: ../openshift-master/terminate.yml
-  vars:
-    oo_host_group_exp: 'groups["tag_env-host-type_{{ cluster_id }}-openshift-master"]'
+- include: ../terminate.yml

+ 15 - 10
playbooks/aws/openshift-cluster/update.yml

@@ -1,13 +1,18 @@
 ---
-- hosts: "tag_env_{{ cluster_id }}"
-  roles:
-  - openshift_repos
-  - os_update_latest
+- name: Populate oo_hosts_to_update group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_hosts_to_update
+    add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_update
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
 
-- include: ../openshift-master/config.yml
-  vars:
-    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-master\"]"
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
 
-- include: ../openshift-node/config.yml
-  vars:
-    oo_host_group_exp: "groups[\"tag_env-host-type_{{ cluster_id }}-openshift-node\"]"
+- include: config.yml

+ 1 - 0
playbooks/aws/openshift-cluster/vars.defaults.yml

@@ -0,0 +1 @@
+---

+ 9 - 0
playbooks/aws/openshift-cluster/vars.online.int.yml

@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes

+ 9 - 0
playbooks/aws/openshift-cluster/vars.online.prod.yml

@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes

+ 9 - 0
playbooks/aws/openshift-cluster/vars.online.stage.yml

@@ -0,0 +1,9 @@
+---
+ec2_image: ami-906240f8
+ec2_image_name: libra-ops-rhel7*
+ec2_region: us-east-1
+ec2_keypair: mmcgrath_libra
+ec2_instance_type: m3.large
+ec2_security_groups: [ 'int-v3' ]
+ec2_vpc_subnet: subnet-987c0def
+ec2_assign_public_ip: yes

+ 37 - 0
playbooks/aws/openshift-cluster/vars.yml

@@ -1 +1,38 @@
 ---
+deployment_vars:
+  origin:
+    # fedora, since centos requires marketplace
+    image: ami-acd999c4
+    image_name:
+    region: us-east-1
+    ssh_user: fedora
+    sudo: yes
+    keypair: libra
+    type: m3.large
+    security_groups: [ 'public' ]
+    vpc_subnet:
+    assign_public_ip:
+  online:
+    # private ami
+    image: ami-7a9e9812
+    image_name: openshift-rhel7_*
+    region: us-east-1
+    ssh_user: root
+    sudo: no
+    keypair: libra
+    type: m3.large
+    security_groups: [ 'public' ]
+    vpc_subnet:
+    assign_public_ip:
+  enterprise:
+    # rhel-7.1, requires cloud access subscription
+    image: ami-10663b78
+    image_name:
+    region: us-east-1
+    ssh_user: ec2-user
+    sudo: yes
+    keypair: libra
+    type: m3.large
+    security_groups: [ 'public' ]
+    vpc_subnet:
+    assign_public_ip:

+ 11 - 16
playbooks/aws/openshift-master/config.yml

@@ -1,24 +1,19 @@
 ---
-- name: Populate oo_masters_to_config host group if needed
+- name: Populate oo_masters_to_config host group
   hosts: localhost
   gather_facts: no
   tasks:
-  - name: "Evaluate oo_host_group_exp if it's set"
-    add_host: "name={{ item }} groups=oo_masters_to_config"
-    with_items: "{{ oo_host_group_exp | default('') }}"
-    when: oo_host_group_exp is defined
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: root
+    with_items: oo_host_group_exp | default([])
 
-- name: Configure instances
-  hosts: oo_masters_to_config
+- include: ../../common/openshift-master/config.yml
   vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ ec2_private_ip_address }}"
     openshift_hostname: "{{ ec2_private_ip_address }}"
     openshift_public_hostname: "{{ ec2_ip_address }}"
-    openshift_use_openshift_sdn: False
-  vars_files:
-  - vars.yml
-  roles:
-    - openshift_master
-    #- openshift_sdn_master
-    - pods
-    - os_env_extras

+ 3 - 5
playbooks/aws/openshift-master/launch.yml

@@ -4,14 +4,12 @@
   connection: local
   gather_facts: no
 
+# TODO: modify atomic_ami based on deployment_type
   vars:
     inst_region: us-east-1
     atomic_ami: ami-86781fee
     user_data_file: user_data.txt
 
-  vars_files:
-    - vars.yml
-
   tasks:
     - name: Launch instances
       ec2:
@@ -40,7 +38,7 @@
           Name: "{{ item.0 }}"
           Name: "{{ item.0 }}"
 
 
     - name: Add other tags to instances
     - name: Add other tags to instances
-      ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+      ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
       with_items: ec2.instances
       args:
         tags: "{{ oo_new_inst_tags }}"
@@ -57,7 +55,7 @@
         - ec2.instances
 
     - name: Wait for ssh
-      wait_for: "port=22 host={{ item.dns_name }}"
+      wait_for: port=22 host={{ item.dns_name }}
       with_items: ec2.instances
 
     - name: Wait for root user setup

+ 1 - 51
playbooks/aws/openshift-master/terminate.yml

@@ -1,52 +1,2 @@
 ---
 ---
-- name: Populate oo_masters_to_terminate host group if needed
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Evaluate oo_host_group_exp if it's set
-      add_host: "name={{ item }} groups=oo_masters_to_terminate"
-      with_items: "{{ oo_host_group_exp | default('') }}"
-      when: oo_host_group_exp is defined
-
-- name: Gather facts for instances to terminate
-  hosts: oo_masters_to_terminate
-
-- name: Terminate instances
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  vars:
-    host_vars: "{{ hostvars
-        | oo_select_keys(groups['oo_masters_to_terminate']) }}"
-  tasks:
-    - name: Terminate instances
-      ec2:
-        state: absent
-        instance_ids: ["{{ item.ec2_id }}"]
-        region: "{{ item.ec2_region }}"
-      ignore_errors: yes
-      register: ec2_term
-      with_items: host_vars
-
-    # Fail if any of the instances failed to terminate with an error other
-    # than 403 Forbidden
-    - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
-      when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
-      with_items: ec2_term.results
-
-    - name: Stop instance if termination failed
-      ec2:
-        state: stopped
-        instance_ids: ["{{ item.item.ec2_id }}"]
-        region: "{{ item.item.ec2_region }}"
-      register: ec2_stop
-      when: item.failed
-      with_items: ec2_term.results
-
-    - name: Rename stopped instances
-      ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
-      args:
-        tags:
-          Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
-      with_items: ec2_stop.results
-
+- include: ../terminate.yml

+ 0 - 3
playbooks/aws/openshift-master/vars.yml

@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"

+ 14 - 96
playbooks/aws/openshift-node/config.yml

@@ -1,107 +1,25 @@
 ---
 ---
-- name: Populate oo_nodes_to_config host group if needed
+- name: Populate oo_nodes_to_config and oo_first_master host groups
   hosts: localhost
   hosts: localhost
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
-  - name: Evaluate oo_host_group_exp
-    add_host: "name={{ item }} groups=oo_nodes_to_config"
-    with_items: "{{ oo_host_group_exp | default('') }}"
-    when: oo_host_group_exp is defined
-  - add_host:
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: root
+    with_items: oo_host_group_exp | default([])
+  - name: Evaluate oo_first_master
+    add_host:
       name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
       name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
       groups: oo_first_master
       groups: oo_first_master
-    when: oo_host_group_exp is defined
+      ansible_ssh_user: root
 
 
 
 
-- name: Gather and set facts for hosts to configure
-  hosts: oo_nodes_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  # Since the master is registering the nodes before they are configured, we
-  # need to make sure to set the node properties beforehand if we do not want
-  # the defaults
-  - openshift_facts:
-      role: "{{ item.role }}"
-      local_facts: "{{ item.local_facts }}"
-    with_items:
-    - role: common
-      local_facts:
-        hostname: "{{ ec2_private_ip_address }}"
-        public_hostname: "{{ ec2_ip_address }}"
-        # TODO: this should be removed once openshift-sdn packages are available
-        use_openshift_sdn: False
-    - role: node
-      local_facts:
-        external_id: "{{ openshift_node_external_id | default(None) }}"
-        resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
-        resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
-        pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
-        labels: "{{ openshfit_node_labels | default(None) }}"
-        annotations: "{{ openshfit_node_annotations | default(None) }}"
-
-
-- name: Register nodes
-  hosts: oo_first_master
-  vars:
-    openshift_nodes: "{{ hostvars
-          | oo_select_keys(groups['oo_nodes_to_config']) }}"
-  roles:
-  - openshift_register_nodes
-  tasks:
-  - name: Create local temp directory for syncing certs
-    local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
-    register: mktemp
-
-  - name: Sync master certs to localhost
-    synchronize:
-      mode: pull
-      checksum: yes
-      src: /var/lib/openshift/openshift.local.certificates
-      dest: "{{ mktemp.stdout }}"
-
-
-- name: Configure instances
-  hosts: oo_nodes_to_config
-  vars_files:
-  - vars.yml
+- include: ../../common/openshift-node/config.yml
   vars:
   vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ ec2_private_ip_address }}"
     openshift_hostname: "{{ ec2_private_ip_address }}"
     openshift_public_hostname: "{{ ec2_ip_address }}"
     openshift_public_hostname: "{{ ec2_ip_address }}"
-    sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
-    cert_parent_rel_path: openshift.local.certificates
-    cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
-    cert_base_path: /var/lib/openshift
-    cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
-    cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
-  pre_tasks:
-  - name: Ensure certificate directories exists
-    file:
-      path: "{{ item }}"
-      state: directory
-    with_items:
-    - "{{ cert_path }}"
-    - "{{ cert_parent_path }}/ca"
-
-  # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
-  # possibly test service started time against certificate/config file
-  # timestamps in openshift-node or openshift-sdn-node to trigger notify
-  - name: Sync certs to nodes
-    synchronize:
-      checksum: yes
-      src: "{{ item.src }}"
-      dest: "{{ item.dest }}"
-      owner: no
-      group: no
-    with_items:
-    - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
-      dest: "{{ cert_parent_path }}"
-    - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
-      dest: "{{ cert_parent_path }}/ca/cert.crt"
-  - local_action: file name={{ sync_tmpdir }} state=absent
-    run_once: true
-  roles:
-    - openshift_node
-    #- openshift_sdn_node
-    - os_env_extras
-    - os_env_extras_node

+ 4 - 6
playbooks/aws/openshift-node/launch.yml

@@ -4,14 +4,12 @@
   connection: local
   connection: local
   gather_facts: no
   gather_facts: no
 
 
+# TODO: modify atomic_ami based on deployment_type
   vars:
   vars:
     inst_region: us-east-1
     inst_region: us-east-1
     atomic_ami: ami-86781fee
     atomic_ami: ami-86781fee
     user_data_file: user_data.txt
     user_data_file: user_data.txt
 
 
-  vars_files:
-    - vars.yml
-
   tasks:
   tasks:
     - name: Launch instances
     - name: Launch instances
       ec2:
       ec2:
@@ -33,7 +31,7 @@
       with_items: ec2.instances
       with_items: ec2.instances
 
 
     - name: Add Name and environment tags to instances
     - name: Add Name and environment tags to instances
-      ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
+      ec2_tag: resource={{ item.1.id }} region={{ inst_region }} state=present
       with_together:
       with_together:
         - oo_new_inst_names
         - oo_new_inst_names
         - ec2.instances
         - ec2.instances
@@ -42,7 +40,7 @@
           Name: "{{ item.0 }}"
           Name: "{{ item.0 }}"
 
 
     - name: Add other tags to instances
     - name: Add other tags to instances
-      ec2_tag: "resource={{ item.id }} region={{ inst_region }} state=present"
+      ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
       with_items: ec2.instances
       with_items: ec2.instances
       args:
       args:
         tags: "{{ oo_new_inst_tags }}"
         tags: "{{ oo_new_inst_tags }}"
@@ -59,7 +57,7 @@
         - ec2.instances
         - ec2.instances
 
 
     - name: Wait for ssh
     - name: Wait for ssh
-      wait_for: "port=22 host={{ item.dns_name }}"
+      wait_for: port=22 host={{ item.dns_name }}
       with_items: ec2.instances
       with_items: ec2.instances
 
 
     - name: Wait for root user setup
     - name: Wait for root user setup

+ 1 - 51
playbooks/aws/openshift-node/terminate.yml

@@ -1,52 +1,2 @@
 ---
 ---
-- name: Populate oo_nodes_to_terminate host group if needed
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: Evaluate oo_host_group_exp if it's set
-      add_host: "name={{ item }} groups=oo_nodes_to_terminate"
-      with_items: "{{ oo_host_group_exp | default('') }}"
-      when: oo_host_group_exp is defined
-
-- name: Gather facts for instances to terminate
-  hosts: oo_nodes_to_terminate
-
-- name: Terminate instances
-  hosts: localhost
-  connection: local
-  gather_facts: no
-  vars:
-    host_vars: "{{ hostvars
-        | oo_select_keys(groups['oo_nodes_to_terminate']) }}"
-  tasks:
-    - name: Terminate instances
-      ec2:
-        state: absent
-        instance_ids: ["{{ item.ec2_id }}"]
-        region: "{{ item.ec2_region }}"
-      ignore_errors: yes
-      register: ec2_term
-      with_items: host_vars
-
-    # Fail if any of the instances failed to terminate with an error other
-    # than 403 Forbidden
-    - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
-      when: "item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
-      with_items: ec2_term.results
-
-    - name: Stop instance if termination failed
-      ec2:
-        state: stopped
-        instance_ids: ["{{ item.item.ec2_id }}"]
-        region: "{{ item.item.ec2_region }}"
-      register: ec2_stop
-      when: item.failed
-      with_items: ec2_term.results
-
-    - name: Rename stopped instances
-      ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
-      args:
-        tags:
-          Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
-      with_items: ec2_stop.results
-
+- include: ../terminate.yml

+ 0 - 3
playbooks/aws/openshift-node/vars.yml

@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"

+ 64 - 0
playbooks/aws/terminate.yml

@@ -0,0 +1,64 @@
+---
+- name: Populate oo_hosts_to_terminate host group
+  hosts: localhost
+  gather_facts: no
+  tasks:
+    - name: Evaluate oo_hosts_to_terminate
+      add_host: name={{ item }} groups=oo_hosts_to_terminate
+      with_items: oo_host_group_exp | default([])
+
+- name: Gather dynamic inventory variables for hosts to terminate
+  hosts: oo_hosts_to_terminate
+  gather_facts: no
+
+- name: Terminate instances
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars:
+    host_vars: "{{ hostvars
+        | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
+  tasks:
+    - name: Remove tags from instances
+      ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
+      args:
+        tags:
+          env: "{{ item['ec2_tag_env'] }}"
+          host-type: "{{ item['ec2_tag_host-type'] }}"
+          env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
+      with_items: host_vars
+      when: "'oo_hosts_to_terminate' in groups"
+
+    - name: Terminate instances
+      ec2:
+        state: absent
+        instance_ids: ["{{ item.ec2_id }}"]
+        region: "{{ item.ec2_region }}"
+      ignore_errors: yes
+      register: ec2_term
+      with_items: host_vars
+      when: "'oo_hosts_to_terminate' in groups"
+
+    # Fail if any of the instances failed to terminate with an error other
+    # than 403 Forbidden
+    - fail: msg=Terminating instance {{ item.item.ec2_id }} failed with message {{ item.msg }}
+      when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+      with_items: ec2_term.results
+
+    - name: Stop instance if termination failed
+      ec2:
+        state: stopped
+        instance_ids: ["{{ item.item.ec2_id }}"]
+        region: "{{ item.item.ec2_region }}"
+      register: ec2_stop
+      when: "'oo_hosts_to_terminate' in groups and item.failed"
+      with_items: ec2_term.results
+
+    - name: Rename stopped instances
+      ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+      args:
+        tags:
+          Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+      with_items: ec2_stop.results
+      when: "'oo_hosts_to_terminate' in groups"

+ 13 - 7
playbooks/byo/openshift-master/config.yml

@@ -1,9 +1,15 @@
 ---
 ---
-- name: Gather facts for node hosts
-  hosts: nodes
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+    with_items: groups['masters']
 
 
-- name: Configure master instances
-  hosts: masters
-  roles:
-  - openshift_master
-  - openshift_sdn_master
+- include: ../../common/openshift-master/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"

+ 16 - 74
playbooks/byo/openshift-node/config.yml

@@ -1,79 +1,21 @@
 ---
 ---
-- name: Gather facts for node hosts
-  hosts: nodes
-  roles:
-  - openshift_facts
+- name: Populate oo_nodes_to_config and oo_first_master host groups
+  hosts: localhost
+  gather_facts: no
   tasks:
   tasks:
-  # Since the master is registering the nodes before they are configured, we
-  # need to make sure to set the node properties beforehand if we do not want
-  # the defaults
-  - openshift_facts:
-      role: 'node'
-      local_facts:
-        hostname: "{{ openshift_hostname | default(None) }}"
-        external_id: "{{ openshift_node_external_id | default(None) }}"
-        resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
-        resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
-        pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
-        labels: "{{ openshfit_node_labels | default(None) }}"
-        annotations: "{{ openshfit_node_annotations | default(None) }}"
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+    with_items: groups.nodes
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups.masters[0] }}"
+      groups: oo_first_master
 
 
 
 
-- name: Register nodes
-  hosts: masters[0]
+- include: ../../common/openshift-node/config.yml
   vars:
   vars:
-    openshift_nodes: "{{ hostvars | oo_select_keys(groups['nodes']) }}"
-  roles:
-  - openshift_register_nodes
-  tasks:
-  - name: Create local temp directory for syncing certs
-    local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
-    register: mktemp
-
-  - name: Sync master certs to localhost
-    synchronize:
-      mode: pull
-      checksum: yes
-      src: /var/lib/openshift/openshift.local.certificates
-      dest: "{{ mktemp.stdout }}"
-
-
-- name: Configure node instances
-  hosts: nodes
-  vars:
-    sync_tmpdir: "{{ hostvars[groups['masters'][0]].mktemp.stdout }}"
-    cert_parent_rel_path: openshift.local.certificates
-    cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
-    cert_base_path: /var/lib/openshift
-    cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
-    cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
-    openshift_sdn_master_url: http://{{ hostvars[groups['masters'][0]].openshift.common.hostname }}:4001
-  pre_tasks:
-  - name: Ensure certificate directories exists
-    file:
-      path: "{{ item }}"
-      state: directory
-    with_items:
-    - "{{ cert_path }}"
-    - "{{ cert_parent_path }}/ca"
-
-  # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
-  # possibly test service started time against certificate/config file
-  # timestamps in openshift-node or openshift-sdn-node to trigger notify
-  - name: Sync certs to nodes
-    synchronize:
-      checksum: yes
-      src: "{{ item.src }}"
-      dest: "{{ item.dest }}"
-      owner: no
-      group: no
-    with_items:
-    - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
-      dest: "{{ cert_parent_path }}"
-    - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
-      dest: "{{ cert_parent_path }}/ca/cert.crt"
-  - local_action: file name={{ sync_tmpdir }} state=absent
-    run_once: true
-  roles:
-  - openshift_node
-  - openshift_sdn_node
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"

+ 10 - 0
playbooks/byo/openshift_facts.yml

@@ -0,0 +1,10 @@
+---
+- name: Gather OpenShift facts
+  hosts: all
+  gather_facts: no
+  roles:
+  - openshift_facts
+  tasks:
+  - openshift_facts:
+    register: result
+  - debug: var=result
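
The openshift_facts task above, called with no arguments, simply reports the currently known facts. The same module also accepts role/local_facts pairs (as used elsewhere in this change), so a fact can be seeded before the dump; a sketch with an illustrative value:

  - openshift_facts:
      role: common
      local_facts:
        public_hostname: master.example.com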

+ 4 - 0
playbooks/common/openshift-cluster/config.yml

@@ -0,0 +1,4 @@
+---
+- include: ../openshift-master/config.yml
+
+- include: ../openshift-node/config.yml
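
The contract for this include is that the calling provider playbook has already populated oo_masters_to_config, oo_nodes_to_config and oo_first_master. A minimal sketch of such a caller (host names hypothetical):

- name: Populate host groups (example only)
  hosts: localhost
  gather_facts: no
  tasks:
  - add_host: name=master1.example.com groups=oo_masters_to_config,oo_first_master
  - add_host: name=node1.example.com groups=oo_nodes_to_config

- include: playbooks/common/openshift-cluster/config.yml
  vars:
    openshift_cluster_id: example
    openshift_debug_level: 4
    openshift_deployment_type: origin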

playbooks/libvirt/openshift-node/filter_plugins → playbooks/common/openshift-cluster/filter_plugins


playbooks/libvirt/openshift-node/roles → playbooks/common/openshift-cluster/roles


+ 11 - 0
playbooks/common/openshift-cluster/set_master_launch_facts_tasks.yml

@@ -0,0 +1,11 @@
+---
+- set_fact: k8s_type="master"
+
+- name: Generate master instance name(s)
+  set_fact:
+    scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+  register: master_names_output
+  with_sequence: start=1 end={{ num_masters }}
+
+- set_fact:
+    master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
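
These tasks are meant to be included from a provider launch play with num_masters already defined (the cluster tooling normally passes it in); the result is a master_names list of generated names such as ['mycluster-master-0f3a2'] (value illustrative). A sketch of the include inside a play's tasks:

  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
    vars:
      num_masters: 1
  - debug: var=master_names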

+ 11 - 0
playbooks/common/openshift-cluster/set_node_launch_facts_tasks.yml

@@ -0,0 +1,11 @@
+---
+- set_fact: k8s_type="node"
+
+- name: Generate node instance name(s)
+  set_fact:
+    scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
+  register: node_names_output
+  with_sequence: start=1 end={{ num_nodes }}
+
+- set_fact:
+    node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"

+ 7 - 0
playbooks/common/openshift-cluster/update_repos_and_packages.yml

@@ -0,0 +1,7 @@
+---
+- hosts: oo_hosts_to_update
+  vars:
+    openshift_deployment_type: "{{ deployment_type }}"
+  roles:
+  - openshift_repos
+  - os_update_latest
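
This play expects an oo_hosts_to_update group to exist; a sketch of a caller, mirroring what the provider update playbooks later in this change do (the tag group name is illustrative):

- name: Populate oo_hosts_to_update (example only)
  hosts: localhost
  gather_facts: no
  tasks:
  - add_host: name={{ item }} groups=oo_hosts_to_update
    with_items: groups['tag_env_mycluster'] | default([])

- include: playbooks/common/openshift-cluster/update_repos_and_packages.yml
  vars:
    deployment_type: origin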

+ 19 - 0
playbooks/common/openshift-master/config.yml

@@ -0,0 +1,19 @@
+---
+- name: Configure master instances
+  hosts: oo_masters_to_config
+  vars:
+    openshift_sdn_master_url: https://{{ openshift.common.hostname }}:4001
+  roles:
+  - openshift_master
+  - { role: openshift_sdn_master, when: openshift.common.use_openshift_sdn | bool }
+  tasks:
+  - name: Create group for deployment type
+    group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
+    changed_when: False
+
+# Additional instance config for online deployments
+- name: Additional instance config
+  hosts: oo_masters_deployment_type_online
+  roles:
+  - pods
+  - os_env_extras
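
The group_by task is what feeds the online-only play above: each master lands in oo_masters_deployment_type_<type> based on its deployment_type fact. Handling another deployment type later only needs one more play, as in this sketch:

- name: Additional instance config for enterprise masters (example only)
  hosts: oo_masters_deployment_type_enterprise
  tasks:
  - debug: msg="enterprise-specific master configuration would go here"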

playbooks/libvirt/openshift-master/filter_plugins → playbooks/common/openshift-master/filter_plugins


+ 1 - 0
playbooks/common/openshift-master/roles

@@ -0,0 +1 @@
+../../../roles/

+ 127 - 0
playbooks/common/openshift-node/config.yml

@@ -0,0 +1,127 @@
+---
+- name: Gather and set facts for node hosts
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  # Since the master is registering the nodes before they are configured, we
+  # need to make sure to set the node properties beforehand if we do not want
+  # the defaults
+  - openshift_facts:
+      role: "{{ item.role }}"
+      local_facts: "{{ item.local_facts }}"
+    with_items:
+      - role: common
+        local_facts:
+          hostname: "{{ openshift_hostname | default(None) }}"
+          public_hostname: "{{ openshift_public_hostname | default(None) }}"
+      - role: node
+        local_facts:
+          external_id: "{{ openshift_node_external_id | default(None) }}"
+          resources_cpu: "{{ openshift_node_resources_cpu | default(None) }}"
+          resources_memory: "{{ openshift_node_resources_memory | default(None) }}"
+          pod_cidr: "{{ openshift_node_pod_cidr | default(None) }}"
+          labels: "{{ openshift_node_labels | default(None) }}"
+          annotations: "{{ openshift_node_annotations | default(None) }}"
+          deployment_type: "{{ openshift_deployment_type }}"
+
+
+- name: Create temp directory for syncing certs
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: Create local temp directory for syncing certs
+    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: mktemp
+    changed_when: False
+
+
+- name: Register nodes
+  hosts: oo_first_master
+  vars:
+    openshift_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
+    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+  roles:
+  - openshift_register_nodes
+  tasks:
+  # TODO: update so that we only sync necessary configs/directories; currently
+  # we sync for all nodes in oo_nodes_to_config.  We will need to inspect the
+  # configs on the nodes to make the determination on whether to sync or not.
+  - name: Create the temp directory on the master
+    file:
+      path: "{{ sync_tmpdir }}"
+      owner: "{{ ansible_ssh_user }}"
+      mode: 0700
+      state: directory
+    changed_when: False
+
+  - name: Create a tarball of the node config directories
+    command: tar -czvf {{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz ./
+    args:
+      chdir: "{{ openshift_cert_dir }}/node-{{ item.openshift.common.hostname }}"
+    with_items: openshift_nodes
+    changed_when: False
+
+  - name: Retrieve the node config tarballs from the master
+    fetch:
+      src: "{{ sync_tmpdir }}/{{ item.openshift.common.hostname }}.tgz"
+      dest: "{{ sync_tmpdir }}/"
+      flat: yes
+      fail_on_missing: yes
+      validate_checksum: yes
+    with_items: openshift_nodes
+    changed_when: False
+
+
+- name: Configure node instances
+  hosts: oo_nodes_to_config
+  gather_facts: no
+  vars:
+    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+    openshift_sdn_master_url: "https://{{ hostvars[groups['oo_first_master'][0]].openshift.common.hostname }}:4001"
+  pre_tasks:
+  - name: Ensure certificate directory exists
+    file:
+      path: "{{ openshift_node_cert_dir }}"
+      state: directory
+
+  # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
+  # possibly test service started time against certificate/config file
+  # timestamps in openshift-node or openshift-sdn-node to trigger notify
+  - name: Unarchive the tarball on the node
+    unarchive:
+      src: "{{ sync_tmpdir }}/{{ openshift.common.hostname }}.tgz"
+      dest: "{{ openshift_node_cert_dir }}"
+  roles:
+  - openshift_node
+  - { role: openshift_sdn_node, when: openshift.common.use_openshift_sdn | bool }
+  tasks:
+  - name: Create group for deployment type
+    group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
+    changed_when: False
+
+- name: Delete the temporary directory on the master
+  hosts: oo_first_master
+  gather_facts: no
+  vars:
+    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
+  tasks:
+  - file: name={{ sync_tmpdir }} state=absent
+    changed_when: False
+
+
+- name: Delete temporary directory on localhost
+  hosts: localhost
+  gather_facts: no
+  tasks:
+  - file: name={{ mktemp.stdout }} state=absent
+    changed_when: False
+
+
+# Additional config for online type deployments
+- name: Additional instance config
+  hosts: oo_nodes_deployment_type_online
+  gather_facts: no
+  roles:
+  - os_env_extras
+  - os_env_extras_node
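
In short, the flow above is: generate node certificates on the first master (openshift_register_nodes), tar them per node, fetch the tarballs back to the machine running Ansible, then unarchive each one on its node. A compressed sketch of that round trip for a single hypothetical node, assuming the default certificate location used elsewhere in this repository:

- hosts: oo_first_master
  tasks:
  - command: tar -czf /tmp/node1.example.com.tgz ./
    args:
      chdir: /var/lib/openshift/openshift.local.certificates/node-node1.example.com
  - fetch: src=/tmp/node1.example.com.tgz dest=/tmp/ flat=yes

- hosts: node1.example.com
  tasks:
  - file: path=/var/lib/openshift/openshift.local.certificates/node-node1.example.com state=directory
  - unarchive: src=/tmp/node1.example.com.tgz dest=/var/lib/openshift/openshift.local.certificates/node-node1.example.com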

+ 1 - 0
playbooks/common/openshift-node/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/common/openshift-node/roles

@@ -0,0 +1 @@
+../../../roles/

+ 37 - 0
playbooks/gce/openshift-cluster/config.yml

@@ -0,0 +1,37 @@
+---
+# TODO: fix firewall related bug with GCE and origin, since GCE is overriding
+# /etc/sysconfig/iptables
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+      groups: oo_first_master
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ gce_private_ip }}"

+ 19 - 53
playbooks/gce/openshift-cluster/launch.yml

@@ -4,59 +4,25 @@
   connection: local
   connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
-      - vars.yml
+  - vars.yml
   tasks:
   tasks:
-    - set_fact: k8s_type="master"
-
-    - name: Generate master instance names(s)
-      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
-      register: master_names_output
-      with_sequence: start=1 end={{ num_masters }}
-
-    # These set_fact's cannot be combined
-    - set_fact:
-        master_names_string: "{% for item in master_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
-    - set_fact:
-        master_names: "{{ master_names_string.strip().split(' ') }}"
-
-    - include: launch_instances.yml
-      vars:
-        instances: "{{ master_names }}"
-        cluster: "{{ cluster_id }}"
-        type: "{{ k8s_type }}"
-
-    - set_fact: k8s_type="node"
-
-    - name: Generate node instance names(s)
-      set_fact: scratch={{ cluster_id }}-{{ k8s_type }}-{{ '%05x' |format( 1048576 |random) }}
-      register: node_names_output
-      with_sequence: start=1 end={{ num_nodes }}
-
-    # These set_fact's cannot be combined
-    - set_fact:
-        node_names_string: "{% for item in node_names_output.results %}{{ item.ansible_facts.scratch }} {% endfor %}"
-
-    - set_fact:
-        node_names: "{{ node_names_string.strip().split(' ') }}"
-
-    - include: launch_instances.yml
-      vars:
-        instances: "{{ node_names }}"
-        cluster: "{{ cluster_id }}"
-        type: "{{ k8s_type }}"
-
-- hosts: "tag_env-{{ cluster_id }}"
-  roles:
-  - openshift_repos
-  - os_update_latest
-
-- include: ../openshift-master/config.yml
-  vars:
-    oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
-
-- include: ../openshift-node/config.yml
-  vars:
-    oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
+  - fail: msg="Deployment type not supported for gce provider yet"
+    when: deployment_type == 'enterprise'
+
+  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ master_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+
+  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ node_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+
+- include: update.yml
 
 
 - include: list.yml
 - include: list.yml

+ 11 - 4
playbooks/gce/openshift-cluster/list.yml

@@ -2,16 +2,23 @@
 - name: Generate oo_list_hosts group
 - name: Generate oo_list_hosts group
   hosts: localhost
   hosts: localhost
   gather_facts: no
   gather_facts: no
+  vars_files:
+  - vars.yml
   tasks:
   tasks:
   - set_fact: scratch_group=tag_env-{{ cluster_id }}
   - set_fact: scratch_group=tag_env-{{ cluster_id }}
     when: cluster_id != ''
     when: cluster_id != ''
   - set_fact: scratch_group=all
   - set_fact: scratch_group=all
-    when: scratch_group is not defined
-  - add_host: name={{ item }} groups=oo_list_hosts
-    with_items: groups[scratch_group] | difference(['localhost']) | difference(groups.status_terminated)
+    when: cluster_id == ''
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_list_hosts
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
 
 
 - name: List Hosts
 - name: List Hosts
   hosts: oo_list_hosts
   hosts: oo_list_hosts
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
-  - debug: msg="public:{{hostvars[inventory_hostname].gce_public_ip}} private:{{hostvars[inventory_hostname].gce_private_ip}}"
+  - debug:
+      msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"

+ 11 - 14
playbooks/gce/openshift-cluster/launch_instances.yml

@@ -2,41 +2,38 @@
 # TODO: when we are ready to go to ansible 1.9+ support only, we can update to
 # TODO: when we are ready to go to ansible 1.9+ support only, we can update to
 # the gce task to use the disk_auto_delete parameter to avoid having to delete
 # the gce task to use the disk_auto_delete parameter to avoid having to delete
 # the disk as a separate step on termination
 # the disk as a separate step on termination
-
-- set_fact:
-    machine_type: "{{ lookup('env', 'gce_machine_type') |default('n1-standard-1', true) }}"
-    machine_image: "{{ lookup('env', 'gce_machine_image') |default('libra-rhel7', true) }}"
-
 - name: Launch instance(s)
 - name: Launch instance(s)
   gce:
   gce:
     instance_names: "{{ instances }}"
     instance_names: "{{ instances }}"
-    machine_type: "{{ machine_type }}"
-    image: "{{ machine_image }}"
+    machine_type: "{{ lookup('env', 'gce_machine_type') | default('n1-standard-1', true) }}"
+    image: "{{ lookup('env', 'gce_machine_image') | default(deployment_vars[deployment_type].image, true) }}"
     service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
     service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
     pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
     pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
     project_id: "{{ lookup('env', 'gce_project_id') }}"
     project_id: "{{ lookup('env', 'gce_project_id') }}"
     tags:
     tags:
-      - "created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}"
-      - "env-{{ cluster }}"
-      - "host-type-{{ type }}"
-      - "env-host-type-{{ cluster }}-openshift-{{ type }}"
+      - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
+      - env-{{ cluster }}
+      - host-type-{{ type }}
+      - env-host-type-{{ cluster }}-openshift-{{ type }}
   register: gce
   register: gce
 
 
 - name: Add new instances to groups and set variables needed
 - name: Add new instances to groups and set variables needed
   add_host:
   add_host:
     hostname: "{{ item.name }}"
     hostname: "{{ item.name }}"
     ansible_ssh_host: "{{ item.public_ip }}"
     ansible_ssh_host: "{{ item.public_ip }}"
+    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+    ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
     groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
     groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
     gce_public_ip: "{{ item.public_ip }}"
     gce_public_ip: "{{ item.public_ip }}"
     gce_private_ip: "{{ item.private_ip }}"
     gce_private_ip: "{{ item.private_ip }}"
   with_items: gce.instance_data
   with_items: gce.instance_data
 
 
 - name: Wait for ssh
 - name: Wait for ssh
-  wait_for: "port=22 host={{ item.public_ip }}"
+  wait_for: port=22 host={{ item.public_ip }}
   with_items: gce.instance_data
   with_items: gce.instance_data
 
 
-- name: Wait for root user setup
-  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.public_ip }} echo root user is setup"
+- name: Wait for user setup
+  command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
   register: result
   register: result
   until: result.rc == 0
   until: result.rc == 0
   retries: 20
   retries: 20

+ 18 - 4
playbooks/gce/openshift-cluster/terminate.yml

@@ -1,20 +1,34 @@
 ---
 ---
 - name: Terminate instance(s)
 - name: Terminate instance(s)
   hosts: localhost
   hosts: localhost
-
+  gather_facts: no
   vars_files:
   vars_files:
-    - vars.yml
+  - vars.yml
+  tasks:
+  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_terminate
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+
+  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_terminate
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
 
 
 - include: ../openshift-node/terminate.yml
 - include: ../openshift-node/terminate.yml
   vars:
   vars:
-    oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
     gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
     gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
     gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
     gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
     gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
     gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
 
 
 - include: ../openshift-master/terminate.yml
 - include: ../openshift-master/terminate.yml
   vars:
   vars:
-    oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
     gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
     gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
     gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
     gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
     gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
     gce_project_id: "{{ lookup('env', 'gce_project_id') }}"

+ 15 - 10
playbooks/gce/openshift-cluster/update.yml

@@ -1,13 +1,18 @@
 ---
 ---
-- hosts: "tag_env-{{ cluster_id }}"
-  roles:
-  - openshift_repos
-  - os_update_latest
+- name: Populate oo_hosts_to_update group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_hosts_to_update
+    add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_update
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
 
 
-- include: ../openshift-master/config.yml
-  vars:
-    oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-master\"]"
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
 
 
-- include: ../openshift-node/config.yml
-  vars:
-    oo_host_group_exp: "groups[\"tag_env-host-type-{{ cluster_id }}-openshift-node\"]"
+- include: config.yml

+ 14 - 0
playbooks/gce/openshift-cluster/vars.yml

@@ -1 +1,15 @@
 ---
 ---
+deployment_vars:
+  origin:
+    image: centos-7
+    ssh_user:
+    sudo: yes
+  online:
+    image: libra-rhel7
+    ssh_user: root
+    sudo: no
+  enterprise:
+    image: rhel-7
+    ssh_user:
+    sudo: yes
+

+ 11 - 13
playbooks/gce/openshift-master/config.yml

@@ -1,20 +1,18 @@
 ---
 ---
-- name: master/config.yml, populate oo_masters_to_config host group if needed
+- name: Populate oo_masters_to_config host group
   hosts: localhost
   hosts: localhost
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
-  - name: "Evaluate oo_host_group_exp if it's set"
-    add_host: "name={{ item }} groups=oo_masters_to_config"
-    with_items: "{{ oo_host_group_exp | default('') }}"
-    when: oo_host_group_exp is defined
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_masters_to_config
+      ansible_ssh_user: root
+    with_items: oo_host_group_exp | default([])
 
 
-- name: "Configure instances"
-  hosts: oo_masters_to_config
+- include: ../../common/openshift-master/config.yml
   vars:
   vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ gce_private_ip }}"
     openshift_hostname: "{{ gce_private_ip }}"
-  vars_files:
-  - vars.yml
-  roles:
-    - openshift_master
-    - pods
-    - os_env_extras

+ 2 - 4
playbooks/gce/openshift-master/launch.yml

@@ -8,14 +8,12 @@
   connection: local
   connection: local
   gather_facts: no
   gather_facts: no
 
 
+# TODO: modify image based on deployment_type
   vars:
   vars:
     inst_names: "{{ oo_new_inst_names }}"
     inst_names: "{{ oo_new_inst_names }}"
     machine_type: n1-standard-1
     machine_type: n1-standard-1
     image: libra-rhel7
     image: libra-rhel7
 
 
-  vars_files:
-      - vars.yml
-
   tasks:
   tasks:
     - name: Launch instances
     - name: Launch instances
       gce:
       gce:
@@ -37,7 +35,7 @@
       with_items: gce.instance_data
       with_items: gce.instance_data
 
 
     - name: Wait for ssh
     - name: Wait for ssh
-      wait_for: "port=22 host={{ item.public_ip }}"
+      wait_for: port=22 host={{ item.public_ip }}
       with_items: gce.instance_data
       with_items: gce.instance_data
 
 
     - name: Wait for root user setup
     - name: Wait for root user setup

+ 5 - 6
playbooks/gce/openshift-master/terminate.yml

@@ -3,10 +3,9 @@
   hosts: localhost
   hosts: localhost
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
-    - name: Evaluate oo_host_group_exp if it's set
-      add_host: "name={{ item }} groups=oo_masters_to_terminate"
-      with_items: "{{ oo_host_group_exp | default('') }}"
-      when: oo_host_group_exp is defined
+    - name: Evaluate oo_masters_to_terminate
+      add_host: name={{ item }} groups=oo_masters_to_terminate
+      with_items: oo_host_group_exp | default([])
 
 
 - name: Terminate master instances
 - name: Terminate master instances
   hosts: localhost
   hosts: localhost
@@ -22,6 +21,7 @@
         instance_names: "{{ groups['oo_masters_to_terminate'] }}"
         instance_names: "{{ groups['oo_masters_to_terminate'] }}"
         disks: "{{ groups['oo_masters_to_terminate'] }}"
         disks: "{{ groups['oo_masters_to_terminate'] }}"
       register: gce
       register: gce
+      when: "'oo_masters_to_terminate' in groups"
 
 
     - name: Remove disks of instances
     - name: Remove disks of instances
       gce_pd:
       gce_pd:
@@ -32,5 +32,4 @@
         zone: "{{ gce.zone }}"
         zone: "{{ gce.zone }}"
         state: absent
         state: absent
       with_items: gce.instance_names
       with_items: gce.instance_names
-
-
+      when: "'oo_masters_to_terminate' in groups"

+ 0 - 3
playbooks/gce/openshift-master/vars.yml

@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"

+ 15 - 91
playbooks/gce/openshift-node/config.yml

@@ -1,100 +1,24 @@
 ---
 ---
-- name: node/config.yml, populate oo_nodes_to_config host group if needed
+- name: Populate oo_nodes_to_config and oo_first_master host groups
   hosts: localhost
   hosts: localhost
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
-  - name: Evaluate oo_host_group_exp
-    add_host: "name={{ item }} groups=oo_nodes_to_config"
-    with_items: "{{ oo_host_group_exp | default('') }}"
-    when: oo_host_group_exp is defined
-  - add_host:
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      groups: oo_nodes_to_config
+      ansible_ssh_user: root
+    with_items: oo_host_group_exp | default([])
+  - name: Evaluate oo_first_master
+    add_host:
       name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
       name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
       groups: oo_first_master
       groups: oo_first_master
-    when: oo_host_group_exp is defined
+      ansible_ssh_user: root
 
 
 
 
-- name: Gather and set facts for hosts to configure
-  hosts: oo_nodes_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  # Since the master is registering the nodes before they are configured, we
-  # need to make sure to set the node properties beforehand if we do not want
-  # the defaults
-  - openshift_facts:
-      role: "{{ item.role }}"
-      local_facts: "{{ item.local_facts }}"
-    with_items:
-    - role: common
-      local_facts:
-        hostname: "{{ gce_private_ip }}"
-    - role: node
-      local_facts:
-        external_id: "{{ openshift_node_external_id | default(None) }}"
-        resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
-        resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
-        pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
-        labels: "{{ openshfit_node_labels | default(None) }}"
-        annotations: "{{ openshfit_node_annotations | default(None) }}"
-
-
-- name: Register nodes
-  hosts: oo_first_master
-  vars:
-    openshift_nodes: "{{ hostvars
-          | oo_select_keys(groups['oo_nodes_to_config']) }}"
-  roles:
-  - openshift_register_nodes
-  tasks:
-  - name: Create local temp directory for syncing certs
-    local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
-    register: mktemp
-
-  - name: Sync master certs to localhost
-    synchronize:
-      mode: pull
-      checksum: yes
-      src: /var/lib/openshift/openshift.local.certificates
-      dest: "{{ mktemp.stdout }}"
-
-- name: Configure instances
-  hosts: oo_nodes_to_config
-  vars_files:
-  - vars.yml
+- include: ../../common/openshift-node/config.yml
   vars:
   vars:
-    sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
-    cert_parent_rel_path: openshift.local.certificates
-    cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
-    cert_base_path: /var/lib/openshift
-    cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
-    cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
-  pre_tasks:
-  - name: Ensure certificate directories exists
-    file:
-      path: "{{ item }}"
-      state: directory
-    with_items:
-    - "{{ cert_path }}"
-    - "{{ cert_parent_path }}/ca"
-
-  # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
-  # possibly test service started time against certificate/config file
-  # timestamps in openshift-node or openshift-sdn-node to trigger notify
-  - name: Sync certs to nodes
-    synchronize:
-      checksum: yes
-      src: "{{ item.src }}"
-      dest: "{{ item.dest }}"
-      owner: no
-      group: no
-    with_items:
-    - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
-      dest: "{{ cert_parent_path }}"
-    - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
-      dest: "{{ cert_parent_path }}/ca/cert.crt"
-  - local_action: file name={{ sync_tmpdir }} state=absent
-    run_once: true
-  roles:
-    - openshift_node
-    - os_env_extras
-    - os_env_extras_node
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ gce_private_ip }}"

+ 2 - 4
playbooks/gce/openshift-node/launch.yml

@@ -8,14 +8,12 @@
   connection: local
   connection: local
   gather_facts: no
   gather_facts: no
 
 
+# TODO: modify image based on deployment_type
   vars:
   vars:
     inst_names: "{{ oo_new_inst_names }}"
     inst_names: "{{ oo_new_inst_names }}"
     machine_type: n1-standard-1
     machine_type: n1-standard-1
     image: libra-rhel7
     image: libra-rhel7
 
 
-  vars_files:
-      - vars.yml
-
   tasks:
   tasks:
     - name: Launch instances
     - name: Launch instances
       gce:
       gce:
@@ -37,7 +35,7 @@
       with_items: gce.instance_data
       with_items: gce.instance_data
 
 
     - name: Wait for ssh
     - name: Wait for ssh
-      wait_for: "port=22 host={{ item.public_ip }}"
+      wait_for: port=22 host={{ item.public_ip }}
       with_items: gce.instance_data
       with_items: gce.instance_data
 
 
     - name: Wait for root user setup
     - name: Wait for root user setup

+ 5 - 6
playbooks/gce/openshift-node/terminate.yml

@@ -3,10 +3,9 @@
   hosts: localhost
   hosts: localhost
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
-    - name: Evaluate oo_host_group_exp if it's set
-      add_host: "name={{ item }} groups=oo_nodes_to_terminate"
-      with_items: "{{ oo_host_group_exp | default('') }}"
-      when: oo_host_group_exp is defined
+    - name: Evaluate oo_nodes_to_terminate
+      add_host: name={{ item }} groups=oo_nodes_to_terminate
+      with_items: oo_host_group_exp | default([])
 
 
 - name: Terminate node instances
 - name: Terminate node instances
   hosts: localhost
   hosts: localhost
@@ -22,6 +21,7 @@
         instance_names: "{{ groups['oo_nodes_to_terminate'] }}"
         instance_names: "{{ groups['oo_nodes_to_terminate'] }}"
         disks: "{{ groups['oo_nodes_to_terminate'] }}"
         disks: "{{ groups['oo_nodes_to_terminate'] }}"
       register: gce
       register: gce
+      when: "'oo_nodes_to_terminate' in groups"
 
 
     - name: Remove disks of instances
     - name: Remove disks of instances
       gce_pd:
       gce_pd:
@@ -32,5 +32,4 @@
         zone: "{{ gce.zone }}"
         zone: "{{ gce.zone }}"
         state: absent
         state: absent
       with_items: gce.instance_names
       with_items: gce.instance_names
-
-
+      when: "'oo_nodes_to_terminate' in groups"

+ 0 - 3
playbooks/gce/openshift-node/vars.yml

@@ -1,3 +0,0 @@
----
-openshift_debug_level: 4
-openshift_cluster_id: "{{ cluster_id }}"

+ 38 - 0
playbooks/libvirt/openshift-cluster/config.yml

@@ -0,0 +1,38 @@
+---
+# TODO: need to figure out a plan for setting hostname, currently the default
+# is localhost, so no hostname (or public_hostname) value is getting
+# assigned
+
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_masters_to_config
+    add_host:
+      name: "{{ item }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_masters_to_config
+    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ item }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_nodes_to_config
+    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_first_master
+    when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+- include: ../../common/openshift-cluster/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"

+ 26 - 55
playbooks/libvirt/openshift-cluster/launch.yml

@@ -1,65 +1,36 @@
+---
 - name: Launch instance(s)
 - name: Launch instance(s)
   hosts: localhost
   hosts: localhost
-  connection: local
   gather_facts: no
   gather_facts: no
-
-  vars:
-    libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
-    libvirt_storage_pool: 'openshift'
-    libvirt_uri: 'qemu:///system'
-
   vars_files:
   vars_files:
-    - vars.yml
-
+  - vars.yml
+  vars:
+    os_libvirt_storage_pool: "{{ libvirt_storage_pool | default('images') }}"
+    os_libvirt_storage_pool_path: "{{ libvirt_storage_pool_path | default('/var/lib/libvirt/images') }}"
+    os_libvirt_network: "{{ libvirt_network | default('default') }}"
+    image_url: "{{ deployment_vars[deployment_type].image.url }}"
+    image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
+    image_name: "{{ deployment_vars[deployment_type].image.name }}"
   tasks:
   tasks:
-    - set_fact:
-        k8s_type: master
-
-    - name: Generate master instance name(s)
-      set_fact:
-        scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
-      register: master_names_output
-      with_sequence: start=1 end='{{ num_masters }}'
+  - fail: msg="Deployment type not supported for libvirt provider yet"
+    when: deployment_type in ['online', 'enterprise']
 
 
-    - set_fact:
-        master_names: "{{ master_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
+  - include: tasks/configure_libvirt.yml
 
 
-    - include: launch_instances.yml
-      vars:
-        instances: '{{ master_names }}'
-        cluster: '{{ cluster_id }}'
-        type: '{{ k8s_type }}'
-        group_name: 'tag_env-host-type-{{ cluster_id }}-openshift-master'
+  - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ master_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
 
 
-    - set_fact:
-        k8s_type: node
+  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ node_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
 
 
-    - name: Generate node instance name(s)
-      set_fact:
-        scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format( 1048576 | random ) }}"
-      register: node_names_output
-      with_sequence: start=1 end='{{ num_nodes }}'
+- include: update.yml
 
 
-    - set_fact:
-        node_names: "{{ node_names_output.results | oo_collect('ansible_facts') | oo_collect('scratch_name') }}"
-
-    - include: launch_instances.yml
-      vars:
-        instances: '{{ node_names }}'
-        cluster: '{{ cluster_id }}'
-        type: '{{ k8s_type }}'
-
-- hosts: 'tag_env-{{ cluster_id }}'
-  roles:
-    - openshift_repos
-    - os_update_latest
-
-- include: ../openshift-master/config.yml
-  vars:
-    oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-master"]'
-    oo_env: '{{ cluster_id }}'
-
-- include: ../openshift-node/config.yml
-  vars:
-    oo_host_group_exp: 'groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]'
-    oo_env: '{{ cluster_id }}'
+- include: list.yml

+ 15 - 35
playbooks/libvirt/openshift-cluster/list.yml

@@ -1,43 +1,23 @@
+---
 - name: Generate oo_list_hosts group
 - name: Generate oo_list_hosts group
   hosts: localhost
   hosts: localhost
-  connection: local
   gather_facts: no
   gather_facts: no
-
-  vars:
-    libvirt_uri: 'qemu:///system'
-
+  vars_files:
+  - vars.yml
   tasks:
   tasks:
-    - name: List VMs
-      virt:
-        command: list_vms
-      register: list_vms
-
-    - name: Collect MAC addresses of the VMs
-      shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
-      register: scratch_mac
-      with_items: '{{ list_vms.list_vms }}'
-      when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
-
-    - name: Collect IP addresses of the VMs
-      shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
-      register: scratch_ip
-      with_items: '{{ scratch_mac.results }}'
-      when: item.skipped is not defined
-
-    - name: Add hosts
-      add_host:
-        hostname: '{{ item[0] }}'
-        ansible_ssh_host: '{{ item[1].stdout }}'
-        ansible_ssh_user: root
-        groups: oo_list_hosts
-      with_together:
-        - '{{ list_vms.list_vms }}'
-        - '{{ scratch_ip.results }}'
-      when: item[1].skipped is not defined
+  - set_fact: scratch_group=tag_env-{{ cluster_id }}
+    when: cluster_id != ''
+  - set_fact: scratch_group=all
+    when: cluster_id == ''
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_list_hosts
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[scratch_group] | default([]) | difference(['localhost'])
 
 
 - name: List Hosts
 - name: List Hosts
   hosts: oo_list_hosts
   hosts: oo_list_hosts
-
   tasks:
   tasks:
-    - debug:
-        msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
+  - debug:
+      msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'

+ 6 - 0
playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml

@@ -0,0 +1,6 @@
+---
+- include: configure_libvirt_storage_pool.yml
+  when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined
+
+- include: configure_libvirt_network.yml
+  when: libvirt_network is defined
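
Both includes are gated on the caller defining the libvirt_* variables; without them the launch play falls back to the default 'images' pool and 'default' network. A sketch of an invocation that creates a dedicated pool and network (values illustrative; the pool path matches the old default removed above):

- include: playbooks/libvirt/openshift-cluster/launch.yml
  vars:
    libvirt_storage_pool: openshift
    libvirt_storage_pool_path: "{{ lookup('env', 'HOME') }}/libvirt-storage-pool-openshift"
    libvirt_network: openshift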

+ 27 - 0
playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml

@@ -0,0 +1,27 @@
+---
+- name: Test if libvirt network for openshift already exists
+  command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}"
+  register: net_info_result
+  changed_when: False
+  failed_when: "net_info_result.rc != 0 and 'no network with matching name' not in net_info_result.stderr"
+
+- name: Create a temp directory for the template xml file
+  command: "mktemp -d /tmp/openshift-ansible-XXXXXXX"
+  register: mktemp
+  when: net_info_result.rc == 1
+
+- name: Create network xml file
+  template:
+    src: templates/network.xml
+    dest: "{{ mktemp.stdout }}/network.xml"
+  when: net_info_result.rc == 1
+
+- name: Create libvirt network for openshift
+  command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml"
+  when: net_info_result.rc == 1
+
+- name: Remove the temp directory
+  file:
+    path: "{{ mktemp.stdout }}"
+    state: absent
+  when: net_info_result.rc == 1

+ 23 - 0
playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml

@@ -0,0 +1,23 @@
+---
+- name: Create libvirt storage directory for openshift
+  file:
+    dest: "{{ libvirt_storage_pool_path }}"
+    state: directory
+
+- acl:
+    default: yes
+    entity: kvm
+    etype: group
+    name: "{{ libvirt_storage_pool_path }}"
+    permissions: rwx
+    state: present
+
+- name: Test if libvirt storage pool for openshift already exists
+  command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"
+  register: pool_info_result
+  changed_when: False
+  failed_when: "pool_info_result.rc != 0 and 'no storage pool with matching name' not in pool_info_result.stderr"
+
+- name: Create the libvirt storage pool for openshift
+  command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
+  when: pool_info_result.rc == 1
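
The two tasks above encode "create the pool only if pool-info says it does not exist" by inspecting virsh's return code and stderr. Purely as an illustration (not part of this commit), the same check-then-create step could be done through the libvirt Python bindings; the pool name, target path, and URI below mirror the defaults added to vars.yml later in this diff, and the function name ensure_dir_pool is made up for this sketch.

#!/usr/bin/env python
# Illustrative sketch only -- not part of this commit.
import libvirt

POOL_XML = """
<pool type='dir'>
  <name>%(name)s</name>
  <target><path>%(path)s</path></target>
</pool>
"""

def ensure_dir_pool(name, path, uri='qemu:///system'):
    """Create a transient dir-backed storage pool unless it already exists."""
    conn = libvirt.open(uri)
    try:
        try:
            pool = conn.storagePoolLookupByName(name)  # raises libvirtError if absent
        except libvirt.libvirtError:
            # Rough equivalent of `virsh pool-create-as NAME dir --target PATH`
            pool = conn.storagePoolCreateXML(POOL_XML % {'name': name, 'path': path}, 0)
        return pool.isActive()
    finally:
        conn.close()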

+ 34 - 29
playbooks/libvirt/openshift-cluster/launch_instances.yml

@@ -1,45 +1,47 @@
-- name: Create the libvirt storage directory for openshift
-  file:
-    dest: '{{ libvirt_storage_pool_path }}'
-    state: directory
+---
+# TODO: Add support for choosing base image based on deployment_type and os
+# wanted (os wanted needs support added in bin/cluster with sane defaults:
+# fedora/centos for origin, rhel for online/enterprise)
+
+# TODO: create a role to encapsulate some of this complexity, possibly also
+# create a module to manage the storage tasks, network tasks, and possibly
+# even handle the libvirt tasks to set metadata in the domain xml and be able
+# to create/query data about VMs without having to use xml; the python libvirt
+# bindings look like a good candidate for this (see the sketch after this file's diff)

 - name: Download Base Cloud image
   get_url:
-    url: '{{ base_image_url }}'
-    sha256sum: '{{ base_image_sha256 }}'
-    dest: '{{ libvirt_storage_pool_path }}/{{ base_image_name }}'
+    url: '{{ image_url }}'
+    sha256sum: '{{ image_sha256 }}'
+    dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'

 - name: Create the cloud-init config drive path
   file:
-    dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
+    dest: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
     state: directory
-  with_items: '{{ instances }}'
+  with_items: instances

 - name: Create the cloud-init config drive files
   template:
     src: '{{ item[1] }}'
-    dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/openstack/latest/{{ item[1] }}'
+    dest: '{{ os_libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
   with_nested:
-    - '{{ instances }}'
+    - instances
     - [ user-data, meta-data ]

 - name: Create the cloud-init config drive
-  command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
+  command: 'genisoimage -output {{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data'
   args:
-    chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack/latest'
-    creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
-  with_items: '{{ instances }}'
-
-- name: Create the libvirt storage pool for openshift
-  command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
-  ignore_errors: yes
+    chdir: '{{ os_libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+    creates: '{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+  with_items: instances

 - name: Refresh the libvirt storage pool for openshift
   command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'

 - name: Create VMs drives
-  command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ base_image_name }} --backing-vol-format qcow2'
-  with_items: '{{ instances }}'
+  command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ os_libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
+  with_items: instances

 - name: Create VMs
   virt:
@@ -47,19 +49,19 @@
     command: define
     xml: "{{ lookup('template', '../templates/domain.xml') }}"
     uri: '{{ libvirt_uri }}'
-  with_items: '{{ instances }}'
+  with_items: instances

 - name: Start VMs
   virt:
     name: '{{ item }}'
     state: running
     uri: '{{ libvirt_uri }}'
-  with_items: '{{ instances }}'
+  with_items: instances

 - name: Collect MAC addresses of the VMs
   shell: 'virsh -c {{ libvirt_uri }} dumpxml {{ item }} | xmllint --xpath "string(//domain/devices/interface/mac/@address)" -'
   register: scratch_mac
-  with_items: '{{ instances }}'
+  with_items: instances

 - name: Wait for the VMs to get an IP
   command: "egrep -c '{{ scratch_mac.results | oo_collect('stdout') | join('|') }}' /proc/net/arp"
@@ -72,7 +74,7 @@
 - name: Collect IP addresses of the VMs
   shell: "awk '/{{ item.stdout }}/ {print $1}' /proc/net/arp"
   register: scratch_ip
-  with_items: '{{ scratch_mac.results }}'
+  with_items: scratch_mac.results

 - set_fact:
     ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
@@ -81,7 +83,8 @@
   add_host:
     hostname: '{{ item.0 }}'
     ansible_ssh_host: '{{ item.1 }}'
-    ansible_ssh_user: root
+    ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
     groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
   with_together:
     - instances
@@ -93,10 +96,12 @@
     port: 22
   with_items: ips

-- name: Wait for root user setup
-  command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item }} echo root user is setup'
+- name: Wait for openshift user setup
+  command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
   register: result
   until: result.rc == 0
   retries: 30
   delay: 1
-  with_items: ips
+  with_together:
+  - instances
+  - ips
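
Following up on the TODO at the top of this file: a minimal sketch (not part of this commit) of how the python libvirt bindings could read the ansible:tags metadata that the updated domain.xml template below embeds in each guest, instead of shelling out to virsh dumpxml and xmllint. The namespace URI matches the template; the function name domain_tags is made up for illustration.

#!/usr/bin/env python
# Illustrative sketch only -- not part of this commit.
import xml.etree.ElementTree as ET
import libvirt

ANSIBLE_NS = 'https://github.com/ansible/ansible'

def domain_tags(uri='qemu:///system'):
    """Map every defined domain to the list of ansible:tag values in its XML."""
    conn = libvirt.open(uri)
    try:
        tags = {}
        for dom in conn.listAllDomains():
            root = ET.fromstring(dom.XMLDesc(0))
            tags[dom.name()] = [
                t.text for t in root.findall(
                    './metadata/{%s}tags/{%s}tag' % (ANSIBLE_NS, ANSIBLE_NS))
            ]
        return tags
    finally:
        conn.close()

if __name__ == '__main__':
    for name, tag_list in domain_tags().items():
        print('%s: %s' % (name, ', '.join(tag_list)))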

+ 10 - 4
playbooks/libvirt/templates/domain.xml

@@ -1,6 +1,13 @@
 <domain type='kvm' id='8'>
   <name>{{ item }}</name>
   <memory unit='GiB'>1</memory>
+  <metadata xmlns:ansible="https://github.com/ansible/ansible">
+    <ansible:tags>
+      <ansible:tag>env-{{ cluster }}</ansible:tag>
+      <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
+      <ansible:tag>host-type-{{ type }}</ansible:tag>
+    </ansible:tags>
+  </metadata>
   <currentMemory unit='GiB'>1</currentMemory>
   <vcpu placement='static'>2</vcpu>
   <os>
@@ -24,18 +31,18 @@
     <emulator>/usr/bin/qemu-system-x86_64</emulator>
     <disk type='file' device='disk'>
       <driver name='qemu' type='qcow2'/>
-      <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
+      <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}.qcow2'/>
       <target dev='vda' bus='virtio'/>
     </disk>
     <disk type='file' device='cdrom'>
       <driver name='qemu' type='raw'/>
-      <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
+      <source file='{{ os_libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/>
       <target dev='vdb' bus='virtio'/>
       <readonly/>
     </disk>
     <controller type='usb' index='0' />
     <interface type='network'>
-      <source network='default'/>
+      <source network='{{ os_libvirt_network }}'/>
       <model type='virtio'/>
     </interface>
     <serial type='pty'>
@@ -49,7 +56,6 @@
     </channel>
     <input type='tablet' bus='usb' />
     <input type='mouse' bus='ps2'/>
-    <input type='keyboard' bus='ps2'/>
     <graphics type='spice' autoport='yes' />
     <video>
       <model type='qxl' ram='65536' vram='65536' vgamem='16384' heads='1'/>

+ 3 - 0
playbooks/libvirt/openshift-cluster/templates/meta-data

@@ -0,0 +1,3 @@
+instance-id: {{ item[0] }}
+hostname: {{ item[0] }}
+local-hostname: {{ item[0] }}.example.com

+ 23 - 0
playbooks/libvirt/openshift-cluster/templates/network.xml

@@ -0,0 +1,23 @@
+<network>
+  <name>openshift-ansible</name>
+  <forward mode='nat'>
+    <nat>
+      <port start='1024' end='65535'/>
+    </nat>
+  </forward>
+  <!-- TODO: query for first available virbr interface available -->
+  <bridge name='virbr3' stp='on' delay='0'/>
+  <!-- TODO: make overridable -->
+  <domain name='example.com'/>
+  <dns>
+    <!-- TODO: automatically add host entries -->
+  </dns>
+  <!-- TODO: query for available address space -->
+  <ip address='192.168.55.1' netmask='255.255.255.0'>
+    <dhcp>
+      <range start='192.168.55.2' end='192.168.55.254'/>
+      <!-- TODO: add static address entries for the hosts to be created -->
+    </dhcp>
+  </ip>
+</network>
+

+ 23 - 0
playbooks/libvirt/openshift-cluster/templates/user-data

@@ -0,0 +1,23 @@
+#cloud-config
+disable_root: true
+
+hostname: {{ item[0] }}
+fqdn: {{ item[0] }}.example.com
+manage_etc_hosts: true
+
+users:
+  - default
+  - name: root
+    ssh_authorized_keys:
+    - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+
+system_info:
+  default_user:
+    name: openshift
+    sudo: ["ALL=(ALL) NOPASSWD: ALL"]
+
+ssh_authorized_keys:
+  - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
+
+bootcmd:
+  - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart

+ 36 - 33
playbooks/libvirt/openshift-cluster/terminate.yml

@@ -1,41 +1,44 @@
+---
+# TODO: does not handle a non-existent cluster gracefully
+
 - name: Terminate instance(s)
   hosts: localhost
-  connection: local
   gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - set_fact: cluster_group=tag_env-{{ cluster_id }}
+  - add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_terminate
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups[cluster_group] | default([])

-  vars:
-    libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift"
-    libvirt_storage_pool: 'openshift'
-    libvirt_uri: 'qemu:///system'
+  - name: Destroy VMs
+    virt:
+      name: '{{ item[0] }}'
+      command: '{{ item[1] }}'
+      uri: '{{ libvirt_uri }}'
+    with_nested:
+    - groups['oo_hosts_to_terminate']
+    - [ destroy, undefine ]

-  tasks:
-    - name: List VMs
-      virt:
-        command: list_vms
-      register: list_vms
+  - name: Delete VMs drives
+    command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
+    args:
+      removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
+    with_items: groups['oo_hosts_to_terminate']

-    - name: Destroy VMs
-      virt:
-        name: '{{ item[0] }}'
-        command: '{{ item[1] }}'
-        uri: '{{ libvirt_uri }}'
-      with_nested:
-        - '{{ list_vms.list_vms }}'
-        - [ destroy, undefine ]
-      when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+  - name: Delete the VM cloud-init image
+    file:
+      path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
+      state: absent
+    with_items: groups['oo_hosts_to_terminate']

-    - name: Delete VMs config drive
-      file:
-        path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/openstack'
-        state: absent
-      with_items: '{{ list_vms.list_vms }}'
-      when: item|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'
+  - name: Remove the cloud-init config directory
+    file:
+      path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
+      state: absent
+    with_items: groups['oo_hosts_to_terminate']

-    - name: Delete VMs drives
-      command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item[0] }}{{ item[1] }}'
-      args:
-        removes: '{{ libvirt_storage_pool_path }}/{{ item[0] }}{{ item[1] }}'
-      with_nested:
-        - '{{ list_vms.list_vms }}'
-        - [ '_configdrive', '_cloud-init.iso', '.qcow2' ]
-      when: item[0]|truncate(cluster_id|length+1, True) == '{{ cluster_id }}-...'

+ 18 - 0
playbooks/libvirt/openshift-cluster/update.yml

@@ -0,0 +1,18 @@
+---
+- name: Populate oo_hosts_to_update group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_hosts_to_update
+    add_host:
+      name: "{{ item }}"
+      groups: oo_hosts_to_update
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"]) | default([])
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: config.yml

+ 32 - 6
playbooks/libvirt/openshift-cluster/vars.yml

@@ -1,7 +1,33 @@
-# base_image_url: http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# base_image_name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
-# base_image_sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86
+---
+libvirt_storage_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible"
+libvirt_storage_pool: 'openshift-ansible'
+libvirt_network: openshift-ansible
+libvirt_uri: 'qemu:///system'

-base_image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
-base_image_name: CentOS-7-x86_64-GenericCloud.qcow2
-base_image_sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
+deployment_vars:
+  origin:
+    image:
+      url: "http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2"
+      name: CentOS-7-x86_64-GenericCloud.qcow2
+      sha256: e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab
+    ssh_user: openshift
+    sudo: yes
+  online:
+    image:
+      url:
+      name:
+      sha256:
+    ssh_user: root
+    sudo: no
+  enterprise:
+    image:
+      url:
+      name:
+      sha256:
+    ssh_user: openshift
+    sudo: yes
+#  origin:
+#    fedora:
+#      url: "http://download.fedoraproject.org/pub/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.qcow2"
+#      name: Fedora-Cloud-Base-20141203-21.x86_64.qcow2
+#      sha256: 3a99bb89f33e3d4ee826c8160053cdb8a72c80cd23350b776ce73cd244467d86

+ 0 - 21
playbooks/libvirt/openshift-master/config.yml

@@ -1,21 +0,0 @@
-- name: master/config.yml, populate oo_masters_to_config host group if needed
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: "Evaluate oo_host_group_exp if it's set"
-      add_host:
-        name: '{{ item }}'
-        groups: oo_masters_to_config
-      with_items: "{{ oo_host_group_exp | default('') }}"
-      when: oo_host_group_exp is defined
-
-- name: Configure instances
-  hosts: oo_masters_to_config
-  vars:
-    openshift_hostname: '{{ ansible_default_ipv4.address }}'
-  vars_files:
-    - vars.yml
-  roles:
-    - openshift_master
-    - pods
-    - os_env_extras

+ 0 - 1
playbooks/libvirt/openshift-master/roles

@@ -1 +0,0 @@
-../../../roles

+ 0 - 1
playbooks/libvirt/openshift-master/vars.yml

@@ -1 +0,0 @@
-openshift_debug_level: 4

+ 0 - 102
playbooks/libvirt/openshift-node/config.yml

@@ -1,102 +0,0 @@
-- name: node/config.yml, populate oo_nodes_to_config host group if needed
-  hosts: localhost
-  gather_facts: no
-  tasks:
-    - name: "Evaluate oo_host_group_exp if it's set"
-      add_host:
-        name: '{{ item }}'
-        groups: oo_nodes_to_config
-      with_items: "{{ oo_host_group_exp | default('') }}"
-      when: oo_host_group_exp is defined
-
-    - add_host:
-        name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
-        groups: oo_first_master
-      when: oo_host_group_exp is defined
-
-
-- name: Gather and set facts for hosts to configure
-  hosts: oo_nodes_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  # Since the master is registering the nodes before they are configured, we
-  # need to make sure to set the node properties beforehand if we do not want
-  # the defaults
-  - openshift_facts:
-      role: "{{ item.role }}"
-      local_facts: "{{ item.local_facts }}"
-    with_items:
-    - role: common
-      local_facts:
-        hostname: "{{ ansible_default_ipv4.address }}"
-    - role: node
-      local_facts:
-        external_id: "{{ openshift_node_external_id | default(None) }}"
-        resources_cpu: "{{ openshfit_node_resources_cpu | default(None) }}"
-        resources_memory: "{{ openshfit_node_resources_memory | default(None) }}"
-        pod_cidr: "{{ openshfit_node_pod_cidr | default(None) }}"
-        labels: "{{ openshfit_node_labels | default(None) }}"
-        annotations: "{{ openshfit_node_annotations | default(None) }}"
-
-
-- name: Register nodes
-  hosts: oo_first_master
-  vars:
-    openshift_nodes: "{{ hostvars
-          | oo_select_keys(groups['oo_nodes_to_config']) }}"
-  roles:
-  - openshift_register_nodes
-  tasks:
-  - name: Create local temp directory for syncing certs
-    local_action: command /usr/bin/mktemp -d /tmp/openshift-ansible-XXXXXXX
-    register: mktemp
-
-  - name: Sync master certs to localhost
-    synchronize:
-      mode: pull
-      checksum: yes
-      src: /var/lib/openshift/openshift.local.certificates
-      dest: "{{ mktemp.stdout }}"
-
-- name: Configure instances
-  hosts: oo_nodes_to_config
-  vars_files:
-  - vars.yml
-  vars:
-    sync_tmpdir: "{{ hostvars[groups['oo_first_master'][0]].mktemp.stdout }}"
-    cert_parent_rel_path: openshift.local.certificates
-    cert_rel_path: "{{ cert_parent_rel_path }}/node-{{ openshift.common.hostname }}"
-    cert_base_path: /var/lib/openshift
-    cert_parent_path: "{{ cert_base_path }}/{{ cert_parent_rel_path }}"
-    cert_path: "{{ cert_base_path }}/{{ cert_rel_path }}"
-  pre_tasks:
-  - name: Ensure certificate directories exists
-    file:
-      path: "{{ item }}"
-      state: directory
-    with_items:
-    - "{{ cert_path }}"
-    - "{{ cert_parent_path }}/ca"
-
-  # TODO: notify restart openshift-node and/or restart openshift-sdn-node,
-  # possibly test service started time against certificate/config file
-  # timestamps in openshift-node or openshift-sdn-node to trigger notify
-  - name: Sync certs to nodes
-    synchronize:
-      checksum: yes
-      src: "{{ item.src }}"
-      dest: "{{ item.dest }}"
-      owner: no
-      group: no
-    with_items:
-    - src: "{{ sync_tmpdir }}/{{ cert_rel_path }}"
-      dest: "{{ cert_parent_path }}"
-    - src: "{{ sync_tmpdir }}/{{ cert_parent_rel_path }}/ca/cert.crt"
-      dest: "{{ cert_parent_path }}/ca/cert.crt"
-  - local_action: file name={{ sync_tmpdir }} state=absent
-    run_once: true
-  roles:
-    - openshift_node
-    - os_env_extras
-    - os_env_extras_node

+ 0 - 1
playbooks/libvirt/openshift-node/vars.yml

@@ -1 +0,0 @@
-openshift_debug_level: 4

+ 0 - 2
playbooks/libvirt/templates/meta-data

@@ -1,2 +0,0 @@
-instance-id: {{ item[0] }}
-local-hostname: {{ item[0] }}

+ 0 - 10
playbooks/libvirt/templates/user-data

@@ -1,10 +0,0 @@
-#cloud-config
-
-disable_root: 0
-
-system_info:
-  default_user:
-    name: root
-
-ssh_authorized_keys:
-  - {{ lookup('file', '~/.ssh/id_rsa.pub') }}

+ 0 - 0
roles/openshift_common/tasks/main.yml


Some files were not shown because too many files changed in this diff