
Merge pull request #763 from openshift/master

Merge master into prod.
Kenny Woodson 9 years ago
parent
commit
9bbaa824da
100 changed files with 3556 additions and 651 deletions
  1. 3 0
      .tito/packages/.readme
  2. 1 0
      .tito/packages/openshift-ansible
  3. 1 0
      .tito/packages/openshift-ansible-bin
  4. 1 0
      .tito/packages/openshift-ansible-inventory
  5. 13 0
      .tito/releasers.conf
  6. 5 0
      .tito/tito.props
  7. 23 0
      DEPLOYMENT_TYPES.md
  8. 17 0
      Dockerfile
  9. 7 3
      README.md
  10. 240 0
      README_AEP.md
  11. 15 0
      README_ANSIBLE_CONTAINER.md
  12. 53 6
      README_AWS.md
  13. 14 3
      README_GCE.md
  14. 5 18
      README_OSE.md
  15. 30 12
      README_libvirt.md
  16. 9 13
      README_openstack.md
  17. 4 15
      README_origin.md
  18. 27 3
      README_vagrant.md
  19. 36 8
      Vagrantfile
  20. 6 0
      ansible-profile/README.md
  21. 83 0
      ansible-profile/callback_plugins/profile_tasks.py
  22. 74 23
      bin/cluster
  23. 1 1
      bin/ohi
  24. 0 104
      bin/openshift-ansible-bin.spec
  25. 1 0
      bin/openshift_ansible/aws
  26. 3 3
      bin/oscp
  27. 3 3
      bin/ossh
  28. 17 4
      bin/ossh_bash_completion
  29. 10 3
      bin/ossh_zsh_completion
  30. 252 2
      docs/best_practices_guide.adoc
  31. 1 1
      docs/core_concepts_guide.adoc
  32. 34 4
      docs/style_guide.adoc
  33. 118 10
      filter_plugins/oo_filters.py
  34. 108 0
      filter_plugins/oo_zabbix_filters.py
  35. 3 3
      git/.pylintrc
  36. 40 6
      git/pylint.sh
  37. 1 1
      inventory/aws/hosts/hosts
  38. 1 0
      inventory/byo/.gitignore
  39. 0 39
      inventory/byo/hosts
  40. 114 0
      inventory/byo/hosts.example
  41. 6 3
      inventory/gce/hosts/gce.py
  42. 1 1
      inventory/gce/hosts/hosts
  43. 1 1
      inventory/libvirt/hosts/hosts
  44. 20 9
      inventory/multi_ec2.py
  45. 1 1
      inventory/multi_ec2.yaml.example
  46. 0 82
      inventory/openshift-ansible-inventory.spec
  47. 1 1
      inventory/openstack/hosts/hosts
  48. 1 1
      inventory/openstack/hosts/nova.py
  49. 73 0
      lookup_plugins/oo_option.py
  50. 215 0
      lookup_plugins/sequence.py
  51. 272 0
      openshift-ansible.spec
  52. 29 0
      playbooks/adhoc/atomic_openshift_tutorial_reset.yml
  53. 159 0
      playbooks/adhoc/create_pv/create_pv.yaml
  54. 16 0
      playbooks/adhoc/create_pv/pv-template.j2
  55. 2 0
      playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup
  56. 142 0
      playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
  57. 115 0
      playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
  58. 69 0
      playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
  59. 41 0
      playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
  60. 206 0
      playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
  61. 55 0
      playbooks/adhoc/noc/create_host.yml
  62. 36 0
      playbooks/adhoc/noc/create_maintenance.yml
  63. 1 1
      playbooks/adhoc/noc/get_zabbix_problems.yml
  64. 20 0
      playbooks/adhoc/s3_registry/s3_registry.j2
  65. 71 0
      playbooks/adhoc/s3_registry/s3_registry.yml
  66. 145 0
      playbooks/adhoc/uninstall.yml
  67. 21 0
      playbooks/adhoc/upgrades/README.md
  68. 1 0
      playbooks/adhoc/upgrades/filter_plugins
  69. 1 0
      playbooks/adhoc/upgrades/lookup_plugins
  70. 1 0
      playbooks/adhoc/upgrades/roles
  71. 138 0
      playbooks/adhoc/upgrades/upgrade.yml
  72. 58 0
      playbooks/adhoc/zabbix_setup/clean_zabbix.yml
  73. 1 0
      playbooks/adhoc/zabbix_setup/filter_plugins
  74. 7 0
      playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
  75. 13 0
      playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
  76. 0 0
      playbooks/adhoc/zabbix_setup/roles
  77. 1 1
      playbooks/aws/ansible-tower/launch.yml
  78. 11 25
      playbooks/aws/openshift-cluster/config.yml
  79. 28 8
      playbooks/aws/openshift-cluster/launch.yml
  80. 1 1
      playbooks/aws/openshift-cluster/library/ec2_ami_find.py
  81. 1 0
      playbooks/aws/openshift-cluster/lookup_plugins
  82. 76 3
      playbooks/aws/openshift-cluster/tasks/launch_instances.yml
  83. 29 13
      playbooks/aws/openshift-cluster/templates/user_data.j2
  84. 60 1
      playbooks/aws/openshift-cluster/terminate.yml
  85. 3 1
      playbooks/aws/openshift-cluster/update.yml
  86. 9 3
      playbooks/aws/openshift-cluster/vars.online.int.yml
  87. 9 3
      playbooks/aws/openshift-cluster/vars.online.prod.yml
  88. 9 3
      playbooks/aws/openshift-cluster/vars.online.stage.yml
  89. 6 6
      playbooks/aws/openshift-cluster/vars.yml
  90. 0 19
      playbooks/aws/openshift-master/config.yml
  91. 0 1
      playbooks/aws/openshift-master/filter_plugins
  92. 0 70
      playbooks/aws/openshift-master/launch.yml
  93. 0 1
      playbooks/aws/openshift-master/roles
  94. 0 2
      playbooks/aws/openshift-master/terminate.yml
  95. 0 26
      playbooks/aws/openshift-node/config.yml
  96. 0 1
      playbooks/aws/openshift-node/filter_plugins
  97. 0 72
      playbooks/aws/openshift-node/launch.yml
  98. 0 1
      playbooks/aws/openshift-node/roles
  99. 0 2
      playbooks/aws/openshift-node/terminate.yml
  100. 0 0
      playbooks/aws/terminate.yml

+ 3 - 0
.tito/packages/.readme

@@ -0,0 +1,3 @@
+the .tito/packages directory contains metadata files
+named after their packages. Each file has the latest tagged
+version and the project's relative directory.
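
Each of the metadata files added below holds a single line of the form `<version>-<release> <relative-directory>`, for example `3.0.4-1 ./` for openshift-ansible. As a rough sketch only (this helper is not part of the pull request), such a line can be read like this:

```python
# Hypothetical helper, not part of this PR: parse a .tito/packages metadata
# file whose single line is "<version>-<release> <relative directory>".
def read_tito_package(path):
    with open(path) as handle:
        version_release, rel_dir = handle.read().split()
    version, release = version_release.rsplit("-", 1)
    return version, release, rel_dir

# read_tito_package(".tito/packages/openshift-ansible") -> ("3.0.4", "1", "./")
```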

+ 1 - 0
.tito/packages/openshift-ansible

@@ -0,0 +1 @@
+3.0.4-1 ./

+ 1 - 0
.tito/packages/openshift-ansible-bin

@@ -0,0 +1 @@
+0.0.21-1 bin/

+ 1 - 0
.tito/packages/openshift-ansible-inventory

@@ -0,0 +1 @@
+0.0.11-1 inventory/

+ 13 - 0
.tito/releasers.conf

@@ -0,0 +1,13 @@
+[brew]
+releaser = tito.release.DistGitReleaser
+branches = libra-rhel-7
+
+[ose-3.0]
+releaser = tito.release.DistGitReleaser
+branches = rhose-3.0-rhel-7
+srpm_disttag = .el7ose
+
+[aos-3.1]
+releaser = tito.release.DistGitReleaser
+branches = rhaos-3.1-rhel-7
+srpm_disttag = .el7aos

+ 5 - 0
.tito/tito.props

@@ -0,0 +1,5 @@
+[buildconfig]
+builder = tito.builder.Builder
+tagger = tito.tagger.VersionTagger
+changelog_do_not_remove_cherrypick = 0
+changelog_format = %s (%ae)

+ 23 - 0
DEPLOYMENT_TYPES.md

@@ -0,0 +1,23 @@
+#Deployment Types
+
+This module supports OpenShift Origin, OpenShift Enterprise, and Atomic
+Enterprise Platform. Each deployment type sets various defaults used throughout
+your environment.
+
+The table below outlines the defaults per `deployment_type`.
+
+| deployment_type                                                 | origin                                   | enterprise (< 3.1)                     | atomic-enterprise                | openshift-enterprise (>= 3.1)    |
+|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|----------------------------------|----------------------------------|
+| **openshift.common.service_type** (also used for package names) | origin                                   | openshift                              | atomic-openshift                 |                                  |
+| **openshift.common.config_base**                                | /etc/origin                              | /etc/openshift                         | /etc/origin                      | /etc/origin                      |
+| **openshift.common.data_dir**                                   | /var/lib/origin                          | /var/lib/openshift                     | /var/lib/origin                  | /var/lib/origin                  |
+| **openshift.master.registry_url openshift.node.registry_url**   | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} | aos3/aos-${component}:${version} | aos3/aos-${component}:${version} |
+| **Image Streams**                                               | centos                                   | rhel + xpaas                           | N/A                              | rhel                             |
+
+
+**NOTE** `enterprise` deployment type is used for OpenShift Enterprise version
+3.0.x. OpenShift Enterprise deployments utilizing version 3.1 and later will
+make use of the new `openshift-enterprise` deployment type.  Additional work to
+migrate between the two will be forthcoming.
+
+
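
For readers skimming the table, the same defaults can be written out as plain data. The dict below is illustrative only (it is not part of the repository) and covers the three deployment types whose values are fully listed in the table:

```python
# Illustrative only: defaults per deployment_type, transcribed from the
# DEPLOYMENT_TYPES.md table above. This dict is not part of openshift-ansible.
DEPLOYMENT_DEFAULTS = {
    "origin": {
        "service_type": "origin",
        "config_base": "/etc/origin",
        "data_dir": "/var/lib/origin",
        "registry_url": "openshift/origin-${component}:${version}",
    },
    "enterprise": {  # OpenShift Enterprise < 3.1
        "service_type": "openshift",
        "config_base": "/etc/openshift",
        "data_dir": "/var/lib/openshift",
        "registry_url": "openshift3/ose-${component}:${version}",
    },
    "atomic-enterprise": {
        "service_type": "atomic-openshift",
        "config_base": "/etc/origin",
        "data_dir": "/var/lib/origin",
        "registry_url": "aos3/aos-${component}:${version}",
    },
}

print(DEPLOYMENT_DEFAULTS["enterprise"]["config_base"])  # /etc/openshift
```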

+ 17 - 0
Dockerfile

@@ -0,0 +1,17 @@
+FROM rhel7
+
+MAINTAINER Aaron Weitekamp <aweiteka@redhat.com>
+
+RUN yum -y install http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+
+# Not sure if all of these packages are necessary
+# only git and ansible are known requirements
+RUN yum install -y --enablerepo rhel-7-server-extras-rpms net-tools bind-utils git ansible
+
+ADD ./  /opt/openshift-ansible/
+
+ENTRYPOINT ["/usr/bin/ansible-playbook"]
+
+CMD ["/opt/openshift-ansible/playbooks/byo/config.yml"]
+
+LABEL RUN docker run -it --rm --privileged --net=host -v ~/.ssh:/root/.ssh -v /etc/ansible:/etc/ansible --name NAME -e NAME=NAME -e IMAGE=IMAGE IMAGE

+ 7 - 3
README.md

@@ -1,6 +1,6 @@
-#openshift-ansible
+#OpenShift and Atomic Enterprise Ansible
 
-This repo contains OpenShift Ansible code.
+This repo contains Ansible code for OpenShift and Atomic Enterprise.
 
 ##Setup
 - Install base dependencies:
@@ -23,12 +23,13 @@ This repo contains OpenShift Ansible code.
 - Bring your own host deployments:
   - [OpenShift Enterprise](README_OSE.md)
   - [OpenShift Origin](README_origin.md)
+  - [Atomic Enterprise](README_AEP.md)
 
 - Build
   - [How to build the openshift-ansible rpms](BUILD.md)
 
 - Directory Structure:
-  - [bin/cluster](bin/cluster) - python script to easily create OpenShift 3 clusters
+  - [bin/cluster](bin/cluster) - python script to easily create clusters
   - [docs](docs) - Documentation for the project
   - [filter_plugins/](filter_plugins) - custom filters used to manipulate data in Ansible
   - [inventory/](inventory) - houses Ansible dynamic inventory scripts
@@ -36,6 +37,9 @@ This repo contains OpenShift Ansible code.
   - [roles/](roles) - shareable Ansible tasks
 
 ##Contributing
+- [Best Practices Guide](docs/best_practices_guide.adoc)
+- [Core Concepts](docs/core_concepts_guide.adoc)
+- [Style Guide](docs/style_guide.adoc)
 
 ###Feature Roadmap
 Our Feature Roadmap is available on the OpenShift Origin Infrastructure [Trello board](https://trello.com/b/nbkIrqKa/openshift-origin-infrastructure). All ansible items will be tagged with [installv3].

+ 240 - 0
README_AEP.md

@@ -0,0 +1,240 @@
+# Installing AEP from dev puddles using ansible
+
+* [Requirements](#requirements)
+* [Caveats](#caveats)
+* [Known Issues](#known-issues)
+* [Configuring the host inventory](#configuring-the-host-inventory)
+* [Creating the default variables for the hosts and host groups](#creating-the-default-variables-for-the-hosts-and-host-groups)
+* [Running the ansible playbooks](#running-the-ansible-playbooks)
+* [Post-ansible steps](#post-ansible-steps)
+* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
+
+## Requirements
+* ansible
+  * Tested using ansible 1.9.1 and 1.9.2
+  * There is currently a known issue with ansible-1.9.0, you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842
+  * Available in Fedora channels
+  * Available for EL with EPEL and Optional channel
+* One or more RHEL 7.1 VMs
+* Either ssh key based auth for the root user or ssh key based auth for a user
+  with sudo access (no password)
+* A checkout of atomic-enterprise-ansible from https://github.com/projectatomic/atomic-enterprise-ansible/
+
+  ```sh
+  git clone https://github.com/projectatomic/atomic-enterprise-ansible.git
+  cd atomic-enterprise-ansible
+  ```
+
+## Caveats
+This ansible repo is currently under heavy revision for providing OSE support;
+the following items are highly likely to change before the OSE support is
+merged into the upstream repo:
+  * the current git branch for testing
+  * how the inventory file should be configured
+  * variables that need to be set
+  * bootstrapping steps
+  * other configuration steps
+
+## Known Issues
+* Host subscriptions are not configurable yet, the hosts need to be
+  pre-registered with subscription-manager or have the RHEL base repo
+  pre-configured. If using subscription-manager the following commands will
+  disable all but the rhel-7-server, rhel-7-server-extras, and
+  rhel-7-server-ose-3.0 repos:
+```sh
+subscription-manager repos --disable="*"
+subscription-manager repos \
+--enable="rhel-7-server-rpms" \
+--enable="rhel-7-server-extras-rpms" \
+--enable="rhel-7-server-ose-3.0-rpms"
+```
+* Configuration of router is not automated yet
+* Configuration of docker-registry is not automated yet
+
+## Configuring the host inventory
+[Ansible docs](http://docs.ansible.com/intro_inventory.html)
+
+Example inventory file for configuring one master and two nodes for the test
+environment. This can be configured in the default inventory file
+(/etc/ansible/hosts), or using a custom file and passing the --inventory
+option to ansible-playbook.
+
+/etc/ansible/hosts:
+```ini
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a password
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true
+#ansible_sudo=true
+
+# To deploy origin, change deployment_type to origin
+deployment_type=enterprise
+
+# Pre-release registry URL
+oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}
+
+# Pre-release additional repo
+openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
+'baseurl':
+'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os',
+'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name':
+'OpenShift Origin COPR', 'baseurl':
+'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/',
+'enabled': 1, 'gpgcheck': 1, gpgkey:
+'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
+# host group for masters
+[masters]
+ose3-master.example.com
+
+# host group for nodes
+[nodes]
+ose3-node[1:2].example.com
+```
+
+The hostnames above should resolve both from the hosts themselves and
+the host where ansible is running (if different).
+
+## Running the ansible playbooks
+From the atomic-enterprise-ansible checkout run:
+```sh
+ansible-playbook playbooks/byo/config.yml
+```
+**Note:** this assumes that the host inventory is /etc/ansible/hosts, if using a different
+inventory file use the -i option for ansible-playbook.
+
+## Post-ansible steps
+#### Create the default router
+On the master host:
+```sh
+oadm router --create=true \
+  --credentials=/etc/openshift/master/openshift-router.kubeconfig \
+  --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}'
+```
+
+#### Create the default docker-registry
+On the master host:
+```sh
+oadm registry --create=true \
+  --credentials=/etc/openshift/master/openshift-registry.kubeconfig \
+  --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}' \
+  --mount-host=/var/lib/openshift/docker-registry
+```
+
+## Overriding detected ip addresses and hostnames
+Some deployments will require that the user override the detected hostnames
+and ip addresses for the hosts. To see what the default values will be you can
+run the openshift_facts playbook:
+```sh
+ansible-playbook playbooks/byo/openshift_facts.yml
+```
+The output will be similar to:
+```
+ok: [10.3.9.45] => {
+    "result": {
+        "ansible_facts": {
+            "openshift": {
+                "common": {
+                    "hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+                    "ip": "172.16.4.79",
+                    "public_hostname": "jdetiber-osev3-ansible-005dcfa6-27c6-463d-9b95-ef059579befd.os1.phx2.redhat.com",
+                    "public_ip": "10.3.9.45",
+                    "use_openshift_sdn": true
+                },
+                "provider": {
+                  ... <snip> ...
+                }
+            }
+        },
+        "changed": false,
+        "invocation": {
+            "module_args": "",
+            "module_name": "openshift_facts"
+        }
+    }
+}
+ok: [10.3.9.42] => {
+    "result": {
+        "ansible_facts": {
+            "openshift": {
+                "common": {
+                    "hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+                    "ip": "172.16.4.75",
+                    "public_hostname": "jdetiber-osev3-ansible-c6ae8cdc-ba0b-4a81-bb37-14549893f9d3.os1.phx2.redhat.com",
+                    "public_ip": "10.3.9.42",
+                    "use_openshift_sdn": true
+                },
+                "provider": {
+                  ...<snip>...
+                }
+            }
+        },
+        "changed": false,
+        "invocation": {
+            "module_args": "",
+            "module_name": "openshift_facts"
+        }
+    }
+}
+ok: [10.3.9.36] => {
+    "result": {
+        "ansible_facts": {
+            "openshift": {
+                "common": {
+                    "hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+                    "ip": "172.16.4.73",
+                    "public_hostname": "jdetiber-osev3-ansible-bc39a3d3-cdd7-42fe-9c12-9fac9b0ec320.os1.phx2.redhat.com",
+                    "public_ip": "10.3.9.36",
+                    "use_openshift_sdn": true
+                },
+                "provider": {
+                    ...<snip>...
+                }
+            }
+        },
+        "changed": false,
+        "invocation": {
+            "module_args": "",
+            "module_name": "openshift_facts"
+        }
+    }
+}
+```
+Now, we want to verify the detected common settings to verify that they are
+what we expect them to be (if not, we can override them).
+
+* hostname
+  * Should resolve to the internal ip from the instances themselves.
+  * openshift_hostname will override.
+* ip
+  * Should be the internal ip of the instance.
+  * openshift_ip will override.
+* public hostname
+  * Should resolve to the external ip from hosts outside of the cloud provider.
+  * openshift_public_hostname will override.
+* public_ip
+  * Should be the externally accessible ip associated with the instance
+  * openshift_public_ip will override
+* use_openshift_sdn
+  * Should be true unless the cloud is GCE.
+  * openshift_use_openshift_sdn overrides
+
+To override the defaults, you can set the variables in your inventory:
+```
+...snip...
+[masters]
+ose3-master.example.com openshift_ip=1.1.1.1 openshift_hostname=ose3-master.example.com openshift_public_ip=2.2.2.2 openshift_public_hostname=ose3-master.public.example.com
+...snip...
+```

+ 15 - 0
README_ANSIBLE_CONTAINER.md

@@ -0,0 +1,15 @@
+# Running ansible in a docker container
+* Building ansible container:
+
+  ```sh
+  git clone https://github.com/openshift/openshift-ansible.git
+  cd openshift-ansible
+  docker build --rm -t ansible .
+  ```
+* Create /etc/ansible directory on the host machine and copy inventory file (hosts) into it.
+* Copy ssh public key of the host machine to master and nodes machines in the cluster.
+* Running the ansible container:
+
+  ```sh
+  docker run -it --rm --privileged --net=host -v ~/.ssh:/root/.ssh -v /etc/ansible:/etc/ansible ansible
+  ```

+ 53 - 6
README_AWS.md

@@ -20,17 +20,39 @@ Create a credentials file
 ```
 Note: You must source this file before running any Ansible commands.
 
+Alternatively, you could configure credentials in either ~/.boto or ~/.aws/credentials, see the [boto docs](http://docs.pythonboto.org/en/latest/boto_config_tut.html) for the format.
+
+Subscribe to CentOS
+-------------------
+
+1. [CentOS on AWS](https://aws.amazon.com/marketplace/pp/B00O7WM7QW)
+
+
+Set up Security Group
+---------------------
+By default, a cluster is launched into the `public` security group. Make sure you allow hosts to talk to each other on port `4789` for SDN.
+You may also want to allow access from the outside world on the following ports:
+
+```
+• 22    - ssh
+• 80    - Web Apps
+• 443   - Web Apps (https)
+• 4789  - SDN / VXLAN
+• 8443  - OpenShift Console
+• 10250 - kubelet
+```
+
 
 (Optional) Setup your $HOME/.ssh/config file
 -------------------------------------------
-In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use '.ssh/config'
+In case of a cluster creation, or any other case where you don't know the machine hostname in advance, you can use `.ssh/config`
 to setup a private key file to allow ansible to connect to the created hosts.
 
 To do so, add the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to login on AWS.
-'''
+```
 Host *.compute-1.amazonaws.com
   PrivateKey $HOME/.ssh/my_private_key.pem
-'''
+```
 
 Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances.
 
@@ -39,7 +61,7 @@ Alternatively, you can configure your ssh-agent to hold the credentials to conne
 
 By default, a cluster is launched with the following configuration:
 
-- Instance type: m3.large
+- Instance type: m4.large
 - AMI: ami-307b3658 (for online deployments, ami-acd999c4 for origin deployments and ami-10663b78 for enterprise deployments)
 - Region: us-east-1
 - Keypair name: libra
@@ -61,11 +83,17 @@ Node specific defaults:
 
 If needed, these values can be changed by setting environment variables on your system.
 
-- export ec2_instance_type='m3.large'
-- export ec2_ami='ami-307b3658'
+- export ec2_instance_type='m4.large'
+- export ec2_image='ami-307b3658'
 - export ec2_region='us-east-1'
 - export ec2_keypair='libra'
 - export ec2_security_groups="['public']"
+- export ec2_vpc_subnet='my_vpc_subnet'
+- export ec2_assign_public_ip='true'
+- export os_etcd_root_vol_size='20'
+- export os_etcd_root_vol_type='standard'
+- export os_etcd_vol_size='20'
+- export os_etcd_vol_type='standard'
 - export os_master_root_vol_size='20'
 - export os_master_root_vol_type='standard'
 - export os_node_root_vol_size='15'
@@ -114,3 +142,22 @@ Terminating a cluster
 ```
   bin/cluster terminate aws <cluster-id>
 ```
+
+Specifying a deployment type
+---------------------------
+The --deployment-type flag can be passed to bin/cluster to specify the deployment type
+1. To launch an online cluster (requires access to private repositories and amis):
+```
+  bin/cluster create aws --deployment-type=online <cluster-id>
+```
+Note: If no deployment type is specified, then the default is origin.
+
+
+## Post-ansible steps
+
+You should now be ready to follow the **What's Next?** section of the advanced installation guide to deploy your router, registry, and other components.
+
+Refer to the advanced installation guide for your deployment type:
+
+* [OpenShift Enterprise](https://docs.openshift.com/enterprise/3.0/install_config/install/advanced_install.html#what-s-next)
+* [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html#what-s-next)
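
The credentials note above (either `AWS_ACCESS_KEY_ID`/`AWS_SECRET_ACCESS_KEY` in the environment, or a `~/.boto` / `~/.aws/credentials` file) is enforced by the `bin/cluster` change later in this pull request. A condensed, standalone sketch of that same check:

```python
import os

def check_aws_credentials():
    """Condensed sketch of the check added to bin/cluster in this PR:
    require either both AWS key environment variables or at least one
    boto-style credentials file."""
    key_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
    key_missing = [key for key in key_vars if key not in os.environ]

    boto_conf_files = ['~/.aws/credentials', '~/.boto']
    boto_configs = [conf for conf in boto_conf_files
                    if os.path.isfile(os.path.expanduser(conf))]

    if key_missing and not boto_configs:
        raise ValueError("PROVIDER aws requires {} environment variable(s). "
                         "See README_AWS.md".format(key_missing))

# check_aws_credentials()  # raises ValueError if neither source is configured
```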

+ 14 - 3
README_GCE.md

@@ -39,6 +39,13 @@ Create a gce.ini file for GCE
 * gce_service_account_pem_file_path - Full path from previous steps
 * gce_project_id - Found in "Projects", it lists all the gce projects you are associated with.  The page lists their "Project Name" and "Project ID".  You want the "Project ID"
 
+Mandatory customization variables (check the values according to your tenant):
+* zone = europe-west1-d
+* network = default
+* gce_machine_type = n1-standard-2
+* gce_machine_image = preinstalled-slave-50g-v5
+
+
 1. vi ~/.gce/gce.ini
 1. make the contents look like this:
 ```
@@ -46,11 +53,15 @@ Create a gce.ini file for GCE
 gce_service_account_email_address = long...@developer.gserviceaccount.com
 gce_service_account_pem_file_path = /full/path/to/project_id-gce_key_hash.pem
 gce_project_id = project_id
+zone = europe-west1-d
+network = default
+gce_machine_type = n1-standard-2
+gce_machine_image = preinstalled-slave-50g-v5
+
 ```
-1. Setup a sym link so that gce.py will pick it up (link must be in same dir as gce.py)
+1. Define the environment variable GCE_INI_PATH so gce.py can pick it up and bin/cluster can also read it
 ```
-  cd openshift-ansible/inventory/gce
-  ln -s ~/.gce/gce.ini gce.ini
+export GCE_INI_PATH=~/.gce/gce.ini
 ```
 
 

+ 5 - 18
README_OSE.md

@@ -19,7 +19,7 @@
 * Either ssh key based auth for the root user or ssh key based auth for a user
   with sudo access (no password)
 * A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/
-  
+
   ```sh
   git clone https://github.com/openshift/openshift-ansible.git
   cd openshift-ansible
@@ -46,7 +46,7 @@ subscription-manager repos --disable="*"
 subscription-manager repos \
 --enable="rhel-7-server-rpms" \
 --enable="rhel-7-server-extras-rpms" \
---enable="rhel-server-7-ose-beta-rpms"
+--enable="rhel-7-server-ose-3.0-rpms"
 ```
 * Configuration of router is not automated yet
 * Configuration of docker-registry is not automated yet
@@ -80,7 +80,7 @@ ansible_ssh_user=root
 deployment_type=enterprise
 
 # Pre-release registry URL
-oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
+oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
 
 # Pre-release additional repo
 openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
@@ -101,6 +101,7 @@ ose3-master.example.com
 
 # host group for nodes
 [nodes]
+ose3-master.example.com
 ose3-node[1:2].example.com
 ```
 
@@ -116,22 +117,8 @@ ansible-playbook playbooks/byo/config.yml
 inventory file use the -i option for ansible-playbook.
 
 ## Post-ansible steps
-#### Create the default router
-On the master host:
-```sh
-openshift ex router --create=true \
-  --credentials=/var/lib/openshift/openshift.local.certificates/openshift-router/.kubeconfig \
-  --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}'
-```
 
-#### Create the default docker-registry
-On the master host:
-```sh
-openshift ex registry --create=true \
-  --credentials=/var/lib/openshift/openshift.local.certificates/openshift-registry/.kubeconfig \
-  --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}' \
-  --mount-host=/var/lib/openshift/docker-registry
-```
+You should now be ready to follow the [What's Next?](https://docs.openshift.com/enterprise/3.0/install_config/install/advanced_install.html#what-s-next) section of the advanced installation guide to deploy your router, registry, and other components.
 
 ## Overriding detected ip addresses and hostnames
 Some deployments will require that the user override the detected hostnames

+ 30 - 12
README_libvirt.md

@@ -3,21 +3,23 @@ LIBVIRT Setup instructions
 
 `libvirt` is an `openshift-ansible` provider that uses `libvirt` to create local Fedora VMs that are provisioned exactly the same way that cloud VMs would be provisioned.
 
-This makes `libvirt` useful to develop, test and debug Openshift and openshift-ansible locally on the developer’s workstation before going to the cloud.
+This makes `libvirt` useful to develop, test and debug OpenShift and openshift-ansible locally on the developer’s workstation before going to the cloud.
 
 Install dependencies
 --------------------
 
-1.	Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
-2.	Install [ebtables](http://ebtables.netfilter.org/)
-3.	Install [qemu](http://wiki.qemu.org/Main_Page)
-4.	Install [libvirt](http://libvirt.org/)
-5.	Enable and start the libvirt daemon, e.g:
+1.      Install [ansible](http://www.ansible.com/)
+2.	Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
+3.	Install [ebtables](http://ebtables.netfilter.org/)
+4.	Install [qemu and qemu-system-x86](http://wiki.qemu.org/Main_Page)
+5.	Install [libvirt-python and libvirt](http://libvirt.org/)
+6.	Install [genisoimage](http://cdrkit.org/)
+7.	Enable and start the libvirt daemon, e.g:
 	-	`systemctl enable libvirtd`
 	-	`systemctl start libvirtd`
-6.	[Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
-7.	Check that your `$HOME` is accessible to the qemu user²
-8.	Configure dns resolution on the host³
+8.	[Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
+9.	Check that your `$HOME` is accessible to the qemu user²
+10.	Configure dns resolution on the host³
 
 #### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
 
@@ -68,9 +70,14 @@ If your `$HOME` is world readable, everything is fine. If your `$HOME` is privat
 error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
 ```
 
-In order to fix that issue, you have several possibilities:* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory: * backed by a filesystem with a lot of free disk space * writable by your user; * accessible by the qemu user.* Grant the qemu user access to the storage pool.
+In order to fix that issue, you have several possibilities:
+ * set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
+   * backed by a filesystem with a lot of free disk space
+   * writable by your user;
+   * accessible by the qemu user.
+ * Grant the qemu user access to the storage pool.
 
-On Arch:
+On Arch or Fedora 22+:
 
 ```
 setfacl -m g:kvm:--x ~
@@ -89,7 +96,8 @@ dns=dnsmasq
 -	Configure dnsmasq to use the Virtual Network router for example.com:
 
 ```sh
-sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf server=/example.com/192.168.55.1
+sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
+server=/example.com/192.168.55.1
 ```
 
 Test The Setup
@@ -102,6 +110,16 @@ Test The Setup
   bin/cluster list libvirt ''
 ```
 
+Configuration
+-------------
+
+The following options can be passed via the `-o` flag of the `create` command or as environment variables:
+
+* `image_url` (default to `http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2`): URL of the QCOW2 image to download
+* `image_name` (default to `CentOS-7-x86_64-GenericCloud.qcow2`): Name of the QCOW2 image to boot the VMs on
+* `image_sha256` (default to `e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab`): Expected SHA256 checksum of the downloaded image
+* `skip_image_download` (default to `no`): Skip QCOW2 image download. This requires the `image_name` QCOW2 image to be already present in `$HOME/libvirt-storage-pool-openshift-ansible`
+
 Creating a cluster
 ------------------
 

+ 9 - 13
README_openstack.md

@@ -25,24 +25,20 @@ Configuration
 
 The following options can be passed via the `-o` flag of the `create` command:
 
-* `image_name`: Name of the image to use to spawn VMs
-* `keypair` (default to `${LOGNAME}_key`): Name of the ssh key
-* `public_key` (default to `~/.ssh/id_rsa.pub`): filename of the ssh public key
-* `master_flavor_ram` (default to `2048`): VM flavor for the master (by amount of RAM)
-* `master_flavor_id`: VM flavor for the master (by ID)
-* `master_flavor_include`: VM flavor for the master (by name)
-* `node_flavor_ram` (default to `4096`): VM flavor for the nodes (by amount of RAM)
-* `node_flavor_id`: VM flavor for the nodes (by ID)
-* `node_flavor_include`: VM flavor for the nodes (by name)
-* `infra_heat_stack` (default to `playbooks/openstack/openshift-cluster/files/heat_stack.yml`): filename of the HEAT template to use to create the cluster infrastructure
+* `infra_heat_stack` (default to `playbooks/openstack/openshift-cluster/files/heat_stack.yaml`): filename of the HEAT template to use to create the cluster infrastructure
 
-The following options are used only by `heat_stack.yml`. They are so used only if the `infra_heat_stack` option is left with its default value.
+The following options are used only by `heat_stack.yaml`. They are used only if the `infra_heat_stack` option is left with its default value.
 
+* `image_name`: Name of the image to use to spawn VMs
+* `public_key` (default to `~/.ssh/id_rsa.pub`): filename of the ssh public key
+* `master_flavor` (default to `m1.small`): The ID or name of the flavor for the master
+* `node_flavor` (default to `m1.medium`): The ID or name of the flavor for the compute nodes
+* `infra_flavor` (default to `m1.small`): The ID or name of the flavor for the infrastructure nodes
 * `network_prefix` (default to `openshift-ansible-<cluster_id>`): prefix prepended to all network objects (net, subnet, router, security groups)
 * `dns` (default to `8.8.8.8,8.8.4.4`): comma separated list of DNS to use
-* `net_cidr` (default to `192.168.<rand()>.0/24`): CIDR of the network created by `heat_stack.yml`
+* `net_cidr` (default to `192.168.<rand()>.0/24`): CIDR of the network created by `heat_stack.yaml`
 * `external_net` (default to `external`): Name of the external network to connect to
-* `floating_ip_pools` (default to `external`): comma separated list of floating IP pools
+* `floating_ip_pool` (default to `external`): comma separated list of floating IP pools
 * `ssh_from` (default to `0.0.0.0/0`): IPs authorized to connect to the VMs via ssh
 
 

+ 4 - 15
README_origin.md

@@ -19,7 +19,7 @@
 * Either ssh key based auth for the root user or ssh key based auth for a user
   with sudo access (no password)
 * A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/
-  
+
   ```sh
   git clone https://github.com/openshift/openshift-ansible.git
   cd openshift-ansible
@@ -35,7 +35,7 @@ subscription-manager repos --disable="*"
 subscription-manager repos \
 --enable="rhel-7-server-rpms" \
 --enable="rhel-7-server-extras-rpms" \
---enable="rhel-server-7-ose-beta-rpms"
+--enable="rhel-7-server-ose-3.0-rpms"
 ```
 * Configuration of router is not automated yet
 * Configuration of docker-registry is not automated yet
@@ -73,6 +73,7 @@ osv3-master.example.com
 
 # host group for nodes
 [nodes]
+osv3-master.example.com
 osv3-node[1:2].example.com
 ```
 
@@ -88,20 +89,8 @@ ansible-playbook playbooks/byo/config.yml
 inventory file use the -i option for ansible-playbook.
 
 ## Post-ansible steps
-#### Create the default router
-On the master host:
-```sh
-openshift ex router --create=true \
-  --credentials=/var/lib/openshift/openshift.local.certificates/openshift-router/.kubeconfig
-```
 
-#### Create the default docker-registry
-On the master host:
-```sh
-openshift ex registry --create=true \
-  --credentials=/var/lib/openshift/openshift.local.certificates/openshift-registry/.kubeconfig \
-  --mount-host=/var/lib/openshift/docker-registry
-```
+You should now be ready to follow the [What's Next?](https://docs.openshift.org/latest/install_config/install/advanced_install.html#what-s-next) section of the advanced installation guide to deploy your router, registry, and other components.
 
 ## Overriding detected ip addresses and hostnames
 Some deployments will require that the user override the detected hostnames

+ 27 - 3
README_vagrant.md

@@ -1,10 +1,29 @@
 Requirements
 ------------
 - vagrant (tested against version 1.7.2)
-- vagrant-hostmaster plugin (tested against version 1.5.0)
+- vagrant-hostmanager plugin (tested against version 1.5.0)
+- vagrant-registration plugin (only required for enterprise deployment type)
 - vagrant-libvirt (tested against version 0.0.26)
   - Only required if using libvirt instead of virtualbox
 
+For ``enterprise`` deployment types the base RHEL box has to be added to Vagrant:
+
+1. Download the RHEL7 vagrant image (libvirt or virtualbox) available from the [Red Hat Container Development Kit downloads in the customer portal](https://access.redhat.com/downloads/content/293/ver=1/rhel---7/1.0.1/x86_64/product-downloads)
+
+2. Install it into vagrant
+
+   ``$ vagrant box add --name rhel-7 /path/to/rhel-server-libvirt-7.1-3.x86_64.box``
+
+3. (optional, recommended) Increase the disk size of the image to 20GB - This is a two step process. (these instructions are specific to libvirt)
+
+    Resize the actual qcow2 image:
+
+	``$ qemu-img resize ~/.vagrant.d/boxes/rhel-7/0/libvirt/box.img 20GB``
+
+    Edit `~/.vagrant.d/boxes/rhel-7/0/libvirt/metadata.json` to reflect the new size.  A corrected metadata.json looks like this:
+
+	``{"provider": "libvirt", "format": "qcow2", "virtual_size": 20}``
+
 Usage
 -----
 ```
@@ -21,5 +40,10 @@ vagrant provision
 Environment Variables
 ---------------------
 The following environment variables can be overridden:
-- OPENSHIFT_DEPLOYMENT_TYPE (defaults to origin, choices: origin, enterprise, online)
-- OPENSHIFT_NUM_NODES (the number of nodes to create, defaults to 2)
+- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, enterprise, online)
+- ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2)
+
+For ``enterprise`` deployment types these env variables should also be specified:
+- ``rhel_subscription_user``: rhsm user
+- ``rhel_subscription_pass``: rhsm password
+- (optional) ``rhel_subscription_pool``: poolID to attach a specific subscription besides what auto-attach detects

+ 36 - 8
Vagrantfile

@@ -15,8 +15,30 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   config.hostmanager.manage_host = true
   config.hostmanager.include_offline = true
   config.ssh.insert_key = false
+
+  if deployment_type === 'enterprise'
+    unless Vagrant.has_plugin?('vagrant-registration')
+      raise 'vagrant-registration-plugin is required for enterprise deployment'
+    end
+    username = ENV['rhel_subscription_user']
+    password = ENV['rhel_subscription_pass']
+    unless username and password
+      raise 'rhel_subscription_user and rhel_subscription_pass are required'
+    end
+    config.registration.username = username
+    config.registration.password = password
+    # FIXME this is temporary until vagrant/ansible registration modules
+    # are capable of handling specific subscription pools
+    if not ENV['rhel_subscription_pool'].nil?
+      config.vm.provision "shell" do |s|
+        s.inline = "subscription-manager attach --pool=$1 || true"
+        s.args = "#{ENV['rhel_subscription_pool']}"
+      end
+    end
+  end
+
   config.vm.provider "virtualbox" do |vbox, override|
-    override.vm.box = "chef/centos-7.1"
+    override.vm.box = "centos/7"
     vbox.memory = 1024
     vbox.cpus = 2
 
@@ -28,10 +50,14 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     libvirt.cpus = 2
     libvirt.memory = 1024
     libvirt.driver = 'kvm'
-    override.vm.box = "centos-7.1"
-    override.vm.box_url = "https://download.gluster.org/pub/gluster/purpleidea/vagrant/centos-7.1/centos-7.1.box"
-    override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05"
-    override.vm.box_download_checksum_type = "sha256"
+    case deployment_type
+    when "enterprise"
+      override.vm.box = "rhel-7"
+    when "origin"
+      override.vm.box = "centos/7"
+      override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05"
+      override.vm.box_download_checksum_type = "sha256"
+    end
   end
 
   num_nodes.times do |n|
@@ -39,6 +65,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     config.vm.define "node#{node_index}" do |node|
       node.vm.hostname = "ose3-node#{node_index}.example.com"
       node.vm.network :private_network, ip: "192.168.100.#{200 + n}"
+      config.vm.provision "shell", inline: "nmcli connection reload; systemctl restart NetworkManager.service"
     end
   end
 
@@ -46,17 +73,18 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     master.vm.hostname = "ose3-master.example.com"
     master.vm.network :private_network, ip: "192.168.100.100"
     master.vm.network :forwarded_port, guest: 8443, host: 8443
+    config.vm.provision "shell", inline: "nmcli connection reload; systemctl restart NetworkManager.service"
     master.vm.provision "ansible" do |ansible|
       ansible.limit = 'all'
       ansible.sudo = true
       ansible.groups = {
         "masters" => ["master"],
-        "nodes"   => ["node1", "node2"],
+        "nodes"   => ["master", "node1", "node2"],
       }
       ansible.extra_vars = {
-        openshift_deployment_type: "origin",
+        deployment_type: deployment_type,
       }
-      ansible.playbook = "playbooks/byo/config.yml"
+      ansible.playbook = "playbooks/byo/vagrant.yml"
     end
   end
 end

+ 6 - 0
ansible-profile/README.md

@@ -0,0 +1,6 @@
+# Ansible profile
+
+This is a callback plugin for timing tasks.
+
+The upstream project lies in:
+https://github.com/jlafon/ansible-profile

+ 83 - 0
ansible-profile/callback_plugins/profile_tasks.py

@@ -0,0 +1,83 @@
+# The MIT License (MIT)
+
+# Copyright (c) 2014 Jharrod LaFon
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy of
+# this software and associated documentation files (the "Software"), to deal in
+# the Software without restriction, including without limitation the rights to
+# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+# the Software, and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+'''
+A plugin for timing tasks
+
+This plugin records the time spent in each task.
+At the end of the playbook, it displays the time spent in the 10 slowest tasks.
+'''
+
+import time
+
+
+class CallbackModule(object):
+    """
+    A plugin for timing tasks
+    """
+    def __init__(self):
+        self.stats = {}
+        self.current = None
+
+    # Reason: The is_conditional parameter is part of the Ansible plugin API
+    # Status: permanently disabled
+    # pylint: disable=unused-argument
+    def playbook_on_task_start(self, name, is_conditional):
+        """
+        Logs the start of each task
+        """
+        if self.current is not None:
+            # Record the running time of the last executed task
+            self.stats[self.current] = time.time() - self.stats[self.current]
+
+        # Record the start time of the current task
+        self.current = name
+        self.stats[self.current] = time.time()
+
+    # Reason: The stats parameter is part of the Ansible plugin API
+    # Status: permanently disabled
+    # pylint: disable=unused-argument
+    def playbook_on_stats(self, stats):
+        """
+        Prints the timings
+        """
+        # Record the timing of the very last task
+        if self.current is not None:
+            self.stats[self.current] = time.time() - self.stats[self.current]
+
+        # Sort the tasks by their running time
+        results = sorted(
+            self.stats.items(),
+            key=lambda value: value[1],
+            reverse=True,
+        )
+
+        # Just keep the top 10
+        results = results[:10]
+
+        # Print the timings
+        for name, elapsed in results:
+            print(
+                "{0:-<70}{1:->9}".format(
+                    '{0} '.format(name),
+                    ' {0:.02f}s'.format(elapsed),
+                )
+            )
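
To use the plugin, it only has to be on Ansible's callback plugin path. Later in this pull request, `bin/cluster --profile` does that by prepending `ANSIBLE_CALLBACK_PLUGINS=ansible-profile/callback_plugins` to the `ansible-playbook` command; the snippet below is a standalone sketch of the same idea (it assumes the working directory is an openshift-ansible checkout and that `ansible-playbook` is installed):

```python
import os
import subprocess

# Sketch only: run a playbook with the profiling callback enabled, the same
# way bin/cluster --profile does it in this PR.
env = dict(os.environ)
env['ANSIBLE_CALLBACK_PLUGINS'] = 'ansible-profile/callback_plugins'

subprocess.check_call(
    ['ansible-playbook', 'playbooks/byo/config.yml'],
    env=env,
)
```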

+ 74 - 23
bin/cluster

@@ -3,8 +3,10 @@
 
 import argparse
 import ConfigParser
-import sys
 import os
+import sys
+import subprocess
+import traceback
 
 
 class Cluster(object):
@@ -22,6 +24,16 @@ class Cluster(object):
                 '-o ControlMaster=auto '
                 '-o ControlPersist=600s '
             )
+            # Because of `UserKnownHostsFile=/dev/null`
+            # our `.ssh/known_hosts` file most probably misses the ssh host public keys
+            # of our servers.
+            # In that case, ansible serializes the execution of ansible modules
+            # because we might be interactively prompted to accept the ssh host public keys.
+            # Because of `StrictHostKeyChecking=no` we know that we won't be prompted
+            # So, we don't want our modules execution to be serialized.
+            os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+            # TODO: A more secure way to proceed would consist in dynamically
+            # retrieving the ssh host public keys from the IaaS interface
 
     def get_deployment_type(self, args):
         """
@@ -37,11 +49,11 @@ class Cluster(object):
             deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
         return deployment_type
 
+
     def create(self, args):
         """
         Create an OpenShift cluster for given provider
         :param args: command line arguments provided by user
-        :return: exit status from run command
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
@@ -50,66 +62,63 @@ class Cluster(object):
 
         env['num_masters'] = args.masters
         env['num_nodes'] = args.nodes
+        env['num_infra'] = args.infra
+        env['num_etcd'] = args.etcd
 
-        return self.action(args, inventory, env, playbook)
+        self.action(args, inventory, env, playbook)
 
     def terminate(self, args):
         """
         Destroy OpenShift cluster
         :param args: command line arguments provided by user
-        :return: exit status from run command
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        return self.action(args, inventory, env, playbook)
+        self.action(args, inventory, env, playbook)
 
     def list(self, args):
         """
         List VMs in cluster
         :param args: command line arguments provided by user
-        :return: exit status from run command
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        return self.action(args, inventory, env, playbook)
+        self.action(args, inventory, env, playbook)
 
     def config(self, args):
         """
         Configure or reconfigure OpenShift across clustered VMs
         :param args: command line arguments provided by user
-        :return: exit status from run command
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        return self.action(args, inventory, env, playbook)
+        self.action(args, inventory, env, playbook)
 
     def update(self, args):
         """
         Update to latest OpenShift across clustered VMs
         :param args: command line arguments provided by user
-        :return: exit status from run command
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        return self.action(args, inventory, env, playbook)
+        self.action(args, inventory, env, playbook)
 
     def service(self, args):
         """
         Make the same service call across all nodes in the cluster
         :param args: command line arguments provided by user
-        :return: exit status from run command
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args),
@@ -118,7 +127,7 @@ class Cluster(object):
         playbook = "playbooks/{}/openshift-cluster/service.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        return self.action(args, inventory, env, playbook)
+        self.action(args, inventory, env, playbook)
 
     def setup_provider(self, provider):
         """
@@ -128,10 +137,14 @@ class Cluster(object):
         """
         config = ConfigParser.ConfigParser()
         if 'gce' == provider:
-            config.readfp(open('inventory/gce/hosts/gce.ini'))
+            gce_ini_default_path = os.path.join(
+                'inventory/gce/hosts/gce.ini')
+            gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+            if os.path.exists(gce_ini_path): 
+                config.readfp(open(gce_ini_path))
 
-            for key in config.options('gce'):
-                os.environ[key] = config.get('gce', key)
+                for key in config.options('gce'):
+                    os.environ[key] = config.get('gce', key)
 
             inventory = '-i inventory/gce/hosts'
         elif 'aws' == provider:
@@ -141,6 +154,17 @@ class Cluster(object):
                 os.environ[key] = config.get('ec2', key)
 
             inventory = '-i inventory/aws/hosts'
+
+            key_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
+            key_missing = [key for key in key_vars if key not in os.environ]
+
+            boto_conf_files = ['~/.aws/credentials', '~/.boto']
+            conf_exists = lambda conf: os.path.isfile(os.path.expanduser(conf))
+            boto_configs = [conf for conf in boto_conf_files if conf_exists(conf)]
+
+            if len(key_missing) > 0 and len(boto_configs) == 0:
+                raise ValueError("PROVIDER aws requires {} environment variable(s). See README_AWS.md".format(key_missing))
+
         elif 'libvirt' == provider:
             inventory = '-i inventory/libvirt/hosts'
         elif 'openstack' == provider:
@@ -158,7 +182,6 @@ class Cluster(object):
         :param inventory: derived provider library
         :param env: environment variables for kubernetes
         :param playbook: ansible playbook to execute
-        :return: exit status from ansible-playbook command
         """
 
         verbose = ''
@@ -168,7 +191,7 @@ class Cluster(object):
         if args.option:
             for opt in args.option:
                 k, v = opt.split('=', 1)
-                env['opt_'+k] = v
+                env['cli_' + k] = v
 
         ansible_env = '-e \'{}\''.format(
             ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()])
@@ -178,6 +201,9 @@ class Cluster(object):
             verbose, inventory, ansible_env, playbook
         )
 
+        if args.profile:
+            command = 'ANSIBLE_CALLBACK_PLUGINS=ansible-profile/callback_plugins ' + command
+
         if args.verbose > 1:
             command = 'time {}'.format(command)
 
@@ -185,7 +211,18 @@ class Cluster(object):
             sys.stderr.write('RUN [{}]\n'.format(command))
             sys.stderr.flush()
 
-        return os.system(command)
+        try:
+            subprocess.check_call(command, shell=True)
+        except subprocess.CalledProcessError as exc:
+            raise ActionFailed("ACTION [{}] failed: {}"
+                               .format(args.action, exc))
+
+
+class ActionFailed(Exception):
+    """
+    Raised when action failed.
+    """
+    pass
 
 
 if __name__ == '__main__':
@@ -231,9 +268,15 @@ if __name__ == '__main__':
     meta_parser.add_argument('-t', '--deployment-type',
                              choices=['origin', 'online', 'enterprise'],
                              help='Deployment type. (default: origin)')
+    meta_parser.add_argument('-T', '--product-type',
+                             choices=['openshift', 'atomic-enterprise'],
+                             help='Product type. (default: openshift)')
     meta_parser.add_argument('-o', '--option', action='append',
                              help='options')
 
+    meta_parser.add_argument('-p', '--profile', action='store_true',
+                             help='Enable playbook profiling')
+
     action_parser = parser.add_subparsers(dest='action', title='actions',
                                           description='Choose from valid actions')
 
@@ -243,6 +286,10 @@ if __name__ == '__main__':
                                help='number of masters to create in cluster')
     create_parser.add_argument('-n', '--nodes', default=2, type=int,
                                help='number of nodes to create in cluster')
+    create_parser.add_argument('-i', '--infra', default=1, type=int,
+                               help='number of infra nodes to create in cluster')
+    create_parser.add_argument('-e', '--etcd', default=0, type=int,
+                               help='number of external etcd hosts to create in cluster')
     create_parser.set_defaults(func=cluster.create)
 
     config_parser = action_parser.add_parser('config',
@@ -290,7 +337,11 @@ if __name__ == '__main__':
             sys.stderr.write('\nACTION [update] aborted by user!\n')
             exit(1)
 
-    status = args.func(args)
-    if status != 0:
-        sys.stderr.write("ACTION [{}] failed with exit status {}\n".format(args.action, status))
-    exit(status)
+    try:
+        args.func(args)
+    except Exception as exc:
+        if args.verbose:
+            traceback.print_exc(file=sys.stderr)
+        else:
+            print >>sys.stderr, exc
+        exit(1)
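
One behavioral change worth noting: values passed to `bin/cluster` with `-o key=value` are now exported to the playbooks as `cli_key` extra vars instead of `opt_key` (presumably consumed by the new `lookup_plugins/oo_option.py`, whose contents are not shown in this excerpt). A small standalone sketch of the resulting `ansible-playbook` invocation, using the `image_url` option documented in README_libvirt.md:

```python
# Sketch only: how bin/cluster now turns "-o key=value" options into extra vars.
options = ['image_url=http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2']

env = {'cluster_id': 'mycluster', 'deployment_type': 'origin'}
for opt in options:
    k, v = opt.split('=', 1)
    env['cli_' + k] = v  # was 'opt_' + k before this PR

ansible_env = "-e '{}'".format(
    ' '.join('%s=%s' % (key, value) for key, value in env.items())
)
print('ansible-playbook {} playbooks/libvirt/openshift-cluster/launch.yml'
      .format(ansible_env))
```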

+ 1 - 1
bin/ohi

@@ -83,7 +83,7 @@ class Ohi(object):
         """Setup the command line parser with the options we want
         """
 
-        parser = argparse.ArgumentParser(description='Openshift Host Inventory')
+        parser = argparse.ArgumentParser(description='OpenShift Host Inventory')
 
         parser.add_argument('--list-host-types', default=False, action='store_true',
                        help='List all of the host types')

+ 0 - 104
bin/openshift-ansible-bin.spec

@@ -1,104 +0,0 @@
-Summary:       OpenShift Ansible Scripts for working with metadata hosts
-Name:          openshift-ansible-bin
-Version:       0.0.18
-Release:       1%{?dist}
-License:       ASL 2.0
-URL:           https://github.com/openshift/openshift-ansible
-Source0:       %{name}-%{version}.tar.gz
-Requires:      python2, openshift-ansible-inventory
-BuildRequires: python2-devel
-BuildArch:     noarch
-
-%description
-Scripts to make it nicer when working with hosts that are defined only by metadata.
-
-%prep
-%setup -q
-
-%build
-
-%install
-mkdir -p %{buildroot}%{_bindir}
-mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
-mkdir -p %{buildroot}/etc/bash_completion.d
-mkdir -p %{buildroot}/etc/openshift_ansible
-
-cp -p ossh oscp opssh opscp ohi %{buildroot}%{_bindir}
-cp -pP openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
-
-# Make it so we can load multi_ec2.py as a library.
-rm %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py*
-ln -sf /usr/share/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
-ln -sf /usr/share/ansible/inventory/multi_ec2.pyc %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.pyc
-
-cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
-
-cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
-
-%files
-%{_bindir}/*
-%{python_sitelib}/openshift_ansible/
-/etc/bash_completion.d/*
-%config(noreplace) /etc/openshift_ansible/
-
-%changelog
-* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.18-1
-- Implement OpenStack provider (lhuard@amadeus.com)
-- * Update defaults and examples to track core concepts guide
-  (jhonce@redhat.com)
-- Issue 119 - Add support for ~/.openshift-ansible (jhonce@redhat.com)
-- Infrastructure - Add service action to bin/cluster (jhonce@redhat.com)
-
-* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.17-1
-- fixed the openshift-ansible-bin build (twiest@redhat.com)
-
-* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.14-1
-- Command line tools import multi_ec2 as lib (kwoodson@redhat.com)
-- Adding cache location for multi ec2 (kwoodson@redhat.com)
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.13-1
-- added '-e all' to ohi and fixed pylint errors. (twiest@redhat.com)
-
-* Tue May 05 2015 Thomas Wiest <twiest@redhat.com> 0.0.12-1
-- fixed opssh and opscp to allow just environment or just host-type.
-  (twiest@redhat.com)
-
-* Mon May 04 2015 Thomas Wiest <twiest@redhat.com> 0.0.11-1
-- changed opssh to a bash script using ohi to make it easier to maintain, and
-  to expose all of the pssh features directly. (twiest@redhat.com)
-- Added --user option to ohi to pre-pend the username in the hostlist output.
-  (twiest@redhat.com)
-- Added utils.py that contains a normalize_dnsname function good for sorting
-  dns names to a human readable list. (twiest@redhat.com)
-
-* Thu Apr 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.10-1
-- added --list-host-types option to opscp (twiest@redhat.com)
-
-* Thu Apr 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.9-1
-- added opscp (twiest@redhat.com)
-* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.8-1
-- fixed bug in opssh where it wouldn't actually run pssh (twiest@redhat.com)
-
-* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.7-1
-- added the ability to run opssh and ohi on all hosts in an environment, as
-  well as all hosts of the same host-type regardless of environment
-  (twiest@redhat.com)
-- added ohi (twiest@redhat.com)
-* Thu Apr 09 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
-- fixed bug where opssh would throw an exception if pssh returned a non-zero
-  exit code (twiest@redhat.com)
-
-* Wed Apr 08 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
-- fixed the opssh default output behavior to be consistent with pssh. Also
-  fixed a bug in how directories are named for --outdir and --errdir.
-  (twiest@redhat.com)
-* Tue Mar 31 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
-- Fixed when tag was missing and added opssh completion (kwoodson@redhat.com)
-
-* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
-- created a python package named openshift_ansible (twiest@redhat.com)
-
-* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
-- added config file support to opssh, ossh, and oscp (twiest@redhat.com)
-* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
-- new package built with tito
-

+ 1 - 0
bin/openshift_ansible/aws

@@ -0,0 +1 @@
+../../inventory/aws/

+ 3 - 3
bin/oscp

@@ -55,7 +55,7 @@ class Oscp(object):
             config.read(self.config_path)
 
     def parse_cli_args(self):
-        parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
+        parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
         parser.add_argument('-e', '--env',
                           action="store", help="Environment where this server exists.")
         parser.add_argument('-d', '--debug', default=False,
@@ -167,7 +167,7 @@ class Oscp(object):
                     name = server_info['ec2_tag_Name']
                     ec2_id = server_info['ec2_id']
                     ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address}'.format(**server_info)
+                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
 
                 if limit:
                     print
@@ -180,7 +180,7 @@ class Oscp(object):
                     name = server_info['ec2_tag_Name']
                     ec2_id = server_info['ec2_id']
                     ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<15} {ec2_ip_address}'.format(**server_info)
+                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
 
     def scp(self):
         '''scp files to or from a specified host

+ 3 - 3
bin/ossh

@@ -53,7 +53,7 @@ class Ossh(object):
             config.read(self.config_path)
 
     def parse_cli_args(self):
-        parser = argparse.ArgumentParser(description='Openshift Online SSH Tool.')
+        parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
         parser.add_argument('-e', '--env', action="store",
                           help="Which environment to search for the host ")
         parser.add_argument('-d', '--debug', default=False,
@@ -156,7 +156,7 @@ class Ossh(object):
                     name = server_info['ec2_tag_Name']
                     ec2_id = server_info['ec2_id']
                     ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address}'.format(**server_info)
+                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
 
                 if limit:
                     print
@@ -169,7 +169,7 @@ class Ossh(object):
                     name = server_info['ec2_tag_Name']
                     ec2_id = server_info['ec2_id']
                     ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<15} {ec2_ip_address}'.format(**server_info)
+                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
 
     def ssh(self):
         '''SSH to a specified host

+ 17 - 4
bin/ossh_bash_completion

@@ -1,6 +1,12 @@
 __ossh_known_hosts(){
-    if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-        /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+    if python -c 'import openshift_ansible' &>/dev/null; then
+      /usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+
+    elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+
+    elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
 
     fi
 }
@@ -19,8 +25,15 @@ _ossh()
 complete -F _ossh ossh oscp
 
 __opssh_known_hosts(){
-    if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-                /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+    if python -c 'import openshift_ansible' &>/dev/null; then
+      /usr/bin/python -c 'from openshift_ansible.multi_ec2 import MultiEc2; m=MultiEc2(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+
+    elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+
+    elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+
     fi
 }
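
The completion helpers above prefer the installed openshift_ansible library and only then fall back to the multi_ec2 inventory cache on disk. A minimal standalone sketch of just the cache fallback (the library branch is omitted; the two paths are the same ones the completion checks, and the cache is assumed to be the JSON that multi_ec2.py writes):

#!/usr/bin/env python
# Sketch only: print "Name.environment" completions from the multi_ec2 cache.
import json
import os

CACHE_PATHS = [
    '/dev/shm/.ansible/tmp/multi_ec2_inventory.cache',
    os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache'),
]

def known_hosts():
    '''Return "ec2_tag_Name.ec2_tag_environment" for every cached host.'''
    for path in CACHE_PATHS:
        if not os.path.isfile(path):
            continue
        with open(path) as cache:
            hostvars = json.load(cache)['_meta']['hostvars']
        return ['%s.%s' % (host['ec2_tag_Name'], host['ec2_tag_environment'])
                for host in hostvars.values()
                if 'ec2_tag_Name' in host and 'ec2_tag_environment' in host]
    return []

if __name__ == '__main__':
    print('\n'.join(known_hosts()))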
 

+ 10 - 3
bin/ossh_zsh_completion

@@ -1,9 +1,16 @@
 #compdef ossh oscp
 
 _ossh_known_hosts(){
-  if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
-    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
-  fi
+    if python -c 'import openshift_ansible' &>/dev/null; then
+      print $(/usr/bin/python -c 'from openshift_ansible import multi_ec2; m=multi_ec2.MultiEc2(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+
+    elif [[ -f /dev/shm/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_ec2_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+
+    elif [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
+      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+
+    fi
 
 }
 _ossh(){

+ 252 - 2
docs/best_practices_guide.adoc

@@ -1,6 +1,6 @@
 // vim: ft=asciidoc
 
-= Openshift-Ansible Best Practices Guide
+= openshift-ansible Best Practices Guide
 
 The purpose of this guide is to describe the preferred patterns and best practices used in this repository (both in ansible and python).
 
@@ -27,6 +27,49 @@ The tooling is flexible enough that exceptions can be made so that the tool the
 
 == Python
 
+=== Python Source Files
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Python source files MUST contain the following vim mode line.
+|===
+
+[source]
+----
+# vim: expandtab:tabstop=4:shiftwidth=4
+----
+
+Since most developers contributing to this repository use vim, this rule helps to promote consistency.
+
+If mode lines for other editors are needed, please open a GitHub issue.
+
+=== Method Signatures
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| When adding a new parameter to an existing method, a default value SHOULD be used
+|===
+The purpose of this rule is to make it so that method signatures are backwards compatible.
+
+If this rule isn't followed, it will be necessary for the person who changed the method to search out all callers and make sure that they're able to use the new method signature.
+
+.Before:
+[source,python]
+----
+def add_person(first_name, last_name):
+----
+
+.After:
+[source,python]
+----
+def add_person(first_name, last_name, age=None):
+----
+
+
 === PyLint
 http://www.pylint.org/[PyLint] is used in an attempt to keep the python code as clean and as manageable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request.
 
@@ -99,6 +142,137 @@ YAML is a superset of JSON, which means that Ansible allows JSON syntax to be in
 
 Every effort should be made to keep our Ansible YAML files in pure YAML.
 
+=== Modules
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Custom Ansible modules SHOULD be embedded in a role.
+|===
+
+.Context
+* http://docs.ansible.com/ansible/playbooks_roles.html#embedding-modules-in-roles[Ansible doc on how to embed modules in roles]
+
+The purpose of this rule is to make it easy to include custom modules in our playbooks and share them on Ansible Galaxy.
+
+.Custom module `openshift_facts.py` is embedded in the `openshift_facts` role.
+----
+> ll openshift-ansible/roles/openshift_facts/library/
+-rwxrwxr-x. 1 user group 33616 Jul 22 09:36 openshift_facts.py
+----
+
+.Custom module `openshift_facts` can be used after `openshift_facts` role has been referenced.
+[source,yaml]
+----
+- hosts: openshift_hosts
+  gather_facts: no
+  roles:
+  - role: openshift_facts
+  post_tasks:
+  - openshift_facts:
+      role: common
+      hostname: host
+      public_hostname: host.example.com
+----
+
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Parameters to Ansible modules SHOULD use the Yaml dictionary format when 3 or more parameters are being passed
+|===
+
+When a module has several parameters that are being passed in, it's hard to see exactly what value each parameter is getting. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each parameter.
+
+.Bad:
+[source,yaml]
+----
+- file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link
+----
+
+.Good:
+[source,yaml]
+----
+- file:
+    src: /file/to/link/to
+    dest: /path/to/symlink
+    owner: foo
+    group: foo
+    state: link
+----
+
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Parameters to Ansible modules SHOULD use the Yaml dictionary format when the line length exceeds 120 characters
+|===
+
+Lines that are long quickly become a wall of text that isn't easily parsable. It is preferred to use the Ansible Yaml syntax to pass in parameters so that it's more clear what values are being passed for each parameter.
+
+.Bad:
+[source,yaml]
+----
+- get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
+----
+
+.Good:
+[source,yaml]
+----
+- get_url:
+    url: http://example.com/path/file.conf
+    dest: /etc/foo.conf
+    sha256sum: b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
+----
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| The Ansible `command` module SHOULD be used instead of the Ansible `shell` module.
+|===
+.Context
+* http://docs.ansible.com/shell_module.html#notes[Ansible doc on why using the command module is a best practice]
+
+The Ansible `shell` module can run most commands that can be run from a bash CLI. This makes it extremely powerful, but it also opens our playbooks up to being exploited by attackers.
+
+.Bad:
+[source,yaml]
+----
+- shell: "/bin/echo {{ cli_var }}"
+----
+
+.Better:
+[source,yaml]
+----
+- command: "/bin/echo {{ cli_var }}"
+----
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| The Ansible `quote` filter MUST be used with any variable passed into the shell module.
+|===
+.Context
+* http://docs.ansible.com/shell_module.html#notes[Ansible doc describing why to use the quote filter]
+
+It is recommended not to use the `shell` module. However, if it absolutely must be used, all variables passed into the `shell` module MUST use the `quote` filter to ensure they are shell safe.
+
+.Bad:
+[source,yaml]
+----
+- shell: "/bin/echo {{ cli_var }}"
+----
+
+.Good:
+[source,yaml]
+----
+- shell: "/bin/echo {{ cli_var | quote }}"
+----
+
 === Defensive Programming
 
 .Context
@@ -142,12 +316,88 @@ If an Ansible role requires certain variables to be set, it's best to check for
   when: arl_environment is not defined or arl_environment == ''
 ----
 
+=== Tasks
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Ansible tasks SHOULD NOT be used in ansible playbooks. Instead, use pre_tasks and post_tasks.
+|===
+An Ansible play is defined as a Yaml dictionary. Because of that, ansible doesn't know if the play's tasks list or roles list was specified first. Therefore Ansible always runs tasks after roles.
+
+This can be quite confusing if the tasks list is defined in the playbook before the roles list, because people assume in-order execution in Ansible.
+
+Therefore, we SHOULD use pre_tasks and post_tasks to make it more clear when the tasks will be run.
+
+.Context
+* https://docs.ansible.com/playbooks_roles.html[Ansible documentation on pre_tasks and post_tasks]
+
+.Bad:
+[source,yaml]
+----
+---
+# playbook.yml
+- hosts: localhost
+  gather_facts: no
+  tasks:
+  - name: This will execute AFTER the example_role, so it's confusing
+    debug: msg="in tasks list"
+  roles:
+  - role: example_role
+
+# roles/example_role/tasks/main.yml
+- debug: msg="in example_role"
+----
+
+.Good:
+[source,yaml]
+----
+---
+# playbook.yml
+- hosts: localhost
+  gather_facts: no
+  pre_tasks:
+  - name: This will execute BEFORE the example_role, so it makes sense
+    debug: msg="in pre_tasks list"
+  roles:
+  - role: example_role
+
+# roles/example_role/tasks/main.yml
+- debug: msg="in example_role"
+----
+
+
 === Roles
 
 '''
 [cols="2v,v"]
 |===
 | **Rule**
+| All tasks in a role SHOULD be tagged with the role name.
+|===
+
+.Context
+* http://docs.ansible.com/playbooks_tags.html[Ansible doc explaining tags]
+
+Ansible tasks can be tagged, and then these tags can be used to either _run_ or _skip_ the tagged tasks using the `--tags` and `--skip-tags` ansible-playbook options respectively.
+
+This is very useful when developing and debugging new tasks. It can also significantly speed up playbook runs if the user specifies only the roles that changed.
+
+.Example:
+[source,yaml]
+----
+---
+# roles/example_role/tasks/main.yml
+- debug: msg="in example_role"
+  tags:
+  - example_role
+----
+
+
+'''
+[cols="2v,v"]
+|===
+| **Rule**
 | The Ansible roles directory MUST maintain a flat structure.
 |===
 
@@ -171,7 +421,7 @@ For consistency, role names SHOULD follow the above naming pattern. It is import
 Many times the `technology` portion of the pattern will line up with a package name. It is advised that whenever possible, the package name should be used.
 
 .Examples:
-* The role to configure an OpenShift Master is called `openshift_master`
+* The role to configure a master is called `openshift_master`
 * The role to configure OpenShift specific yum repositories is called `openshift_repos`
 
 === Filters

+ 1 - 1
docs/core_concepts_guide.adoc

@@ -1,6 +1,6 @@
 // vim: ft=asciidoc
 
-= Openshift-Ansible Core Concepts Guide
+= openshift-ansible Core Concepts Guide
 
 The purpose of this guide is to describe core concepts used in this repository.
 

+ 34 - 4
docs/style_guide.adoc

@@ -1,6 +1,6 @@
 // vim: ft=asciidoc
 
-= Openshift-Ansible Style Guide
+= openshift-ansible Style Guide
 
 The purpose of this guide is to describe the preferred coding conventions used in this repository (both in ansible and python).
 
@@ -43,18 +43,48 @@ This is a hard limit and is enforced by the build bot. This check MUST NOT be di
 
 == Ansible
 
-=== Ansible Global Variables
-Ansible global variables are defined as any variables outside of ansible roles. Examples include playbook variables, variables passed in on the cli, etc.
 
+=== Ansible Yaml file extension
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| All Ansible Yaml files MUST have a .yml extension (and NOT .YML, .yaml etc).
+|===
+
+Ansible tooling (like `ansible-galaxy init`) creates files with a .yml extension. Also, the Ansible documentation website references files with a .yml extension several times. Because of this, it is normal in the Ansible community to use a .yml extension for all Ansible Yaml files.
+
+Example: `tasks.yml`
+
+
+=== Ansible CLI Variables
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Variables meant to be passed in from the ansible CLI MUST have a prefix of cli_
+|===
+
+Ansible allows variables to be passed in on the command line using the `-e` option. These variables MUST have a prefix of cli_ so that it's clear where they came from and that, as user-supplied input, they could be exploited.
+
+
+.Example:
+[source]
+----
+ansible-playbook -e cli_foo=bar someplays.yml
+----
+
+=== Ansible Global Variables
 '''
 [cols="2v,v"]
 |===
 | **Rule**
 | Global variables MUST have a prefix of g_
 |===
+Ansible global variables are defined as any variables outside of ansible roles. Examples include playbook variables, variables passed in on the cli, etc.
 
 
-Example:
+.Example:
 [source]
 ----
 g_environment: someval

+ 118 - 10
filter_plugins/oo_filters.py

@@ -8,6 +8,8 @@ Custom filters for use in openshift-ansible
 from ansible import errors
 from operator import itemgetter
 import pdb
+import re
+import json
 
 
 class FilterModule(object):
@@ -48,12 +50,12 @@ class FilterModule(object):
 
         return [item for sublist in data for item in sublist]
 
-
     @staticmethod
     def oo_collect(data, attribute=None, filters=None):
         ''' This takes a list of dict and collects all attributes specified into a
-            list If filter is specified then we will include all items that match
-            _ALL_ of filters.
+            list. If filter is specified then we will include all items that
+            match _ALL_ of filters.  If a dict entry is missing the key in a
+            filter it will be excluded from the match.
             Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
                          {'a':2, 'z': 'z'},        # True, return
                          {'a':3, 'z': 'z'},        # True, return
@@ -71,16 +73,35 @@ class FilterModule(object):
 
         if filters is not None:
             if not issubclass(type(filters), dict):
-                raise errors.AnsibleFilterError("|fialed expects filter to be a"
+                raise errors.AnsibleFilterError("|failed expects filter to be a"
                                                 " dict")
             retval = [FilterModule.get_attr(d, attribute) for d in data if (
-                all([d[key] == filters[key] for key in filters]))]
+                all([d.get(key, None) == filters[key] for key in filters]))]
         else:
             retval = [FilterModule.get_attr(d, attribute) for d in data]
 
         return retval
 
     @staticmethod
+    def oo_select_keys_from_list(data, keys):
+        ''' This returns a list, which contains the value portions for the keys
+            Ex: data = { 'a':1, 'b':2, 'c':3 }
+                keys = ['a', 'c']
+                returns [1, 3]
+        '''
+
+        if not issubclass(type(data), list):
+            raise errors.AnsibleFilterError("|failed expects to filter on a list")
+
+        if not issubclass(type(keys), list):
+            raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+        # Gather up the values for the list of keys passed in
+        retval = [FilterModule.oo_select_keys(item, keys) for item in data]
+
+        return FilterModule.oo_flatten(retval)
+
+    @staticmethod
     def oo_select_keys(data, keys):
         ''' This returns a list, which contains the value portions for the keys
             Ex: data = { 'a':1, 'b':2, 'c':3 }
@@ -95,7 +116,7 @@ class FilterModule(object):
             raise errors.AnsibleFilterError("|failed expects first param is a list")
 
         # Gather up the values for the list of keys passed in
-        retval = [data[key] for key in keys]
+        retval = [data[key] for key in keys if data.has_key(key)]
 
         return retval
 
@@ -130,6 +151,16 @@ class FilterModule(object):
         return rval
 
     @staticmethod
+    def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
+        '''Take a dict in the form of { 'key': 'value', 'key': 'value' } and
+           arrange them as a string 'key=value key=value'
+        '''
+        if not issubclass(type(data), dict):
+            raise errors.AnsibleFilterError("|failed expects first param is a dict")
+
+        return out_joiner.join([in_joiner.join([k, v]) for k, v in data.items()])
+
+    @staticmethod
     def oo_ami_selector(data, image_name):
         ''' This takes a list of amis and an image name and attempts to return
             the latest ami.
@@ -174,9 +205,9 @@ class FilterModule(object):
         '''
         if not issubclass(type(data), dict):
             raise errors.AnsibleFilterError("|failed expects first param is a dict")
-        if host_type not in ['master', 'node']:
-            raise errors.AnsibleFilterError("|failed expects either master or node"
-                                            " host type")
+        if host_type not in ['master', 'node', 'etcd']:
+            raise errors.AnsibleFilterError("|failed expects etcd, master or node"
+                                            " as the host type")
 
         root_vol = data[host_type]['root']
         root_vol['device_name'] = '/dev/sda1'
@@ -194,6 +225,13 @@ class FilterModule(object):
                 docker_vol.pop('delete_on_termination', None)
                 docker_vol['ephemeral'] = 'ephemeral0'
             return [root_vol, docker_vol]
+        elif host_type == 'etcd':
+            etcd_vol = data[host_type]['etcd']
+            etcd_vol['device_name'] = '/dev/xvdb'
+            etcd_vol['delete_on_termination'] = True
+            if etcd_vol['device_type'] != 'io1':
+                etcd_vol.pop('iops', None)
+            return [root_vol, etcd_vol]
         return [root_vol]
 
     @staticmethod
@@ -222,10 +260,78 @@ class FilterModule(object):
         # Gather up the values for the list of keys passed in
         return [x for x in data if x[filter_attr]]
 
+    @staticmethod
+    def oo_parse_heat_stack_outputs(data):
+        ''' Formats the HEAT stack output into a usable form
+
+            The goal is to transform something like this:
+
+            +---------------+-------------------------------------------------+
+            | Property      | Value                                           |
+            +---------------+-------------------------------------------------+
+            | capabilities  | [] |                                            |
+            | creation_time | 2015-06-26T12:26:26Z |                          |
+            | description   | OpenShift cluster |                             |
+            | …             | …                                               |
+            | outputs       | [                                               |
+            |               |   {                                             |
+            |               |     "output_value": "value_A"                   |
+            |               |     "description": "This is the value of Key_A" |
+            |               |     "output_key": "Key_A"                       |
+            |               |   },                                            |
+            |               |   {                                             |
+            |               |     "output_value": [                           |
+            |               |       "value_B1",                               |
+            |               |       "value_B2"                                |
+            |               |     ],                                          |
+            |               |     "description": "This is the value of Key_B" |
+            |               |     "output_key": "Key_B"                       |
+            |               |   },                                            |
+            |               | ]                                               |
+            | parameters    | {                                               |
+            | …             | …                                               |
+            +---------------+-------------------------------------------------+
+
+            into something like this:
+
+            {
+              "Key_A": "value_A",
+              "Key_B": [
+                "value_B1",
+                "value_B2"
+              ]
+            }
+        '''
+
+        # Extract the “outputs” JSON snippet from the pretty-printed array
+        in_outputs = False
+        outputs = ''
+
+        line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|')
+        for line in data['stdout_lines']:
+            match = line_regex.match(line)
+            if match:
+                if match.group(1) == 'outputs':
+                    in_outputs = True
+                elif match.group(1) != '':
+                    in_outputs = False
+                if in_outputs:
+                    outputs += match.group(2)
+
+        outputs = json.loads(outputs)
+
+        # Revamp the “outputs” to put it in the form of a “Key: value” map
+        revamped_outputs = {}
+        for output in outputs:
+            revamped_outputs[output['output_key']] = output['output_value']
+
+        return revamped_outputs
+
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {
             "oo_select_keys": self.oo_select_keys,
+            "oo_select_keys_from_list": self.oo_select_keys_from_list,
             "oo_collect": self.oo_collect,
             "oo_flatten": self.oo_flatten,
             "oo_pdb": self.oo_pdb,
@@ -233,6 +339,8 @@ class FilterModule(object):
             "oo_ami_selector": self.oo_ami_selector,
             "oo_ec2_volume_definition": self.oo_ec2_volume_definition,
             "oo_combine_key_value": self.oo_combine_key_value,
+            "oo_combine_dict": self.oo_combine_dict,
             "oo_split": self.oo_split,
-            "oo_filter_list": self.oo_filter_list
+            "oo_filter_list": self.oo_filter_list,
+            "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs
         }
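
Of the filters added above, oo_combine_dict and oo_select_keys_from_list are the easiest to misread from the diff alone. A small illustration with throwaway data (assumes Python 2, like the rest of this repository, and that oo_filters.py and the ansible package it imports are both on the import path):

# Illustrative only: the data below is made up for the example.
from oo_filters import FilterModule

fm = FilterModule()

# oo_combine_dict: dict -> "key=value key=value" (dict ordering is not guaranteed)
print fm.oo_combine_dict({'region': 'infra', 'zone': 'default'})
# region=infra zone=default

# oo_select_keys_from_list: collect the same keys from every dict in a list
hosts = [{'name': 'master1', 'ip': '10.0.0.1'},
         {'name': 'node1', 'ip': '10.0.0.2'}]
print fm.oo_select_keys_from_list(hosts, ['ip'])
# ['10.0.0.1', '10.0.0.2']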

+ 108 - 0
filter_plugins/oo_zabbix_filters.py

@@ -0,0 +1,108 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom zabbix filters for use in openshift-ansible
+'''
+
+import pdb
+
+class FilterModule(object):
+    ''' Custom zabbix ansible filters '''
+
+    @staticmethod
+    def create_data(data, results, key, new_key):
+        '''Take a dict, filter through results and add results['key'] to dict
+        '''
+        new_list = [app[key] for app in results]
+        data[new_key] = new_list
+        return data
+
+    @staticmethod
+    def oo_set_zbx_trigger_triggerid(item, trigger_results):
+        '''Set zabbix trigger id from trigger results
+        '''
+        if isinstance(trigger_results, list):
+            item['triggerid'] = trigger_results[0]['triggerid']
+            return item
+
+        item['triggerid'] = trigger_results['triggerids'][0]
+        return item
+
+    @staticmethod
+    def oo_set_zbx_item_hostid(item, template_results):
+        ''' Set zabbix host id from template results
+        '''
+        if isinstance(template_results, list):
+            item['hostid'] = template_results[0]['templateid']
+            return item
+
+        item['hostid'] = template_results['templateids'][0]
+        return item
+
+    @staticmethod
+    def oo_pdb(arg):
+        ''' This pops you into a pdb instance where arg is the data passed in
+            from the filter.
+            Ex: "{{ hostvars | oo_pdb }}"
+        '''
+        pdb.set_trace()
+        return arg
+
+    @staticmethod
+    def select_by_name(ans_data, data):
+        ''' Find the entry in data keyed by ans_data['name'], set its
+            params' hostid to ans_data['templateid'], and return those params.
+        '''
+        for zabbix_item in data:
+            if ans_data['name'] == zabbix_item:
+                data[zabbix_item]['params']['hostid'] = ans_data['templateid']
+                return data[zabbix_item]['params']
+        return None
+
+    @staticmethod
+    def oo_build_zabbix_collect(data, string, value):
+        ''' Build a list of dicts from a list of data matched on string attribute
+        '''
+        rval = []
+        for item in data:
+            if item[string] == value:
+                rval.append(item)
+
+        return rval
+
+    @staticmethod
+    def oo_build_zabbix_list_dict(values, string):
+        ''' Build a list of dicts with string as key for each value
+        '''
+        rval = []
+        for value in values:
+            rval.append({string: value})
+        return rval
+
+    @staticmethod
+    def oo_remove_attr_from_list_dict(data, attr):
+        ''' Remove a specific attribute from a dict
+        '''
+        attrs = []
+        if isinstance(attr, str):
+            attrs.append(attr)
+        else:
+            attrs = attr
+
+        for attribute in attrs:
+            for _entry in data:
+                _entry.pop(attribute, None)
+
+        return data
+
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {
+            "select_by_name": self.select_by_name,
+            "oo_set_zbx_item_hostid": self.oo_set_zbx_item_hostid,
+            "oo_set_zbx_trigger_triggerid": self.oo_set_zbx_trigger_triggerid,
+            "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict,
+            "create_data": self.create_data,
+            "oo_build_zabbix_collect": self.oo_build_zabbix_collect,
+            "oo_remove_attr_from_list_dict": self.oo_remove_attr_from_list_dict,
+        }
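
Unlike oo_filters.py, this module imports nothing but pdb, so the new helpers can be exercised directly from a Python 2 prompt. A quick illustration with toy data (the values are made up; only the shapes matter):

# Illustrative only: assumes oo_zabbix_filters.py is on the import path.
from oo_zabbix_filters import FilterModule

fm = FilterModule()

# oo_build_zabbix_list_dict: wrap each value in a one-key dict
print fm.oo_build_zabbix_list_dict(['app1', 'app2'], 'name')
# [{'name': 'app1'}, {'name': 'app2'}]

# oo_build_zabbix_collect: keep only the dicts whose attribute matches
items = [{'host': 'a', 'state': 'up'}, {'host': 'b', 'state': 'down'}]
print fm.oo_build_zabbix_collect(items, 'state', 'up')
# [{'host': 'a', 'state': 'up'}]

# oo_remove_attr_from_list_dict: strip an attribute from every dict (in place)
print fm.oo_remove_attr_from_list_dict(items, 'state')
# [{'host': 'a'}, {'host': 'b'}]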

+ 3 - 3
git/.pylintrc

@@ -71,7 +71,7 @@ confidence=
 # no Warning level messages displayed, use"--disable=all --enable=classes
 # --disable=W"
 # w0511 - fixme - disabled because TODOs are acceptable
-disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511
+disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511,R0801
 
 
 [REPORTS]
@@ -205,7 +205,7 @@ docstring-min-length=-1
 [SIMILARITIES]
 
 # Minimum lines number of a similarity.
-min-similarity-lines=4
+min-similarity-lines=0
 
 # Ignore comments when computing similarities.
 ignore-comments=yes
@@ -214,7 +214,7 @@ ignore-comments=yes
 ignore-docstrings=yes
 
 # Ignore imports when computing similarities.
-ignore-imports=no
+ignore-imports=yes
 
 
 [VARIABLES]

+ 40 - 6
git/pylint.sh

@@ -1,14 +1,48 @@
 #!/usr/bin/env bash
+set -eu
 
+ANSIBLE_UPSTREAM_FILES=(
+    'inventory/aws/hosts/ec2.py'
+    'inventory/gce/hosts/gce.py'
+    'inventory/libvirt/hosts/libvirt_generic.py'
+    'inventory/openstack/hosts/nova.py'
+    'lookup_plugins/sequence.py'
+  )
 
 OLDREV=$1
 NEWREV=$2
-TRG_BRANCH=$3
+#TRG_BRANCH=$3
 
-PYTHON=/var/lib/jenkins/python27/bin/python
+PYTHON=$(which python)
 
-/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | \
- grep ".py$" | \
- xargs -r -I{} ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc  {}
+set +e
+PY_DIFF=$(/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | grep ".py$")
+set -e
 
-exit $?
+FILES_TO_TEST=""
+
+for PY_FILE in $PY_DIFF; do
+  IGNORE_FILE=false
+  for UPSTREAM_FILE in "${ANSIBLE_UPSTREAM_FILES[@]}"; do
+    if [ "${PY_FILE}" == "${UPSTREAM_FILE}" ]; then
+      IGNORE_FILE=true
+      break
+    fi
+  done
+
+  if [ "${IGNORE_FILE}" == true ]; then
+    echo "Skipping file ${PY_FILE} as an upstream Ansible file..."
+    continue
+  fi
+
+  if [ -e "${PY_FILE}" ]; then
+    FILES_TO_TEST="${FILES_TO_TEST} ${PY_FILE}"
+  fi
+done
+
+if [ "${FILES_TO_TEST}" != "" ]; then
+  echo "Testing files: ${FILES_TO_TEST}"
+  exec ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST}
+else
+  exit 0
+fi

+ 1 - 1
inventory/aws/hosts/hosts

@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'

+ 1 - 0
inventory/byo/.gitignore

@@ -0,0 +1 @@
+hosts

+ 0 - 39
inventory/byo/hosts

@@ -1,39 +0,0 @@
-# This is an example of a bring your own (byo) host inventory
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-# SSH user, this user should allow ssh based auth without requiring a password
-ansible_ssh_user=root
-
-# If ansible_ssh_user is not root, ansible_sudo must be set to true
-#ansible_sudo=true
-
-# To deploy origin, change deployment_type to origin
-deployment_type=enterprise
-
-# Pre-release registry URL
-oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3_beta/ose-${component}:${version}
-
-# Pre-release additional repo
-openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
-#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
-
-# Origin copr repo
-#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
-
-# htpasswd auth
-#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
-
-# host group for masters
-[masters]
-ose3-master-ansible.test.example.com
-
-# host group for nodes
-[nodes]
-#ose3-master-ansible.test.example.com openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"

+ 114 - 0
inventory/byo/hosts.example

@@ -0,0 +1,114 @@
+# This is an example of a bring your own (byo) host inventory
+
+# Create an OSEv3 group that contains the masters and nodes groups
+[OSEv3:children]
+masters
+nodes
+etcd
+
+# Set variables common for all OSEv3 hosts
+[OSEv3:vars]
+# SSH user, this user should allow ssh based auth without requiring a
+# password. If using ssh key based auth, then the key should be managed by an
+# ssh agent.
+ansible_ssh_user=root
+
+# If ansible_ssh_user is not root, ansible_sudo must be set to true and the
+# user must be configured for passwordless sudo
+#ansible_sudo=true
+
+# deployment type valid values are origin, online, atomic-enterprise, and enterprise
+deployment_type=atomic-enterprise
+
+# Enable cluster metrics
+#use_cluster_metrics=true
+
+# Pre-release registry URL
+#oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
+
+# Pre-release Dev puddle repo
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# Pre-release Errata puddle repo
+#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterpriseErrata/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+
+# Origin copr repo
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+
+# htpasswd auth
+openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
+
+# Allow all auth
+#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
+
+# Project Configuration
+#osm_project_request_message=''
+#osm_project_request_template=''
+#osm_mcs_allocator_range='s0:/2'
+#osm_mcs_labels_per_project=5
+#osm_uid_allocator_range='1000000000-1999999999/10000'
+
+# Configure Fluentd
+#use_fluentd=true
+
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
+# master cluster ha variables using pacemaker or RHEL HA
+#openshift_master_cluster_password=openshift_cluster
+#openshift_master_cluster_vip=192.168.133.25
+#openshift_master_cluster_public_vip=192.168.133.25
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# master cluster ha variables when using a different HA solution
+# For installation the value of openshift_master_cluster_hostname must resolve
+# to the first master defined in the inventory.
+# The HA solution must be manually configured after installation and must ensure
+# that the master is running on a single master host.
+#openshift_master_cluster_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+#openshift_master_cluster_defer_ha=True
+
+# default subdomain to use for exposed routes
+#osm_default_subdomain=apps.test.example.com
+
+# additional cors origins
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
+
+# default project node selector
+#osm_default_node_selector='region=primary'
+
+# default storage plugin dependencies to install, by default the ceph and
+# glusterfs plugin dependencies will be installed, if available.
+#osn_storage_plugin_deps=['ceph','glusterfs']
+
+# default selectors for router and registry services
+# openshift_router_selector='region=infra'
+# openshift_registry_selector='region=infra'
+
+# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
+# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
+
+# Disable the OpenShift SDN plugin
+# openshift_use_openshift_sdn=False
+
+# set RPM version for debugging purposes
+#openshift_pkg_version=-3.0.0.0
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
+# However, in order to ensure that your masters are not burdened with running pods, you should
+# make them unschedulable by adding openshift_scheduleable=False to any node that's also a master.
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"

+ 6 - 3
inventory/gce/hosts/gce.py

@@ -120,6 +120,7 @@ class GceInventory(object):
             os.path.dirname(os.path.realpath(__file__)), "gce.ini")
         gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
 
+
         # Create a ConfigParser.
         # This provides empty defaults to each key, so that environment
         # variable configuration (as opposed to INI configuration) is able
@@ -173,6 +174,7 @@ class GceInventory(object):
         args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
         kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
 
+        
         # Retrieve and return the GCE driver.
         gce = get_driver(Provider.GCE)(*args, **kwargs)
         gce.connection.user_agent_append(
@@ -211,7 +213,8 @@ class GceInventory(object):
             'gce_image': inst.image,
             'gce_machine_type': inst.size,
             'gce_private_ip': inst.private_ips[0],
-            'gce_public_ip': inst.public_ips[0],
+            # Hosts don't always have a public IP
+            #'gce_public_ip': inst.public_ips[0],
             'gce_name': inst.name,
             'gce_description': inst.extra['description'],
             'gce_status': inst.extra['status'],
@@ -219,8 +222,8 @@ class GceInventory(object):
             'gce_tags': inst.extra['tags'],
             'gce_metadata': md,
             'gce_network': net,
-            # Hosts don't have a public name, so we add an IP
-            'ansible_ssh_host': inst.public_ips[0]
+            # Hosts don't always have a public IP
+            #'ansible_ssh_host': inst.public_ips[0]
         }
 
     def get_instance(self, instance_name):
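
Commenting out gce_public_ip and ansible_ssh_host avoids the IndexError on instances that have no public address, but it also drops the SSH address for instances that do have one. A guarded alternative (a sketch only, not what this commit does) would add those keys conditionally:

# Sketch: only emit the public-IP inventory keys when the instance has one.
def public_ip_fields(public_ips):
    '''Return the optional inventory keys for a (possibly empty) IP list.'''
    fields = {}
    if public_ips:
        fields['gce_public_ip'] = public_ips[0]
        fields['ansible_ssh_host'] = public_ips[0]
    return fields

print public_ip_fields(['203.0.113.10'])  # both keys set from the first address
print public_ip_fields([])                # empty dict, no IndexError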

+ 1 - 1
inventory/gce/hosts/hosts

@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'

+ 1 - 1
inventory/libvirt/hosts/hosts

@@ -1 +1 @@
-localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter=/usr/bin/python2
+localhost ansible_connection=local ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2'

+ 20 - 9
inventory/multi_ec2.py

@@ -78,7 +78,7 @@ class MultiEc2(object):
                 },
             ]
 
-            self.config['cache_max_age'] = 0
+            self.config['cache_max_age'] = 300
         else:
             raise RuntimeError("Could not find valid ec2 credentials in the environment.")
 
@@ -239,22 +239,33 @@ class MultiEc2(object):
     def apply_account_config(self, acc_config):
         ''' Apply account config settings
         '''
-        if not acc_config.has_key('hostvars') and not acc_config.has_key('all_group'):
-            return
-
         results = self.all_ec2_results[acc_config['name']]
-       # Update each hostvar with the newly desired key: value
-        for host_property, value in acc_config['hostvars'].items():
+
+        # Update each hostvar with the newly desired key: value
+        for new_var, value in acc_config.get('extra_vars', {}).items():
             # Verify the account results look sane
             # by checking for these keys ('_meta' and 'hostvars' exist)
             if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
                 for data in results['_meta']['hostvars'].values():
-                    data[str(host_property)] = str(value)
+                    data[str(new_var)] = str(value)
 
             # Add this group
             if results.has_key(acc_config['all_group']):
-                results["%s_%s" % (host_property, value)] = \
-                  copy.copy(results[acc_config['all_group']])
+                results["%s_%s" % (new_var, value)] = \
+                 copy.copy(results[acc_config['all_group']])
+
+        # Clone groups goes here
+        for name_from, name_to in acc_config.get('clone_groups', {}).items():
+            if results.has_key(name_from):
+                results[name_to] = copy.copy(results[name_from])
+
+        # Clone vars goes here
+        for to_name, from_name in acc_config.get('clone_vars', {}).items():
+            # Verify the account results look sane
+            # by checking for these keys ('_meta' and 'hostvars' exist)
+            if results.has_key('_meta') and results['_meta'].has_key('hostvars'):
+                for data in results['_meta']['hostvars'].values():
+                    data[str(to_name)] = data.get(str(from_name), 'nil')
 
         # store the results back into all_ec2_results
         self.all_ec2_results[acc_config['name']] = results
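
apply_account_config now understands three optional per-account keys: extra_vars (set a hostvar on every host and add a "<var>_<value>" copy of the all_group), clone_groups (copy an existing group under a new name) and clone_vars (copy one hostvar into another, defaulting to 'nil'). Only extra_vars appears in the shipped multi_ec2.yaml.example below; the entry here is a sketch of all three, written as the Python dict the script sees after parsing the YAML, with illustrative names and values:

# Illustrative account entry as apply_account_config would receive it.
acc_config = {
    'name': 'aws1',
    'all_group': 'ec2',
    'extra_vars': {            # set on every host; also creates e.g. group "cloud_aws"
        'cloud': 'aws',
        'account': 'aws1',
    },
    'clone_groups': {          # results['tag_env_prod'] is copied to results['prod']
        'tag_env_prod': 'prod',
    },
    'clone_vars': {            # hostvar 'public_ip' is filled from 'ec2_ip_address'
        'public_ip': 'ec2_ip_address',
    },
}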

+ 1 - 1
inventory/multi_ec2.yaml.example

@@ -18,7 +18,7 @@ accounts:
       AWS_ACCESS_KEY_ID: XXXXXXXXXXXXXXXXXXXX
       AWS_SECRET_ACCESS_KEY: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
     all_group: ec2
-    hostvars:
+    extra_vars:
       cloud: aws
       account: aws1
 

+ 0 - 82
inventory/openshift-ansible-inventory.spec

@@ -1,82 +0,0 @@
-Summary:       OpenShift Ansible Inventories
-Name:          openshift-ansible-inventory
-Version:       0.0.8
-Release:       1%{?dist}
-License:       ASL 2.0
-URL:           https://github.com/openshift/openshift-ansible
-Source0:       %{name}-%{version}.tar.gz
-Requires:      python2
-BuildRequires: python2-devel
-BuildArch:     noarch
-
-%description
-Ansible Inventories used with the openshift-ansible scripts and playbooks.
-
-%prep
-%setup -q
-
-%build
-
-%install
-mkdir -p %{buildroot}/etc/ansible
-mkdir -p %{buildroot}/usr/share/ansible/inventory
-mkdir -p %{buildroot}/usr/share/ansible/inventory/aws
-mkdir -p %{buildroot}/usr/share/ansible/inventory/gce
-
-cp -p multi_ec2.py %{buildroot}/usr/share/ansible/inventory
-cp -p multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
-cp -p aws/hosts/ec2.py %{buildroot}/usr/share/ansible/inventory/aws
-cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
-
-%files
-%config(noreplace) /etc/ansible/*
-%dir /usr/share/ansible/inventory
-/usr/share/ansible/inventory/multi_ec2.py*
-/usr/share/ansible/inventory/aws/ec2.py*
-/usr/share/ansible/inventory/gce/gce.py*
-
-%changelog
-* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.8-1
-- Added more verbosity when error happens.  Also fixed a bug.
-  (kwoodson@redhat.com)
-- Implement OpenStack provider (lhuard@amadeus.com)
-- * rename openshift_registry_url oreg_url * rename option_images to
-  _{oreg|ortr}_images (jhonce@redhat.com)
-- Fix the remaining pylint warnings (lhuard@amadeus.com)
-- Fix some of the pylint warnings (lhuard@amadeus.com)
-- [libvirt cluster] Use net-dhcp-leases to find VMs’ IPs (lhuard@amadeus.com)
-- fixed the openshift-ansible-bin build (twiest@redhat.com)
-
-* Fri May 15 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.7-1
-- Making multi_ec2 into a library (kwoodson@redhat.com)
-
-* Wed May 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
-- Added support for grouping and a bug fix. (kwoodson@redhat.com)
-
-* Tue May 12 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
-- removed ec2.ini from the openshift-ansible-inventory.spec file so that we're
-  not dictating what the ec2.ini file should look like. (twiest@redhat.com)
-- Added capability to pass in ec2.ini file. (kwoodson@redhat.com)
-
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
-- Fixed a bug due to renaming of variables. (kwoodson@redhat.com)
-
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
-- fixed build problems with openshift-ansible-inventory.spec
-  (twiest@redhat.com)
-- Allow option in multi_ec2 to set cache location. (kwoodson@redhat.com)
-- Add ansible_connection=local to localhost in inventory (jdetiber@redhat.com)
-- Adding refresh-cache option and cleanup for pylint. Also updated for
-  aws/hosts/ being added. (kwoodson@redhat.com)
-
-* Thu Mar 26 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
-- added the ability to have a config file in /etc/openshift_ansible to
-  multi_ec2.py. (twiest@redhat.com)
-- Merge pull request #97 from jwhonce/wip/cluster (jhonce@redhat.com)
-- gce inventory/playbook updates for node registration changes
-  (jdetiber@redhat.com)
-- Various fixes (jdetiber@redhat.com)
-
-* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
-- new package built with tito
-

+ 1 - 1
inventory/openstack/hosts/hosts

@@ -1 +1 @@
-localhost ansible_sudo=no ansible_python_interpreter=/usr/bin/python2 connection=local
+localhost ansible_sudo=no ansible_python_interpreter='/usr/bin/env python2' connection=local

+ 1 - 1
inventory/openstack/hosts/nova.py

@@ -34,7 +34,7 @@ except ImportError:
 # executed with no parameters, return the list of
 # all groups and hosts
 
-NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini",
+NOVA_CONFIG_FILES = [os.path.join(os.path.dirname(os.path.realpath(__file__)), "nova.ini"),
                      os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
                      "/etc/ansible/nova.ini"]
 

+ 73 - 0
lookup_plugins/oo_option.py

@@ -0,0 +1,73 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+
+'''
+oo_option lookup plugin for openshift-ansible
+
+Usage:
+
+    - debug:
+        msg: "{{ lookup('oo_option', '<key>') | default('<default_value>', True) }}"
+
+This returns, by order of priority:
+
+* if it exists, the `cli_<key>` ansible variable. This variable is set by `bin/cluster --option <key>=<value> …`
+* if it exists, the environment variable named `<key>`
+* if none of the above conditions are met, an empty string is returned
+'''
+
+from ansible.utils import template
+import os
+
+# Reason: disable too-few-public-methods because the `run` method is the only
+#     one required by the Ansible API
+# Status: permanently disabled
+# pylint: disable=too-few-public-methods
+class LookupModule(object):
+    ''' oo_option lookup plugin main class '''
+
+    # Reason: disable unused-argument because Ansible is calling us with many
+    #     parameters we are not interested in.
+    #     The lookup plugins of Ansible have this kwargs “catch-all” parameter
+    #     which is not used
+    # Status: permanently disabled unless Ansible API evolves
+    # pylint: disable=unused-argument
+    def __init__(self, basedir=None, **kwargs):
+        ''' Constructor '''
+        self.basedir = basedir
+
+    # Reason: disable unused-argument because Ansible is calling us with many
+    #     parameters we are not interested in.
+    #     The lookup plugins of Ansible have this kwargs “catch-all” parameter
+    #     which is not used
+    # Status: permanently disabled unless Ansible API evolves
+    # pylint: disable=unused-argument
+    def run(self, terms, inject=None, **kwargs):
+        ''' Main execution path '''
+
+        try:
+            terms = template.template(self.basedir, terms, inject)
+        # Reason: disable broad-except to really ignore any potential exception
+        #         This is inspired by the upstream "env" lookup plugin:
+        #         https://github.com/ansible/ansible/blob/devel/v1/ansible/runner/lookup_plugins/env.py#L29
+        # pylint: disable=broad-except
+        except Exception:
+            pass
+
+        if isinstance(terms, basestring):
+            terms = [terms]
+
+        ret = []
+
+        for term in terms:
+            option_name = term.split()[0]
+            cli_key = 'cli_' + option_name
+            if inject and cli_key in inject:
+                ret.append(inject[cli_key])
+            elif option_name in os.environ:
+                ret.append(os.environ[option_name])
+            else:
+                ret.append('')
+
+        return ret

+ 215 - 0
lookup_plugins/sequence.py

@@ -0,0 +1,215 @@
+# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+from ansible.errors import AnsibleError
+import ansible.utils as utils
+from re import compile as re_compile, IGNORECASE
+
+# shortcut format
+NUM = "(0?x?[0-9a-f]+)"
+SHORTCUT = re_compile(
+    "^(" +        # Group 0
+    NUM +         # Group 1: Start
+    "-)?" +
+    NUM +         # Group 2: End
+    "(/" +        # Group 3
+    NUM +         # Group 4: Stride
+    ")?" +
+    "(:(.+))?$",  # Group 5, Group 6: Format String
+    IGNORECASE
+)
+
+
+class LookupModule(object):
+    """
+    sequence lookup module
+
+    Used to generate some sequence of items. Takes arguments in two forms.
+
+    The simple / shortcut form is:
+
+      [start-]end[/stride][:format]
+
+    As indicated by the brackets: start, stride, and format string are all
+    optional.  The format string is in the style of printf.  This can be used
+    to pad with zeros, format in hexadecimal, etc.  All of the numerical values
+    can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
+    Negative numbers are not supported.
+
+    Some examples:
+
+      5 -> ["1","2","3","4","5"]
+      5-8 -> ["5", "6", "7", "8"]
+      2-10/2 -> ["2", "4", "6", "8", "10"]
+      4:host%02d -> ["host01","host02","host03","host04"]
+
+    The standard Ansible key-value form is accepted as well.  For example:
+
+      start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0b"]
+
+    This format takes an alternate form of "end" called "count", which counts
+    some number from the starting value.  For example:
+
+      count=5 -> ["1", "2", "3", "4", "5"]
+      start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
+      start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
+      start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
+
+    The count option is mostly useful for avoiding off-by-one errors and errors
+    calculating the number of entries in a sequence when a stride is specified.
+    """
+
+    def __init__(self, basedir, **kwargs):
+        """absorb any keyword args"""
+        self.basedir = basedir
+
+    def reset(self):
+        """set sensible defaults"""
+        self.start = 1
+        self.count = None
+        self.end = None
+        self.stride = 1
+        self.format = "%d"
+
+    def parse_kv_args(self, args):
+        """parse key-value style arguments"""
+        for arg in ["start", "end", "count", "stride"]:
+            try:
+                arg_raw = args.pop(arg, None)
+                if arg_raw is None:
+                    continue
+                arg_cooked = int(arg_raw, 0)
+                setattr(self, arg, arg_cooked)
+            except ValueError:
+                raise AnsibleError(
+                    "can't parse arg %s=%r as integer"
+                        % (arg, arg_raw)
+                )
+            if 'format' in args:
+                self.format = args.pop("format")
+        if args:
+            raise AnsibleError(
+                "unrecognized arguments to with_sequence: %r"
+                % args.keys()
+            )
+
+    def parse_simple_args(self, term):
+        """parse the shortcut forms, return True/False"""
+        match = SHORTCUT.match(term)
+        if not match:
+            return False
+
+        _, start, end, _, stride, _, format = match.groups()
+
+        if start is not None:
+            try:
+                start = int(start, 0)
+            except ValueError:
+                raise AnsibleError("can't parse start=%s as integer" % start)
+        if end is not None:
+            try:
+                end = int(end, 0)
+            except ValueError:
+                raise AnsibleError("can't parse end=%s as integer" % end)
+        if stride is not None:
+            try:
+                stride = int(stride, 0)
+            except ValueError:
+                raise AnsibleError("can't parse stride=%s as integer" % stride)
+
+        if start is not None:
+            self.start = start
+        if end is not None:
+            self.end = end
+        if stride is not None:
+            self.stride = stride
+        if format is not None:
+            self.format = format
+
+    def sanity_check(self):
+        if self.count is None and self.end is None:
+            raise AnsibleError(
+                "must specify count or end in with_sequence"
+            )
+        elif self.count is not None and self.end is not None:
+            raise AnsibleError(
+                "can't specify both count and end in with_sequence"
+            )
+        elif self.count is not None:
+            # convert count to end
+            if self.count != 0:
+                self.end = self.start + self.count * self.stride - 1
+            else:
+                self.start = 0
+                self.end = 0
+                self.stride = 0
+            del self.count
+        if self.stride > 0 and self.end < self.start:
+            raise AnsibleError("to count backwards make stride negative")
+        if self.stride < 0 and self.end > self.start:
+            raise AnsibleError("to count forward don't make stride negative")
+        if self.format.count('%') != 1:
+            raise AnsibleError("bad formatting string: %s" % self.format)
+
+    def generate_sequence(self):
+        if self.stride > 0:
+            adjust = 1
+        else:
+            adjust = -1
+        numbers = xrange(self.start, self.end + adjust, self.stride)
+
+        for i in numbers:
+            try:
+                formatted = self.format % i
+                yield formatted
+            except (ValueError, TypeError):
+                raise AnsibleError(
+                    "problem formatting %r with %r" % (i, self.format)
+                )
+
+    def run(self, terms, inject=None, **kwargs):
+        results = []
+
+        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)
+
+        if isinstance(terms, basestring):
+            terms = [ terms ]
+
+        for term in terms:
+            try:
+                self.reset()  # clear out things for this iteration
+
+                try:
+                    if not self.parse_simple_args(term):
+                        self.parse_kv_args(utils.parse_kv(term))
+                except Exception:
+                    raise AnsibleError(
+                        "unknown error parsing with_sequence arguments: %r"
+                        % term
+                    )
+
+                self.sanity_check()
+                if self.stride != 0:
+                    results.extend(self.generate_sequence())
+            except AnsibleError:
+                raise
+            except Exception, e:
+                raise AnsibleError(
+                    "unknown error generating sequence: %s" % str(e)
+                )
+
+        return results
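As a quick illustration of the shortcut form documented in the docstring above, a hypothetical task (not part of this change) that consumes the plugin through `with_sequence`:

```yaml
# Hypothetical task; the shortcut "1-3:%02d" expands to "01", "02", "03"
- name: create numbered scratch files
  file:
    path: "/tmp/node{{ item }}.conf"
    state: touch
  with_sequence: 1-3:%02d
```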

+ 272 - 0
openshift-ansible.spec

@@ -0,0 +1,272 @@
+# %commit is intended to be set by tito custom builders provided
+# in the .tito/lib directory. The values in this spec file will not be kept up to date.
+%{!?commit:
+%global commit c64d09e528ca433832c6b6e6f5c7734a9cc8ee6f
+}
+
+Name:           openshift-ansible
+Version:        3.0.4
+Release:        1%{?dist}
+Summary:        Openshift and Atomic Enterprise Ansible
+License:        ASL 2.0
+URL:            https://github.com/openshift/openshift-ansible
+Source0:        https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
+BuildArch:      noarch
+
+Requires:      ansible
+
+%description
+Openshift and Atomic Enterprise Ansible
+
+This repo contains Ansible code and playbooks
+for Openshift and Atomic Enterprise.
+
+%prep
+%setup -q
+
+%build
+
+# atomic-openshift-utils install
+pushd utils
+%{__python} setup.py build
+popd
+
+%install
+# Base openshift-ansible install
+mkdir -p %{buildroot}%{_datadir}/%{name}
+mkdir -p %{buildroot}%{_datadir}/ansible/%{name}
+mkdir -p %{buildroot}%{_datadir}/ansible_plugins
+
+# openshift-ansible-bin install
+mkdir -p %{buildroot}%{_bindir}
+mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
+mkdir -p %{buildroot}/etc/bash_completion.d
+mkdir -p %{buildroot}/etc/openshift_ansible
+cp -p bin/{ossh,oscp,opssh,opscp,ohi} %{buildroot}%{_bindir}
+cp -pP bin/openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
+cp -p bin/ossh_bash_completion %{buildroot}/etc/bash_completion.d
+cp -p bin/openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
+# Fix links
+rm -f %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+rm -f %{buildroot}%{python_sitelib}/openshift_ansible/aws
+ln -sf %{_datadir}/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
+ln -sf %{_datadir}/ansible/inventory/aws %{buildroot}%{python_sitelib}/openshift_ansible/aws
+
+# openshift-ansible-docs install
+# -docs are currently just %doc, no install needed
+
+# openshift-ansible-inventory install
+mkdir -p %{buildroot}/etc/ansible
+mkdir -p %{buildroot}%{_datadir}/ansible/inventory
+mkdir -p %{buildroot}%{_datadir}/ansible/inventory/aws
+mkdir -p %{buildroot}%{_datadir}/ansible/inventory/gce
+cp -p inventory/multi_ec2.py %{buildroot}%{_datadir}/ansible/inventory
+cp -p inventory/multi_ec2.yaml.example %{buildroot}/etc/ansible/multi_ec2.yaml
+cp -p inventory/aws/hosts/ec2.py %{buildroot}%{_datadir}/ansible/inventory/aws
+cp -p inventory/gce/hosts/gce.py %{buildroot}%{_datadir}/ansible/inventory/gce
+
+# openshift-ansible-playbooks install
+cp -rp playbooks %{buildroot}%{_datadir}/ansible/%{name}/
+
+# openshift-ansible-roles install
+cp -rp roles %{buildroot}%{_datadir}/ansible/%{name}/
+
+# openshift-ansible-filter-plugins install
+cp -rp filter_plugins %{buildroot}%{_datadir}/ansible_plugins/
+
+# openshift-ansible-lookup-plugins install
+cp -rp lookup_plugins %{buildroot}%{_datadir}/ansible_plugins/
+
+# atomic-openshift-utils install
+pushd utils
+%{__python} setup.py install --skip-build --root %{buildroot}
+# Remove this line once the name change has happened
+mv -f %{buildroot}%{_bindir}/oo-install %{buildroot}%{_bindir}/atomic-openshift-installer
+popd
+
+# Base openshift-ansible files
+%files
+%doc LICENSE.md README*
+%dir %{_datadir}/ansible/%{name}
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-bin subpackage
+# ----------------------------------------------------------------------------------
+%package bin
+Summary:       Openshift and Atomic Enterprise Ansible Scripts for working with metadata hosts
+Requires:      %{name}-inventory
+Requires:      python2
+BuildRequires: python2-devel
+BuildArch:     noarch
+
+%description bin
+Scripts to make it nicer when working with hosts that are defined only by metadata.
+
+%files bin
+%{_bindir}/*
+%{python_sitelib}/openshift_ansible/
+/etc/bash_completion.d/*
+%config(noreplace) /etc/openshift_ansible/
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-docs subpackage
+# ----------------------------------------------------------------------------------
+%package docs
+Summary:       Openshift and Atomic Enterprise Ansible documents
+Requires:      %{name}
+BuildArch:     noarch
+
+%description docs
+%{summary}.
+
+%files docs
+%doc  docs
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-inventory subpackage
+# ----------------------------------------------------------------------------------
+%package inventory
+Summary:       Openshift and Atomic Enterprise Ansible Inventories
+Requires:      python2
+BuildArch:     noarch
+
+%description inventory
+Ansible Inventories used with the openshift-ansible scripts and playbooks.
+
+%files inventory
+%config(noreplace) /etc/ansible/*
+%dir %{_datadir}/ansible/inventory
+%{_datadir}/ansible/inventory/multi_ec2.py*
+%{_datadir}/ansible/inventory/aws/ec2.py*
+%{_datadir}/ansible/inventory/gce/gce.py*
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-playbooks subpackage
+# ----------------------------------------------------------------------------------
+%package playbooks
+Summary:       Openshift and Atomic Enterprise Ansible Playbooks
+Requires:      %{name}
+BuildArch:     noarch
+
+%description playbooks
+%{summary}.
+
+%files playbooks
+%{_datadir}/ansible/%{name}/playbooks
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-roles subpackage
+# ----------------------------------------------------------------------------------
+%package roles
+Summary:       Openshift and Atomic Enterprise Ansible roles
+Requires:      %{name}
+BuildArch:     noarch
+
+%description roles
+%{summary}.
+
+%files roles
+%{_datadir}/ansible/%{name}/roles
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-filter-plugins subpackage
+# ----------------------------------------------------------------------------------
+%package filter-plugins
+Summary:       Openshift and Atomic Enterprise Ansible filter plugins
+Requires:      %{name}
+BuildArch:     noarch
+
+%description filter-plugins
+%{summary}.
+
+%files filter-plugins
+%{_datadir}/ansible_plugins/filter_plugins
+
+
+# ----------------------------------------------------------------------------------
+# openshift-ansible-lookup-plugins subpackage
+# ----------------------------------------------------------------------------------
+%package lookup-plugins
+Summary:       Openshift and Atomic Enterprise Ansible lookup plugins
+Requires:      %{name}
+BuildArch:     noarch
+
+%description lookup-plugins
+%{summary}.
+
+%files lookup-plugins
+%{_datadir}/ansible_plugins/lookup_plugins
+
+# ----------------------------------------------------------------------------------
+# atomic-openshift-utils subpackage
+# ----------------------------------------------------------------------------------
+
+%package -n atomic-openshift-utils
+Summary:       Atomic OpenShift Utilities
+BuildRequires: python-setuptools
+Requires:      ansible
+Requires:      python-click
+Requires:      python-setuptools
+Requires:      PyYAML
+BuildArch:     noarch
+
+%description -n atomic-openshift-utils
+Atomic OpenShift Utilities includes
+ - atomic-openshift-installer
+ - other utilities
+
+%files -n atomic-openshift-utils
+%{python_sitelib}/ooinstall*
+%{_bindir}/atomic-openshift-installer
+
+
+%changelog
+* Wed Oct 28 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.4-1
+- Removing spec files. (kwoodson@redhat.com)
+- Updated example (kwoodson@redhat.com)
+- Automatic commit of package [openshift-ansible-inventory] release [0.0.11-1].
+  (kwoodson@redhat.com)
+- Automatic commit of package [openshift-ansible-bin] release [0.0.21-1].
+  (kwoodson@redhat.com)
+- Automatic commit of package [openshift-ansible-inventory] release [0.0.10-1].
+  (kwoodson@redhat.com)
+- Automatic commit of package [openshift-ansible-bin] release [0.0.20-1].
+  (kwoodson@redhat.com)
+- Adding tito releasers configuration (bleanhar@redhat.com)
+- Bug fixes for the uninstall playbook (bleanhar@redhat.com)
+- Adding clone vars and groups. Renamed hostvars to extra_vars.
+  (kwoodson@redhat.com)
+- Start tracking docker info execution time (jdiaz@redhat.com)
+- The uninstall playbook should remove the kubeconfig for non-root installs
+  (bleanhar@redhat.com)
+- Adding uninstall support for Atomic Host (bleanhar@redhat.com)
+- add examples for SDN configuration (jdetiber@redhat.com)
+
+* Tue Oct 27 2015 Troy Dawson <tdawson@redhat.com> 3.0.3-1
+- Pylint fixes and ignores for incoming oo-install code. (dgoodwin@redhat.com)
+- Pylint fixes (abutcher@redhat.com)
+- Adding zabbix type and fixing zabbix agent vars (kwoodson@redhat.com)
+- Add atomic-openshift-utils add atomic-openshift-utils to openshift-
+  ansible.spec file (tdawson@redhat.com)
+- Fix quotes (spinolacastro@gmail.com)
+- Use standard library for version comparison. (abutcher@redhat.com)
+- added docker info to the end of docker loop to direct lvm playbook.
+  (twiest@redhat.com)
+- Add missing quotes (spinolacastro@gmail.com)
+- Adding Docker Log Options capabilities (epo@jemba.net)
+- Move version greater_than_fact into openshift_facts (abutcher@redhat.com)
+- Don't include proxy client cert when <3.1 or <1.1 (abutcher@redhat.com)
+- Add proxy client certs to master config. (abutcher@redhat.com)
+- Update imagestreams and quickstarts from origin (sdodson@redhat.com)
+- Get default values from openshift_facts (spinolacastro@gmail.com)
+- Cleanup (spinolacastro@gmail.com)
+- Add missing inventory example (spinolacastro@gmail.com)
+- Custom Project Config (spinolacastro@gmail.com)
+
+* Mon Oct 19 2015 Troy Dawson <tdawson@redhat.com> 3.0.2-1
+- Initial Package
+

+ 29 - 0
playbooks/adhoc/atomic_openshift_tutorial_reset.yml

@@ -0,0 +1,29 @@
+# This deletes *ALL* Docker images, and uninstalls OpenShift and
+# Atomic Enterprise RPMs.  It is primarily intended for use
+# with the tutorial as well as for developers to reset state.
+#
+---
+- include: uninstall.yml
+
+- hosts:
+    - OSEv3:children
+
+  sudo: yes
+
+  tasks:
+    - shell: docker ps -a -q | xargs docker stop
+      changed_when: False
+      failed_when: False
+
+    - shell: docker ps -a -q| xargs docker rm
+      changed_when: False
+      failed_when: False
+
+    - shell:  docker images -q |xargs docker rmi
+      changed_when: False
+      failed_when: False
+
+    - user: name={{ item }} state=absent remove=yes
+      with_items:
+        - alice
+        - joe

+ 159 - 0
playbooks/adhoc/create_pv/create_pv.yaml

@@ -0,0 +1,159 @@
+---
+#example run: 
+# ansible-playbook -e "cli_volume_size=1" \
+#                  -e "cli_device_name=/dev/xvdf" \
+#                  -e "cli_hosttype=master" \
+#                  -e "cli_environment=ops" \
+#                  create_pv.yaml
+# FIXME: we need to change "environment" to "clusterid" as that's what it really is now.
+#
+- name: Create a volume and attach it to master
+  hosts: localhost
+  gather_facts: no
+  vars:
+    cli_volume_type: gp2
+    cli_volume_iops: ''
+    oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] |
+                 intersect(groups['tag_environment_' ~ cli_environment]) |
+                 first }}"
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_volume_size
+    - cli_device_name
+    - cli_hosttype
+    - cli_environment
+
+  - name: set oo_name fact
+    set_fact:
+      oo_name: "{{ oo_name }}"
+
+
+  - name: Select a single master to run this on
+    add_host:
+      hostname: "{{ oo_name }}"
+      ansible_ssh_host: "{{ hostvars[oo_name].ec2_public_dns_name }}"
+      groups: oo_master
+
+  - name: Create a volume and attach it
+    ec2_vol:
+      state: present
+      instance: "{{ hostvars[oo_name]['ec2_id'] }}"
+      region: "{{ hostvars[oo_name]['ec2_region'] }}"
+      volume_size: "{{ cli_volume_size }}"
+      volume_type: "{{ cli_volume_type }}"
+      device_name: "{{ cli_device_name }}"
+      iops: "{{ cli_volume_iops }}"
+    register: vol
+
+  - debug: var=vol
+
+  - name: tag the vol with a name
+    ec2_tag: region={{ hostvars[oo_name]['ec2_region'] }} resource={{vol.volume_id}}
+    args:
+      tags:
+        Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}"
+        env: "{{cli_environment}}"
+    register: voltags
+
+  - debug: var=voltags
+
+- name: Configure the drive
+  gather_facts: no
+  hosts: oo_master
+  user: root
+  connection: ssh
+  vars:
+    pv_tmpdir: /tmp/persistentvolumes
+
+  post_tasks:
+  - name: Setting facts for template
+    set_fact:
+      pv_name: "pv-{{cli_volume_size}}-{{ hostvars[hostvars.localhost.oo_name]['ec2_tag_Name'] }}-{{hostvars.localhost.vol.volume_id }}"
+      vol_az: "{{ hostvars[hostvars.localhost.oo_name]['ec2_placement'] }}"
+      vol_id: "{{ hostvars.localhost.vol.volume_id }}"
+      vol_size: "{{ cli_volume_size }}"
+      pv_mntdir: "{{ pv_tmpdir }}/mnt-{{ 1000 | random }}"
+
+  - set_fact:
+      pv_template: "{{ pv_tmpdir }}/{{ pv_name }}.yaml"
+
+  - name: "Mkdir {{ pv_tmpdir }}"
+    file:
+      state: directory
+      path: "{{ pv_tmpdir }}"
+      mode: '0750'
+
+  - name: "Mkdir {{ pv_mntdir }}"
+    file:
+      state: directory
+      path: "{{ pv_mntdir }}"
+      mode: '0750'
+
+  - name: Create pv file from template
+    template:
+      src: ./pv-template.j2
+      dest: "{{ pv_template }}"
+      owner: root
+      mode: '0640'
+
+  - name: mkfs
+    filesystem:
+      dev: "{{ cli_device_name }}"
+      fstype: ext4
+    
+  - name: Mount the dev
+    mount:
+      name: "{{ pv_mntdir }}"
+      src: "{{ cli_device_name }}"
+      fstype: ext4
+      state: mounted
+
+  - name: chgrp g+rwXs
+    file: 
+      path: "{{ pv_mntdir }}"
+      mode: 'g+rwXs'
+      recurse: yes
+      seuser: system_u
+      serole: object_r
+      setype: svirt_sandbox_file_t
+      selevel: s0
+
+  - name: umount
+    mount:
+      name: "{{ pv_mntdir }}"
+      src: "{{ cli_device_name }}"
+      state: unmounted
+      fstype: ext4
+
+  - name: remove from fstab
+    mount:
+      name: "{{ pv_mntdir }}"
+      src: "{{ cli_device_name }}"
+      state: absent
+      fstype: ext4
+
+  - name: detach drive
+    delegate_to: localhost
+    ec2_vol:
+      region: "{{ hostvars[hostvars.localhost.oo_name].ec2_region }}"
+      id: "{{ hostvars.localhost.vol.volume_id }}"
+      instance: None
+
+  - name: "Remove {{ pv_mntdir }}"
+    file:
+      state: absent
+      path: "{{ pv_mntdir }}"
+
+  # We have to use the shell module because we can't set env vars with the command module.
+  - name: "Place PV into oc"
+    shell: "KUBECONFIG=/etc/openshift/master/admin.kubeconfig oc create -f {{ pv_template | quote }}"
+    register: oc_output
+
+  - debug: var=oc_output
+
+  - fail: 
+      msg: "Failed to add {{ pv_template }} to master."
+    when: oc_output.rc != 0

+ 16 - 0
playbooks/adhoc/create_pv/pv-template.j2

@@ -0,0 +1,16 @@
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: {{ pv_name }}
+  labels:
+    type: ebs
+spec:
+  capacity:
+    storage: {{ vol_size }}Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Recycle
+  awsElasticBlockStore:
+    volumeID: aws://{{ vol_az }}/{{ vol_id }}
+    fsType: ext4

+ 2 - 0
playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup

@@ -0,0 +1,2 @@
+DEVS=/dev/xvdb
+VG=docker_vg

+ 142 - 0
playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml

@@ -0,0 +1,142 @@
+---
+# This playbook converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker)
+#  in AWS.  This adds an additional EBS volume and creates the Volume Group on this EBS volume to use.
+#
+#  To run:
+#  1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+#    export AWS_ACCESS_KEY_ID='XXXXX'
+#    export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+#   ansible-playbook -e 'cli_tag_name=<tag-name>' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml
+#
+#  Example:
+#   ansible-playbook -e 'cli_tag_name=ops-master-12345' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml
+#
+#  Notes:
+#  * By default this will do a 30GB volume.
+#  * iops are calculated by Disk Size * 30.  e.g. (30GB * 30) = 900 iops
+#  * This will remove /var/lib/docker!
+#  * You may need to re-deploy docker images after this is run (like monitoring)
+#
+
+- name: Fix docker to have a provisioned iops drive
+  hosts: "tag_Name_{{ cli_tag_name }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  vars:
+    cli_volume_type: gp2
+    cli_volume_size: 30
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_tag_name
+    - cli_volume_size
+
+  - debug:
+      var: hosts
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if loopback
+    shell: docker info | grep 'Data file:.*loop'
+    register: loop_device_check
+    ignore_errors: yes
+
+  - debug:
+      var: loop_device_check
+
+  - name: fail if we don't detect loopback
+    fail:
+      msg:  loopback not detected! Please investigate manually.
+    when: loop_device_check.rc == 1
+
+  - name: stop zagg client monitoring container
+    service:
+      name: oso-rhel7-zagg-client
+      state: stopped
+    ignore_errors: yes
+
+  - name: stop pcp client monitoring container
+    service:
+      name: oso-f22-host-monitoring
+      state: stopped
+    ignore_errors: yes
+
+  - name: stop docker
+    service:
+      name: docker
+      state: stopped
+
+  - name: delete /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: remove /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: check to see if /dev/xvdb exists
+    command: test -e /dev/xvdb
+    register: xvdb_check
+    ignore_errors: yes
+
+  - debug: var=xvdb_check
+
+  - name: fail if /dev/xvdb already exists
+    fail:
+      msg: /dev/xvdb already exists.  Please investigate
+    when: xvdb_check.rc == 0
+
+  - name: Create a volume and attach it
+    delegate_to: localhost
+    ec2_vol:
+      state: present
+      instance: "{{ ec2_id }}"
+      region: "{{ ec2_region }}"
+      volume_size: "{{ cli_volume_size | default(30, True)}}"
+      volume_type: "{{ cli_volume_type }}"
+      device_name: /dev/xvdb
+    register: vol
+
+  - debug: var=vol
+
+  - name: tag the vol with a name
+    delegate_to: localhost
+    ec2_tag: region={{ ec2_region }} resource={{ vol.volume_id }}
+    args:
+      tags:
+        Name: "{{ ec2_tag_Name }}"
+        env: "{{ ec2_tag_environment }}"
+    register: voltags
+
+  - name: Wait for volume to attach
+    pause:
+      seconds: 30
+
+  - name: copy the docker-storage-setup config file
+    copy:
+      src: docker-storage-setup
+      dest: /etc/sysconfig/docker-storage-setup
+      owner: root
+      group: root
+      mode: 0664
+
+  - name: docker storage setup
+    command: docker-storage-setup
+    register: setup_output
+
+  - debug: var=setup_output
+
+  - name: start docker
+    command: systemctl start docker.service
+    register: dockerstart
+
+  - debug: var=dockerstart
+

+ 115 - 0
playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml

@@ -0,0 +1,115 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook converts docker from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+#  To run:
+#   ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device>
+#
+#  Example:
+#   ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+#  Notes:
+#  * This will remove /var/lib/docker!
+#  * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+  hosts: "{{ cli_name }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_docker_device
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if loopback
+    shell: docker info | grep 'Data file:.*loop'
+    register: loop_device_check
+    ignore_errors: yes
+
+  - debug:
+      var: loop_device_check
+
+  - name: fail if we don't detect loopback
+    fail:
+      msg:  loopback not detected! Please investigate manually.
+    when: loop_device_check.rc == 1
+
+  - name: stop zagg client monitoring container
+    service:
+      name: oso-rhel7-zagg-client
+      state: stopped
+    ignore_errors: yes
+
+  - name: stop pcp client monitoring container
+    service:
+      name: oso-f22-host-monitoring
+      state: stopped
+    ignore_errors: yes
+
+  - name: "check to see if {{ cli_docker_device }} exists"
+    command: "test -e {{ cli_docker_device }}"
+    register: docker_dev_check
+    ignore_errors: yes
+
+  - debug: var=docker_dev_check
+
+  - name: "fail if {{ cli_docker_device }} doesn't exist"
+    fail:
+      msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+    when: docker_dev_check.rc != 0
+
+  - name: stop docker
+    service:
+      name: docker
+      state: stopped
+
+  - name: delete /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: remove /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: copy the docker-storage-setup config file
+    copy:
+      content: >
+        DEVS={{ cli_docker_device }}
+        VG=docker_vg
+      dest: /etc/sysconfig/docker-storage-setup
+      owner: root
+      group: root
+      mode: 0664
+
+  - name: docker storage setup
+    command: docker-storage-setup
+    register: setup_output
+
+  - debug: var=setup_output
+
+  - name: extend the vg
+    command: lvextend -l 90%VG /dev/docker_vg/docker-pool
+    register: extend_output
+
+  - debug: var=extend_output
+
+  - name: start docker
+    service:
+      name: docker
+      state: restarted
+
+  - name: docker info
+    command: docker info
+    register: dockerinfo
+
+  - debug: var=dockerinfo

+ 69 - 0
playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml

@@ -0,0 +1,69 @@
+---
+# This playbook attempts to clean up unwanted docker files to help alleviate docker disk space issues.
+#
+#  To run:
+#
+#  1. run the playbook:
+#
+#   ansible-playbook -e 'cli_tag_name=<tag-name>' docker_storage_cleanup.yml
+#
+#  Example:
+#
+#   ansible-playbook -e 'cli_tag_name=ops-node-compute-12345' docker_storage_cleanup.yml
+#
+#  Notes:
+#  *  This *should* not interfere with running docker images
+#
+
+- name: Clean up Docker Storage
+  gather_facts: no
+  hosts: "tag_Name_{{ cli_tag_name }}"
+  user: root
+  connection: ssh
+
+  pre_tasks:
+
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_tag_name
+
+  - name: Ensure docker is running
+    service:
+      name: docker
+      state: started
+      enabled: yes
+
+  - name: Get docker info
+    command: docker info
+    register: docker_info
+
+  - name: Show docker info
+    debug:
+      var: docker_info.stdout_lines
+
+  - name: Remove exited and dead containers
+    shell: "docker ps -a | awk '/Exited|Dead/ {print $1}' | xargs --no-run-if-empty docker rm"
+    ignore_errors: yes
+
+  - name: Remove dangling docker images
+    shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi"
+    ignore_errors: yes
+
+  - name: Remove non-running docker images
+    shell: "docker images | grep -v -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null"
+    ignore_errors: yes
+
+  # leaving off the '-t' for docker exec; with '-t', the command fails under ansible due to tty handling
+  - name: update zabbix docker items
+    command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
+
+  # Get and show docker info again.
+  - name: Get docker info
+    command: docker info
+    register: docker_info
+
+  - name: Show docker info
+    debug:
+      var: docker_info.stdout_lines

+ 41 - 0
playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py

@@ -0,0 +1,41 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
+
+import pdb
+
+
+class FilterModule(object):
+    ''' Custom ansible filters '''
+
+    @staticmethod
+    def oo_pdb(arg):
+        ''' This pops you into a pdb instance where arg is the data passed in
+            from the filter.
+            Ex: "{{ hostvars | oo_pdb }}"
+        '''
+        pdb.set_trace()
+        return arg
+
+    @staticmethod
+    def translate_volume_name(volumes, target_volume):
+        '''
+            This filter matches a device string /dev/sdX to /dev/xvdX
+            It will then return the AWS volume ID
+        '''
+        for vol in volumes:
+            translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd")
+            if target_volume.startswith(translated_name):
+                return vol["id"]
+
+        return None
+
+
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {
+            "translate_volume_name": self.translate_volume_name,
+        }

+ 206 - 0
playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml

@@ -0,0 +1,206 @@
+---
+# This playbook grows the docker VG on a node by:
+#  * add a new volume
+#  * add volume to the existing VG.
+#  * pv move to the new volume.
+#  * remove old volume
+#  * detach volume
+#  * mark old volume in AWS with "REMOVE ME" tag
+#  * grow docker LVM to 90% of the VG
+#
+#  To run:
+#  1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+#    export AWS_ACCESS_KEY_ID='XXXXX'
+#    export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+#   ansible-playbook -e 'cli_tag_name=<tag-name>' grow_docker_vg.yml
+#
+#  Example:
+#   ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
+#
+#  Notes:
+#  * By default this will do a 55GB GP2 volume.  This can be overridden with the "-e 'cli_volume_size=100'" variable
+#  * This does a GP2 by default.  Support for Provisioned IOPS has not been added
+#  * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
+#  * This can be done with NO downtime on the host
+#  * This playbook assumes that there is a Logical Volume that is installed and called "docker-pool".  This is
+#      the LV that gets created via the "docker-storage-setup" command
+#
+
+- name: Grow the docker volume group
+  hosts: "tag_Name_{{ cli_tag_name }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  vars:
+    cli_volume_type: gp2
+    cli_volume_size: 55
+#    cli_volume_iops: "{{ 30 * cli_volume_size }}"
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_tag_name
+    - cli_volume_size
+
+  - debug:
+      var: hosts
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if Storage Driver (docker info) is devicemapper
+    shell: docker info | grep 'Storage Driver:.*devicemapper'
+    register: device_mapper_check
+    ignore_errors: yes
+
+  - debug:
+      var: device_mapper_check
+
+  - name: fail if we don't detect devicemapper
+    fail:
+      msg:  The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
+    when: device_mapper_check.rc == 1
+
+  # docker-storage-setup creates a docker-pool as the lvm.  I am using docker-pool lvm to test
+  # and find the volume group.
+  - name: Attempt to find the Volume Group that docker is using
+    shell: lvs | grep docker-pool | awk '{print $2}'
+    register: docker_vg_name
+    ignore_errors: yes
+
+  - debug:
+      var: docker_vg_name
+
+  - name: fail if we don't find a docker volume group
+    fail:
+      msg:  Unable to find docker volume group. Please investigate manually.
+    when: docker_vg_name.stdout_lines|length != 1
+
+  # docker-storage-setup creates a docker-pool as the lvm.  I am using docker-pool lvm to test
+  # and find the physical volume.
+  - name: Attempt to find the Physical Volume that docker is using
+    shell: "pvs | grep {{ docker_vg_name.stdout }} | awk '{print $1}'"
+    register: docker_pv_name
+    ignore_errors: yes
+
+  - debug:
+      var: docker_pv_name
+
+  - name: fail if we don't find a docker physical volume
+    fail:
+      msg:  Unable to find docker physical volume. Please investigate manually.
+    when: docker_pv_name.stdout_lines|length != 1
+
+
+  - name: get list of volumes from AWS
+    delegate_to: localhost
+    ec2_vol:
+      state: list
+      instance: "{{ ec2_id }}"
+      region: "{{ ec2_region }}"
+    register: attached_volumes
+
+  - debug: var=attached_volumes
+
+  - name: get volume id of current docker volume
+    set_fact:
+      old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}"
+
+  - debug: var=old_docker_volume_id
+
+  - name: check to see if /dev/xvdc exists
+    command: test -e /dev/xvdc
+    register: xvdc_check
+    ignore_errors: yes
+
+  - debug: var=xvdc_check
+
+  - name: fail if /dev/xvdc already exists
+    fail:
+      msg: /dev/xvdc already exists.  Please investigate
+    when: xvdc_check.rc == 0
+
+  - name: Create a volume and attach it
+    delegate_to: localhost
+    ec2_vol:
+      state: present
+      instance: "{{ ec2_id }}"
+      region: "{{ ec2_region }}"
+      volume_size: "{{ cli_volume_size | default(30, True)}}"
+      volume_type: "{{ cli_volume_type }}"
+      device_name: /dev/xvdc
+    register: create_volume
+
+  - debug: var=create_volume
+
+  - name: Fail when problems creating volumes and attaching
+    fail:
+      msg: "Failed to create or attach volume msg: {{ create_volume.msg }}"
+    when: create_volume.msg is defined
+
+  - name: tag the vol with a name
+    delegate_to: localhost
+    ec2_tag: region={{ ec2_region }} resource={{ create_volume.volume_id }}
+    args:
+      tags:
+        Name: "{{ ec2_tag_Name }}"
+        env: "{{ ec2_tag_environment }}"
+    register: voltags
+
+  - name: check for attached drive
+    command: test -b /dev/xvdc
+    register: attachment_check
+    until: attachment_check.rc == 0
+    retries: 30
+    delay: 2
+
+  - name: partition the new drive and make it lvm
+    command: parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm
+
+  - name: pvcreate /dev/xvdc
+    command: pvcreate /dev/xvdc1
+
+  - name: Extend the docker volume group
+    command: vgextend "{{ docker_vg_name.stdout }}" /dev/xvdc1
+
+  - name: pvmove onto new volume
+    command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
+    async: 43200
+    poll: 10
+
+  - name: Remove the old docker drive from the volume group
+    command: "vgreduce {{ docker_vg_name.stdout }} {{ docker_pv_name.stdout }}"
+
+  - name: Remove the pv from the old drive
+    command: "pvremove {{ docker_pv_name.stdout }}"
+
+  - name: Extend the docker lvm
+    command: "lvextend -l '90%VG' /dev/{{ docker_vg_name.stdout }}/docker-pool"
+
+  - name: detach old docker volume
+    delegate_to: localhost
+    ec2_vol:
+      region: "{{ ec2_region }}"
+      id: "{{ old_docker_volume_id }}"
+      instance: None
+
+  - name: tag the old vol valid label
+    delegate_to: localhost
+    ec2_tag: region={{ ec2_region }} resource={{old_docker_volume_id}}
+    args:
+      tags:
+        Name: "{{ ec2_tag_Name }} REMOVE ME"
+    register: voltags
+
+  - name: Update the /etc/sysconfig/docker-storage-setup with new device
+    lineinfile:
+      dest: /etc/sysconfig/docker-storage-setup
+      regexp: ^DEVS=
+      line: DEVS=/dev/xvdc

+ 55 - 0
playbooks/adhoc/noc/create_host.yml

@@ -0,0 +1,55 @@
+---
+- name: 'Create a host object in zabbix'
+  hosts: localhost
+  gather_facts: no
+  roles:
+    - os_zabbix
+  post_tasks:
+
+    - zbxapi:
+        server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
+        zbx_class: Template
+        state: list
+        params:
+          host: ctr_test_kwoodson
+          filter:
+            host:
+            -  ctr_kwoodson_test_tmpl
+
+      register: tmpl_results
+
+    - debug: var=tmpl_results
+
+#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
+- name: 'Create a host object in zabbix'
+  hosts: localhost
+  gather_facts: no
+  roles:
+    - os_zabbix
+  post_tasks:
+
+    - zbxapi:
+        server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
+        zbx_class: Host
+        state: absent
+        params:
+          host: ctr_test_kwoodson
+          interfaces:
+          - type: 1
+            main: 1
+            useip: 1
+            ip: 127.0.0.1
+            dns: ""
+            port: 10050
+          groups:
+          - groupid: 1
+          templates: "{{ tmpl_results.results | oo_collect('templateid') | oo_build_zabbix_list_dict('templateid') }}"
+          output: extend
+          filter:
+            host:
+            -  ctr_test_kwoodson
+
+      register: host_results
+
+    - debug: var=host_results
+

+ 36 - 0
playbooks/adhoc/noc/create_maintenance.yml

@@ -0,0 +1,36 @@
+---
+#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
+- name: 'Create a maintenance object in zabbix'
+  hosts: localhost
+  gather_facts: no
+  roles:
+    - os_zabbix
+  vars:
+    oo_hostids: ''
+    oo_groupids: ''
+  post_tasks:
+    - assert:
+        that: oo_desc is defined
+
+    - zbxapi:
+        server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
+        zbx_class: Maintenance
+        state: present
+        params:
+          name: "{{ oo_name }}"
+          description: "{{ oo_desc }}"
+          active_since: "{{ oo_start }}"
+          active_till: "{{ oo_stop }}"
+          maintenance_type: "0"
+          output: extend
+          hostids: "{{ oo_hostids.split(',') | default([]) }}"
+#groupids: "{{ oo_groupids.split(',') | default([]) }}"
+          timeperiods:
+          - start_time: "{{ oo_start }}"
+            period: "{{ oo_stop }}"
+          selectTimeperiods: extend
+
+      register: maintenance
+
+    - debug: var=maintenance
+

+ 1 - 1
playbooks/adhoc/noc/get_zabbix_problems.yml

@@ -11,7 +11,7 @@
     - zbxapi:
         server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
         zbx_class: Trigger
-        action: get
+        state: list
         params:
           only_true: true
           output: extend

+ 20 - 0
playbooks/adhoc/s3_registry/s3_registry.j2

@@ -0,0 +1,20 @@
+version: 0.1
+log:
+  level: debug
+http:
+  addr: :5000
+storage:
+  cache:
+    layerinfo: inmemory
+  s3:
+    accesskey: {{ aws_access_key }}
+    secretkey: {{ aws_secret_key }}
+    region: us-east-1
+    bucket: {{ clusterid }}-docker
+    encrypt: true
+    secure: true
+    v4auth: true
+    rootdirectory: /registry
+middleware:
+  repository:
+    - name: openshift

+ 71 - 0
playbooks/adhoc/s3_registry/s3_registry.yml

@@ -0,0 +1,71 @@
+---
+# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
+# Usage:
+#  ansible-playbook s3_registry.yml -e clusterid="mycluster"
+#
+# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
+# The 'clusterid' is the short name of your cluster.
+
+- hosts: tag_env-host-type_{{ clusterid }}-openshift-master
+  remote_user: root
+  gather_facts: False
+
+  vars:
+    aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
+    aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+
+  tasks:
+
+  - name: Check for AWS creds
+    fail: 
+      msg: "Couldn't find {{ item }} creds in ENV"
+    when: "{{ item }} == ''"
+    with_items:
+    - aws_access_key
+    - aws_secret_key
+
+  - name: Scale down registry
+    command: oc scale --replicas=0 dc/docker-registry
+
+  - name: Create S3 bucket
+    local_action:
+      module: s3 bucket="{{ clusterid }}-docker" mode=create
+
+  - name: Set up registry environment variable
+    command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
+
+  - name: Generate docker registry config
+    template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
+
+  - name: Determine if new secrets are needed
+    command: oc get secrets
+    register: secrets
+
+  - name: Create registry secrets
+    command: oc secrets new dockerregistry /root/config.yml
+    when: "'dockerregistry' not in secrets.stdout"
+
+  - name: Determine if service account contains secrets
+    command: oc describe serviceaccount/registry
+    register: serviceaccount
+
+  - name: Add secrets to registry service account
+    command: oc secrets add serviceaccount/registry secrets/dockerregistry
+    when: "'dockerregistry' not in serviceaccount.stdout"
+
+  - name: Determine if deployment config contains secrets
+    command: oc volume dc/docker-registry --list
+    register: dc
+
+  - name: Add secrets to registry deployment config
+    command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
+    when: "'dockersecrets' not in dc.stdout"
+
+  - name: Wait for deployment config to take effect before scaling up
+    pause: seconds=30
+
+  - name: Scale up registry
+    command: oc scale --replicas=1 dc/docker-registry
+
+  - name: Delete temporary config file
+    file: path=/root/config.yml state=absent

+ 145 - 0
playbooks/adhoc/uninstall.yml

@@ -0,0 +1,145 @@
+# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
+# Enterprise content installed by ansible.  This includes:
+#
+#    configuration
+#    containers
+#    example templates and imagestreams
+#    images
+#    RPMs
+---
+- hosts:
+    - OSEv3:children
+
+  sudo: yes
+
+  tasks:
+    - name: Detecting Operating System
+      shell: ls /run/ostree-booted
+      ignore_errors: yes
+      failed_when: false
+      register: ostree_output
+
+    - set_fact:
+        is_atomic: "{{ ostree_output.rc == 0 }}"
+
+    - service: name={{ item }} state=stopped
+      with_items:
+        - atomic-enterprise-master
+        - atomic-enterprise-node
+        - atomic-openshift-master
+        - atomic-openshift-master-api
+        - atomic-openshift-master-controllers
+        - atomic-openshift-node
+        - etcd
+        - openshift-master
+        - openshift-master-api
+        - openshift-master-controllers
+        - openshift-node
+        - openvswitch
+        - origin-master
+        - origin-master-api
+        - origin-master-controllers
+        - origin-node
+
+    - yum: name={{ item }} state=absent
+      when: not is_atomic | bool
+      with_items:
+        - atomic-enterprise
+        - atomic-enterprise-master
+        - atomic-enterprise-node
+        - atomic-enterprise-sdn-ovs
+        - atomic-openshift
+        - atomic-openshift-clients
+        - atomic-openshift-master
+        - atomic-openshift-node
+        - atomic-openshift-sdn-ovs
+        - etcd
+        - openshift
+        - openshift-master
+        - openshift-node
+        - openshift-sdn
+        - openshift-sdn-ovs
+        - openvswitch
+        - origin
+        - origin-master
+        - origin-node
+        - origin-sdn-ovs
+        - tuned-profiles-atomic-enterprise-node
+        - tuned-profiles-atomic-openshift-node
+        - tuned-profiles-openshift-node
+        - tuned-profiles-origin-node
+
+    - shell: systemctl reset-failed
+      changed_when: False
+
+    - shell: systemctl daemon-reload
+      changed_when: False
+
+    - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+      changed_when: False
+
+    - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+      changed_when: False
+
+    - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+      changed_when: False
+
+    - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node 
+      changed_when: False
+      failed_when: False
+      with_items:
+        - openshift-enterprise
+        - atomic-enterprise
+        - origin
+
+    - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}'
+      changed_when: False
+      failed_when: False
+      register: exited_containers_to_delete
+      with_items:
+        - aep3/aep
+        - openshift3/ose
+        - openshift/origin
+
+    - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+      changed_when: False
+      failed_when: False
+      with_items: "{{ exited_containers_to_delete.results }}"
+
+    - shell: docker images | grep {{ item }} | awk '{ print $3 }'
+      changed_when: False
+      failed_when: False
+      register: images_to_delete
+      with_items:
+        - registry.access.redhat.com/openshift3
+        - registry.access.redhat.com/aep3
+        - docker.io/openshift
+
+    - shell:  "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+      changed_when: False
+      failed_when: False
+      with_items: "{{ images_to_delete.results }}"
+
+    - file: path={{ item }} state=absent
+      with_items:
+        - /etc/ansible/facts.d/openshift.fact
+        - /etc/atomic-enterprise
+        - /etc/etcd
+        - /etc/openshift
+        - /etc/openshift-sdn
+        - /etc/origin
+        - /etc/sysconfig/atomic-enterprise-master
+        - /etc/sysconfig/atomic-enterprise-node
+        - /etc/sysconfig/atomic-openshift-master
+        - /etc/sysconfig/atomic-openshift-node
+        - /etc/sysconfig/openshift-master
+        - /etc/sysconfig/openshift-node
+        - /etc/sysconfig/origin-master
+        - /etc/sysconfig/origin-node
+        - /root/.kube
+        - "~{{ ansible_ssh_user }}/.kube"
+        - /usr/share/openshift/examples
+        - /var/lib/atomic-enterprise
+        - /var/lib/etcd
+        - /var/lib/openshift
+        - /var/lib/origin

+ 21 - 0
playbooks/adhoc/upgrades/README.md

@@ -0,0 +1,21 @@
+# [NOTE]
+This playbook will re-run installation steps, overwriting any local
+modifications. You should ensure that your inventory has been updated with any
+modifications you've made after your initial installation. If you find any items
+that cannot be configured via ansible, please open an issue at
+https://github.com/openshift/openshift-ansible
+
+# Overview
+This playbook is available as a technical preview. It currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Applies latest configuration by re-running the installation playbook
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+# Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml

+ 1 - 0
playbooks/adhoc/upgrades/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins/

+ 1 - 0
playbooks/adhoc/upgrades/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins/

+ 1 - 0
playbooks/adhoc/upgrades/roles

@@ -0,0 +1 @@
+../../../roles/

+ 138 - 0
playbooks/adhoc/upgrades/upgrade.yml

@@ -0,0 +1,138 @@
+---
+- name: Upgrade base package on masters
+  hosts: masters
+  roles:
+  - openshift_facts
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+    - name: Upgrade base package
+      yum: pkg={{ openshift.common.service_type }}{{ openshift_version  }} state=latest
+
+- name: Re-Run cluster configuration to apply latest configuration changes
+  include: ../../common/openshift-cluster/config.yml
+  vars:
+    g_etcd_group: "{{ 'etcd' }}"
+    g_masters_group: "{{ 'masters' }}"
+    g_nodes_group: "{{ 'nodes' }}"
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_deployment_type: "{{ deployment_type }}"
+
+- name: Upgrade masters
+  hosts: masters
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+    - name: Upgrade master packages
+      yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest
+    - name: Restart master services
+      service: name="{{ openshift.common.service_type}}-master" state=restarted
+
+- name: Upgrade nodes
+  hosts: nodes
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+    - name: Upgrade node packages
+      yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest
+    - name: Restart node services
+      service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+- name: Determine new master version
+  hosts: oo_first_master
+  tasks:
+    - name: Determine new version
+      command: >
+        rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
+      register: _new_version
+
+- name: Ensure AOS 3.0.2 or Origin 1.0.6
+  hosts: oo_first_master
+  tasks:
+    - fail:
+        msg: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
+      when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )
+
+- name: Update cluster policy
+  hosts: oo_first_master
+  tasks:
+    - name: oadm policy reconcile-cluster-roles --confirm
+      command: >
+        {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+        policy reconcile-cluster-roles --confirm
+
+- name: Update cluster policy bindings
+  hosts: oo_first_master
+  tasks:
+    - name: oadm policy reconcile-cluster-role-bindings --confirm
+      command: >
+        {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+        policy reconcile-cluster-role-bindings
+        --exclude-groups=system:authenticated
+        --exclude-groups=system:unauthenticated
+        --exclude-users=system:anonymous
+        --additive-only=true --confirm
+      when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
+
+- name: Upgrade default router
+  hosts: oo_first_master
+  vars:
+    - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
+    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+  tasks:
+    - name: Check for default router
+      command: >
+        {{ oc_cmd }} get -n default dc/router
+      register: _default_router
+      failed_when: false
+      changed_when: false
+    - name: Check for allowHostNetwork and allowHostPorts
+      when: _default_router.rc == 0
+      shell: >
+        {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
+      register: _scc
+    - name: Grant allowHostNetwork and allowHostPorts
+      when:
+        - _default_router.rc == 0
+        - "'false' in _scc.stdout"
+      command: >
+        {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
+    - name: Update deployment config to 1.0.4/3.0.1 spec
+      when: _default_router.rc == 0
+      command: >
+        {{ oc_cmd }} patch dc/router -p
+        '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+    - name: Switch to hostNetwork=true
+      when: _default_router.rc == 0
+      command: >
+        {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+    - name: Update router image to current version
+      when: _default_router.rc == 0
+      command: >
+        {{ oc_cmd }} patch dc/router -p
+        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+
+- name: Upgrade default registry
+  hosts: oo_first_master
+  vars:
+    - registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + _new_version.stdout  ) }}"
+    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+  tasks:
+    - name: Check for default registry
+      command: >
+          {{ oc_cmd }} get -n default dc/docker-registry
+      register: _default_registry
+      failed_when: false
+      changed_when: false
+    - name: Update registry image to current version
+      when: _default_registry.rc == 0
+      command: >
+        {{ oc_cmd }} patch dc/docker-registry -p
+        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+
+- name: Update image streams and templates
+  hosts: oo_first_master
+  vars:
+    openshift_examples_import_command: "update"
+    openshift_deployment_type: "{{ deployment_type }}"
+  roles:
+    - openshift_examples

+ 58 - 0
playbooks/adhoc/zabbix_setup/clean_zabbix.yml

@@ -0,0 +1,58 @@
+---
+- hosts: localhost
+  gather_facts: no
+  vars:
+    g_server: http://localhost:8080/zabbix/api_jsonrpc.php
+    g_user: ''
+    g_password: ''
+
+  roles:
+  - lib_zabbix
+
+  post_tasks:
+  - name: CLEAN List template for heartbeat
+    zbx_template:
+      zbx_server: "{{ g_server }}"
+      zbx_user: "{{ g_user }}"
+      zbx_password: "{{ g_password }}"
+      state: list
+      name: 'Template Heartbeat'
+    register: templ_heartbeat
+
+  - name: CLEAN List template app zabbix server
+    zbx_template:
+      zbx_server: "{{ g_server }}"
+      zbx_user: "{{ g_user }}"
+      zbx_password: "{{ g_password }}"
+      state: list
+      name: 'Template App Zabbix Server'
+    register: templ_zabbix_server
+
+  - name: CLEAN List template app zabbix agent
+    zbx_template:
+      zbx_server: "{{ g_server }}"
+      zbx_user: "{{ g_user }}"
+      zbx_password: "{{ g_password }}"
+      state: list
+      name: 'Template App Zabbix Agent'
+    register: templ_zabbix_agent
+
+  - name: CLEAN List all templates
+    zbx_template:
+      zbx_server: "{{ g_server }}"
+      zbx_user: "{{ g_user }}"
+      zbx_password: "{{ g_password }}"
+      state: list
+    register: templates
+
+  - debug: var=templ_heartbeat.results
+
+  - name: Remove templates if heartbeat template is missing
+    zbx_template:
+      zbx_server: "{{ g_server }}"
+      zbx_user: "{{ g_user }}"
+      zbx_password: "{{ g_password }}"
+      name: "{{ item }}"
+      state: absent
+    with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
+    when:  templ_heartbeat.results | length == 0
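
The final task above builds its deletion list by subtracting the Zabbix server and agent templates from the full template list and collecting each remaining entry's `host` field with the repo's `oo_collect` filter. A minimal Python sketch of that pipeline, assuming `oo_collect` simply extracts the named attribute from each result dict and that entries are matched by `templateid`:

```python
def oo_collect(results, attribute):
    # Sketch of the filter as used here: pick one attribute out of each dict
    return [item[attribute] for item in results if attribute in item]

def templates_to_remove(all_templates, server_templates, agent_templates):
    keep = {t['templateid'] for t in server_templates + agent_templates}
    remaining = [t for t in all_templates if t['templateid'] not in keep]
    return oo_collect(remaining, 'host')

# Example data shaped like Zabbix template.get results
all_t = [{'templateid': '1', 'host': 'Template Heartbeat'},
         {'templateid': '2', 'host': 'Template App Zabbix Agent'},
         {'templateid': '3', 'host': 'Template App Zabbix Server'}]
print(templates_to_remove(
    all_t,
    [{'templateid': '3', 'host': 'Template App Zabbix Server'}],
    [{'templateid': '2', 'host': 'Template App Zabbix Agent'}]))
# -> ['Template Heartbeat']
```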

+ 1 - 0
playbooks/adhoc/zabbix_setup/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins/

+ 7 - 0
playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml

@@ -0,0 +1,7 @@
+#!/usr/bin/env ansible-playbook
+---
+- include: clean_zabbix.yml
+  vars:
+    g_server: http://localhost/zabbix/api_jsonrpc.php
+    g_user: Admin
+    g_password: zabbix

+ 13 - 0
playbooks/adhoc/zabbix_setup/oo-config-zaio.yml

@@ -0,0 +1,13 @@
+#!/usr/bin/ansible-playbook
+---
+- hosts: localhost
+  gather_facts: no
+  vars:
+    g_server: http://localhost/zabbix/api_jsonrpc.php
+    g_user: Admin
+    g_password: zabbix
+  roles:
+  - role: os_zabbix
+    ozb_server: "{{ g_server }}"
+    ozb_user: "{{ g_user }}"
+    ozb_password: "{{ g_password }}"

playbooks/gce/openshift-node/roles → playbooks/adhoc/zabbix_setup/roles


+ 1 - 1
playbooks/aws/ansible-tower/launch.yml

@@ -6,7 +6,7 @@
 
   vars:
     inst_region: us-east-1
-    rhel7_ami: ami-78756d10
+    rhel7_ami: ami-9101c8fa
     user_data_file: user_data.txt
 
   vars_files:

+ 11 - 25
playbooks/aws/openshift-cluster/config.yml

@@ -1,37 +1,23 @@
 ---
-- name: Populate oo_masters_to_config host group
-  hosts: localhost
+- hosts: localhost
   gather_facts: no
   vars_files:
   - vars.yml
   tasks:
-  - name: Evaluate oo_masters_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_masters_to_config
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
-      groups: oo_first_master
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    when: "'tag_env-host-type_{{ cluster_id }}-openshift-master' in groups"
+  - set_fact:
+      g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+      g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
 
 - include: ../../common/openshift-cluster/config.yml
   vars:
+    g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+    g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
+    g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
+    g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+    g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+    g_nodeonmaster: true
     openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 4
+    openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"
-    openshift_first_master: "{{ groups.oo_first_master.0 }}"
     openshift_hostname: "{{ ec2_private_ip_address }}"
     openshift_public_hostname: "{{ ec2_ip_address }}"

+ 28 - 8
playbooks/aws/openshift-cluster/launch.yml

@@ -11,28 +11,48 @@
       msg: Deployment type not supported for aws provider yet
     when: deployment_type == 'enterprise'
 
+  - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ etcd_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+      g_sub_host_type: "default"
+
   - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ master_names }}"
       cluster: "{{ cluster_id }}"
       type: "{{ k8s_type }}"
+      g_sub_host_type: "default"
 
   - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+    vars:
+      type: "compute"
+      count: "{{ num_nodes }}"
   - include: tasks/launch_instances.yml
     vars:
       instances: "{{ node_names }}"
       cluster: "{{ cluster_id }}"
       type: "{{ k8s_type }}"
+      g_sub_host_type: "{{ sub_host_type }}"
 
-  - set_fact:
-      a_master: "{{ master_names[0] }}"
-  - add_host: name={{ a_master }} groups=service_master
-
-- include: update.yml
+  - include: ../../common/openshift-cluster/set_node_launch_facts_tasks.yml
+    vars:
+      type: "infra"
+      count: "{{ num_infra }}"
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ node_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+      g_sub_host_type: "{{ sub_host_type }}"
 
-- include: ../../common/openshift-cluster/create_services.yml
-  vars:
-     g_svc_master: "{{ service_master }}"
+  - add_host:
+      name: "{{ master_names.0 }}"
+      groups: service_master
+    when: master_names is defined and master_names.0 is defined
 
+- include: update.yml
 - include: list.yml

+ 1 - 1
playbooks/aws/openshift-cluster/library/ec2_ami_find.py

@@ -158,7 +158,7 @@ EXAMPLES = '''
 # Launch an EC2 instance
 - ec2:
     image: "{{ ami_search.results[0].ami_id }}"
-    instance_type: m3.medium
+    instance_type: m4.medium
     key_name: mykey
     wait: yes
 '''

+ 1 - 0
playbooks/aws/openshift-cluster/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 76 - 3
playbooks/aws/openshift-cluster/tasks/launch_instances.yml

@@ -5,6 +5,7 @@
     env: "{{ cluster }}"
     env_host_type: "{{ cluster }}-openshift-{{ type }}"
     host_type: "{{ type }}"
+    sub_host_type: "{{ g_sub_host_type }}"
 
 - set_fact:
     ec2_region: "{{ lookup('env', 'ec2_region')
@@ -34,6 +35,35 @@
     ec2_assign_public_ip: "{{ lookup('env', 'ec2_assign_public_ip')
                     | default(deployment_vars[deployment_type].assign_public_ip, true) }}"
   when: ec2_assign_public_ip is not defined
+
+- set_fact:
+    ec2_instance_type: "{{ ec2_master_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+    ec2_security_groups: "{{ ec2_master_security_groups
+                    | default(deployment_vars[deployment_type].security_groups, true) }}"
+  when: host_type == "master" and sub_host_type == "default"
+
+- set_fact:
+    ec2_instance_type: "{{ ec2_etcd_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+    ec2_security_groups: "{{ ec2_etcd_security_groups
+                    | default(deployment_vars[deployment_type].security_groups, true)}}"
+  when: host_type == "etcd" and sub_host_type == "default"
+
+- set_fact:
+    ec2_instance_type: "{{ ec2_infra_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+    ec2_security_groups: "{{ ec2_infra_security_groups
+                    | default(deployment_vars[deployment_type].security_groups, true) }}"
+  when: host_type == "node" and sub_host_type == "infra"
+
+- set_fact:
+    ec2_instance_type: "{{ ec2_node_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+    ec2_security_groups: "{{ ec2_node_security_groups
+                    | default(deployment_vars[deployment_type].security_groups, true) }}"
+  when: host_type == "node" and sub_host_type == "compute"
+
+- set_fact:
+    ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
+                          | default(deployment_vars[deployment_type].type, true) }}"
+  when: ec2_instance_type is not defined
 - set_fact:
     ec2_security_groups: "{{ lookup('env', 'ec2_security_groups')
                     | default(deployment_vars[deployment_type].security_groups, true) }}"
@@ -51,8 +81,17 @@
 
 - set_fact:
     latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
-    user_data: "{{ lookup('template', '../templates/user_data.j2') if type == 'node' else None | default('omit') }}"
+    user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
     volume_defs:
+      etcd:
+        root:
+          volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}"
+          device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}"
+        etcd:
+          volume_size: "{{ lookup('env', 'os_etcd_vol_size') | default(32, true) }}"
+          device_type: "{{ lookup('env', 'os_etcd_vol_type') | default('gp2', true) }}"
+          iops: "{{ lookup('env', 'os_etcd_vol_iops') | default(500, true) }}"
       master:
         root:
           volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
@@ -60,7 +99,7 @@
           iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
       node:
         root:
-          volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(25, true) }}"
+          volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}"
           device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}"
           iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}"
         docker:
@@ -90,6 +129,7 @@
       env: "{{ env }}"
       host-type: "{{ host_type }}"
       env-host-type: "{{ env_host_type }}"
+      sub-host-type: "{{ sub_host_type }}"
     volumes: "{{ volumes }}"
   register: ec2
 
@@ -103,7 +143,38 @@
       Name: "{{ item.0 }}"
 
 - set_fact:
-    instance_groups: tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }}
+    instance_groups: "tag_created-by_{{ created_by }}, tag_env_{{ env }},
+                    tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }},
+                    tag_sub-host-type_{{ sub_host_type }}"
+
+- set_fact:
+    node_label:
+      region: "{{ec2_region}}"
+      type: "{{sub_host_type}}"
+  when: host_type == "node"
+
+- set_fact:
+    node_label:
+      region: "{{ec2_region}}"
+      type: "{{host_type}}"
+  when: host_type != "node"
+
+- set_fact:
+    logrotate:
+        - name: syslog
+          path: "/var/log/cron
+                 \n/var/log/maillog
+                 \n/var/log/messages
+                 \n/var/log/secure
+                 \n/var/log/spooler \n"
+          options:
+            - daily
+            - rotate 7
+            - compress
+            - sharedscripts
+            - missingok
+          scripts:
+            postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
 
 - name: Add new instances groups and variables
   add_host:
@@ -114,6 +185,8 @@
     groups: "{{ instance_groups }}"
     ec2_private_ip_address: "{{ item.1.private_ip }}"
     ec2_ip_address: "{{ item.1.public_ip }}"
+    openshift_node_labels: "{{ node_label }}"
+    logrotate_scripts: "{{ logrotate }}"
   with_together:
   - instances
   - ec2.instances
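
The chain of `set_fact` tasks above resolves one EC2 instance type per host role: a role-specific variable such as `ec2_master_instance_type` or `ec2_infra_instance_type` applies when the host type and sub-host type match, the generic `ec2_instance_type` environment lookup covers any remaining case, and the deployment default is the final fallback. A minimal Python sketch of that resolution order (the variable names come from the playbook; everything else is illustrative):

```python
import os

ROLE_VARS = {
    ('master', 'default'): 'ec2_master_instance_type',
    ('etcd', 'default'):   'ec2_etcd_instance_type',
    ('node', 'infra'):     'ec2_infra_instance_type',
    ('node', 'compute'):   'ec2_node_instance_type',
}

def resolve_instance_type(host_type, sub_host_type, cluster_vars, deployment_default):
    role_var = ROLE_VARS.get((host_type, sub_host_type))
    if role_var is not None:
        # One of the role-specific set_fact tasks fires; default(..., true)
        # falls back to the deployment default when the override is unset.
        return cluster_vars.get(role_var) or deployment_default
    # Otherwise only the generic environment lookup applies.
    return os.environ.get('ec2_instance_type') or deployment_default

print(resolve_instance_type('node', 'infra',
                            {'ec2_infra_instance_type': 'c4.large'}, 'm4.large'))
# -> c4.large
```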

+ 29 - 13
playbooks/aws/openshift-cluster/templates/user_data.j2

@@ -1,17 +1,25 @@
 #cloud-config
-yum_repos:
-  jdetiber-copr:
-    name: Copr repo for origin owned by jdetiber
-    baseurl: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/epel-7-$basearch/
-    skip_if_unavailable: true
-    gpgcheck: true
-    gpgkey: https://copr-be.cloud.fedoraproject.org/results/jdetiber/origin/pubkey.gpg
-    enabled: true
+{% if type =='etcd' %}
+cloud_config_modules:
+- disk_setup
+- mounts
 
-packages:
-- xfsprogs # can be dropped after docker-storage-setup properly requires it: https://github.com/projectatomic/docker-storage-setup/pull/8
-- docker-storage-setup
+mounts:
+- [ xvdb, /var/lib/etcd, xfs, "defaults" ]
+
+disk_setup:
+  xvdb:
+    table_type: mbr
+    layout: True
 
+fs_setup:
+- label: etcd_storage
+  filesystem: xfs
+  device: /dev/xvdb
+  partition: auto
+{% endif %}
+
+{% if type == 'node' %}
 mounts:
 - [ xvdb ]
 - [ ephemeral0 ]
@@ -23,7 +31,15 @@ write_files:
   path: /etc/sysconfig/docker-storage-setup
   owner: root:root
   permissions: '0644'
+{% endif %}
+
+{% if deployment_type == 'online' %}
+devices: ['/var'] # Workaround for https://bugs.launchpad.net/bugs/1455436
 
+disable_root: 0
+growpart:
+  mode: auto
+  devices: ['/var']
 runcmd:
-- systemctl daemon-reload
-- systemctl enable lvm2-lvmetad.service docker-storage-setup.service
+- xfs_growfs /var
+{% endif %}
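
The rewritten user_data.j2 above now branches on the launch `type`, so etcd hosts receive a cloud-config that partitions, formats, and mounts /dev/xvdb at /var/lib/etcd, while node hosts keep the docker-storage-setup stanza. A minimal Python sketch of how such a branch renders, using a trimmed-down copy of the template rather than the full file:

```python
from jinja2 import Template  # the same templating engine Ansible uses

SNIPPET = """#cloud-config
{% if type == 'etcd' %}
mounts:
- [ xvdb, /var/lib/etcd, xfs, "defaults" ]
{% endif %}
{% if type == 'node' %}
mounts:
- [ xvdb ]
- [ ephemeral0 ]
{% endif %}"""

print(Template(SNIPPET).render(type='etcd'))
```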

+ 60 - 1
playbooks/aws/openshift-cluster/terminate.yml

@@ -13,4 +13,63 @@
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
     with_items: groups[scratch_group] | default([]) | difference(['localhost'])
 
-- include: ../terminate.yml
+- name: Unsubscribe VMs
+  hosts: oo_hosts_to_terminate
+  roles:
+  - role: rhel_unsubscribe
+    when: deployment_type == "enterprise" and
+          ansible_distribution == "RedHat" and
+          lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
+            default('no', True) | lower in ['no', 'false']
+
+- name: Terminate instances
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars:
+    host_vars: "{{ hostvars
+                   | oo_select_keys(groups['oo_hosts_to_terminate']) }}"
+  tasks:
+    - name: Remove tags from instances
+      ec2_tag: resource={{ item.ec2_id }} region={{ item.ec2_region }} state=absent
+      args:
+        tags:
+          env: "{{ item['ec2_tag_env'] }}"
+          host-type: "{{ item['ec2_tag_host-type'] }}"
+          env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
+          sub_host_type: "{{ item['ec2_tag_sub-host-type'] }}"
+      with_items: host_vars
+      when: "'oo_hosts_to_terminate' in groups"
+
+    - name: Terminate instances
+      ec2:
+        state: absent
+        instance_ids: ["{{ item.ec2_id }}"]
+        region: "{{ item.ec2_region }}"
+      ignore_errors: yes
+      register: ec2_term
+      with_items: host_vars
+      when: "'oo_hosts_to_terminate' in groups"
+
+    # Fail if any of the instances failed to terminate with an error other
+    # than 403 Forbidden
+    - fail: msg=Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}
+      when: "'oo_hosts_to_terminate' in groups and item.failed and not item.msg | search(\"error: EC2ResponseError: 403 Forbidden\")"
+      with_items: ec2_term.results
+
+    - name: Stop instance if termination failed
+      ec2:
+        state: stopped
+        instance_ids: ["{{ item.item.ec2_id }}"]
+        region: "{{ item.item.ec2_region }}"
+      register: ec2_stop
+      when: "'oo_hosts_to_terminate' in groups and item.failed"
+      with_items: ec2_term.results
+
+    - name: Rename stopped instances
+      ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+      args:
+        tags:
+          Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+      with_items: ec2_stop.results
+      when: "'oo_hosts_to_terminate' in groups"

+ 3 - 1
playbooks/aws/openshift-cluster/update.yml

@@ -11,7 +11,9 @@
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"]) | default([])
+    with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]))
+                | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]))
+                | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([]))
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 

+ 9 - 3
playbooks/aws/openshift-cluster/vars.online.int.yml

@@ -1,9 +1,15 @@
 ---
-ec2_image: ami-78756d10
+ec2_image: ami-9101c8fa
 ec2_image_name: libra-ops-rhel7*
 ec2_region: us-east-1
 ec2_keypair: mmcgrath_libra
-ec2_instance_type: m3.large
-ec2_security_groups: [ 'int-v3' ]
+ec2_master_instance_type: t2.medium
+ec2_master_security_groups: [ 'integration', 'integration-master' ]
+ec2_infra_instance_type: c4.large
+ec2_infra_security_groups: [ 'integration', 'integration-infra' ]
+ec2_node_instance_type: m4.large
+ec2_node_security_groups: [ 'integration', 'integration-node' ]
+ec2_etcd_instance_type: m4.large
+ec2_etcd_security_groups: [ 'integration', 'integration-etcd' ]
 ec2_vpc_subnet: subnet-987c0def
 ec2_assign_public_ip: yes

+ 9 - 3
playbooks/aws/openshift-cluster/vars.online.prod.yml

@@ -1,9 +1,15 @@
 ---
-ec2_image: ami-78756d10
+ec2_image: ami-9101c8fa
 ec2_image_name: libra-ops-rhel7*
 ec2_region: us-east-1
 ec2_keypair: mmcgrath_libra
-ec2_instance_type: m3.large
-ec2_security_groups: [ 'int-v3' ]
+ec2_master_instance_type: t2.medium
+ec2_master_security_groups: [ 'production', 'production-master' ]
+ec2_infra_instance_type: c4.large
+ec2_infra_security_groups: [ 'production', 'production-infra' ]
+ec2_node_instance_type: m4.large
+ec2_node_security_groups: [ 'production', 'production-node' ]
+ec2_etcd_instance_type: m4.large
+ec2_etcd_security_groups: [ 'production', 'production-etcd' ]
 ec2_vpc_subnet: subnet-987c0def
 ec2_assign_public_ip: yes

+ 9 - 3
playbooks/aws/openshift-cluster/vars.online.stage.yml

@@ -1,9 +1,15 @@
 ---
-ec2_image: ami-78756d10
+ec2_image: ami-9101c8fa
 ec2_image_name: libra-ops-rhel7*
 ec2_region: us-east-1
 ec2_keypair: mmcgrath_libra
-ec2_instance_type: m3.large
-ec2_security_groups: [ 'int-v3' ]
+ec2_master_instance_type: t2.medium
+ec2_master_security_groups: [ 'stage', 'stage-master' ]
+ec2_infra_instance_type: c4.large
+ec2_infra_security_groups: [ 'stage', 'stage-infra' ]
+ec2_node_instance_type: m4.large
+ec2_node_security_groups: [ 'stage', 'stage-node' ]
+ec2_etcd_instance_type: m4.large
+ec2_etcd_security_groups: [ 'stage', 'stage-etcd' ]
 ec2_vpc_subnet: subnet-987c0def
 ec2_assign_public_ip: yes

+ 6 - 6
playbooks/aws/openshift-cluster/vars.yml

@@ -1,14 +1,14 @@
 ---
 deployment_vars:
   origin:
-    # fedora, since centos requires marketplace
-    image: ami-acd999c4
+    # centos-7, requires marketplace
+    image: ami-96a818fe
     image_name:
     region: us-east-1
-    ssh_user: fedora
+    ssh_user: centos
     sudo: yes
     keypair: libra
-    type: m3.large
+    type: m4.large
     security_groups: [ 'public' ]
     vpc_subnet:
     assign_public_ip:
@@ -20,7 +20,7 @@ deployment_vars:
     ssh_user: root
     sudo: no
     keypair: libra
-    type: m3.large
+    type: m4.large
     security_groups: [ 'public' ]
     vpc_subnet:
     assign_public_ip:
@@ -32,7 +32,7 @@ deployment_vars:
     ssh_user: ec2-user
     sudo: yes
     keypair: libra
-    type: m3.large
+    type: m4.large
     security_groups: [ 'public' ]
     vpc_subnet:
     assign_public_ip:

+ 0 - 19
playbooks/aws/openshift-master/config.yml

@@ -1,19 +0,0 @@
----
-- name: Populate oo_masters_to_config host group
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - name: Evaluate oo_masters_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_masters_to_config
-      ansible_ssh_user: root
-    with_items: oo_host_group_exp | default([])
-
-- include: ../../common/openshift-master/config.yml
-  vars:
-    openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 4
-    openshift_deployment_type: "{{ deployment_type }}"
-    openshift_hostname: "{{ ec2_private_ip_address }}"
-    openshift_public_hostname: "{{ ec2_ip_address }}"

+ 0 - 1
playbooks/aws/openshift-master/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 70
playbooks/aws/openshift-master/launch.yml

@@ -1,70 +0,0 @@
----
-- name: Launch instance(s)
-  hosts: localhost
-  connection: local
-  gather_facts: no
-
-# TODO: modify g_ami based on deployment_type
-  vars:
-    inst_region: us-east-1
-    g_ami: ami-86781fee
-    user_data_file: user_data.txt
-
-  tasks:
-    - name: Launch instances
-      ec2:
-        state: present
-        region: "{{ inst_region }}"
-        keypair: libra
-        group: ['public']
-        instance_type: m3.large
-        image: "{{ g_ami }}"
-        count: "{{ oo_new_inst_names | length }}"
-        user_data: "{{ lookup('file', user_data_file) }}"
-        wait: yes
-      register: ec2
-
-    - name: Add new instances public IPs to the host group
-      add_host: "hostname={{ item.public_ip }} groupname=new_ec2_instances"
-      with_items: ec2.instances
-
-    - name: Add Name and environment tags to instances
-      ec2_tag: "resource={{ item.1.id }} region={{ inst_region }} state=present"
-      with_together:
-        - oo_new_inst_names
-        - ec2.instances
-      args:
-        tags:
-          Name: "{{ item.0 }}"
-
-    - name: Add other tags to instances
-      ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
-      with_items: ec2.instances
-      args:
-        tags: "{{ oo_new_inst_tags }}"
-
-    - name: Add new instances public IPs to oo_masters_to_config
-      add_host:
-        hostname: "{{ item.0 }}"
-        ansible_ssh_host: "{{ item.1.dns_name }}"
-        groupname: oo_masters_to_config
-        ec2_private_ip_address: "{{ item.1.private_ip }}"
-        ec2_ip_address: "{{ item.1.public_ip }}"
-      with_together:
-        - oo_new_inst_names
-        - ec2.instances
-
-    - name: Wait for ssh
-      wait_for: port=22 host={{ item.dns_name }}
-      with_items: ec2.instances
-
-    - name: Wait for root user setup
-      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
-      register: result
-      until: result.rc == 0
-      retries: 20
-      delay: 10
-      with_items: ec2.instances
-
-# Apply the configs, seprate so that just the configs can be run by themselves
-- include: config.yml

+ 0 - 1
playbooks/aws/openshift-master/roles

@@ -1 +0,0 @@
-../../../roles

+ 0 - 2
playbooks/aws/openshift-master/terminate.yml

@@ -1,2 +0,0 @@
----
-- include: ../terminate.yml

+ 0 - 26
playbooks/aws/openshift-node/config.yml

@@ -1,26 +0,0 @@
----
-- name: Populate oo_nodes_to_config and oo_first_master host groups
-  hosts: localhost
-  gather_facts: no
-  tasks:
-  - name: Evaluate oo_nodes_to_config
-    add_host:
-      name: "{{ item }}"
-      groups: oo_nodes_to_config
-      ansible_ssh_user: root
-    with_items: oo_host_group_exp | default([])
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups['tag_env-host-type_' ~ cluster_id ~ '-openshift-master'][0] }}"
-      groups: oo_first_master
-      ansible_ssh_user: root
-
-
-- include: ../../common/openshift-node/config.yml
-  vars:
-    openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 4
-    openshift_deployment_type: "{{ deployment_type }}"
-    openshift_first_master: "{{ groups.oo_first_master.0 }}"
-    openshift_hostname: "{{ ec2_private_ip_address }}"
-    openshift_public_hostname: "{{ ec2_ip_address }}"

+ 0 - 1
playbooks/aws/openshift-node/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 72
playbooks/aws/openshift-node/launch.yml

@@ -1,72 +0,0 @@
----
-- name: Launch instance(s)
-  hosts: localhost
-  connection: local
-  gather_facts: no
-
-# TODO: modify g_ami based on deployment_type
-  vars:
-    inst_region: us-east-1
-    g_ami: ami-86781fee
-    user_data_file: user_data.txt
-
-  tasks:
-    - name: Launch instances
-      ec2:
-        state: present
-        region: "{{ inst_region }}"
-        keypair: libra
-        group: ['public']
-        instance_type: m3.large
-        image: "{{ g_ami }}"
-        count: "{{ oo_new_inst_names | length }}"
-        user_data: "{{ lookup('file', user_data_file) }}"
-        wait: yes
-      register: ec2
-
-    - name: Add new instances public IPs to the host group
-      add_host:
-        hostname: "{{ item.public_ip }}"
-        groupname: new_ec2_instances"
-      with_items: ec2.instances
-
-    - name: Add Name and environment tags to instances
-      ec2_tag: resource={{ item.1.id }} region={{ inst_region }} state=present
-      with_together:
-        - oo_new_inst_names
-        - ec2.instances
-      args:
-        tags:
-          Name: "{{ item.0 }}"
-
-    - name: Add other tags to instances
-      ec2_tag: resource={{ item.id }} region={{ inst_region }} state=present
-      with_items: ec2.instances
-      args:
-        tags: "{{ oo_new_inst_tags }}"
-
-    - name: Add new instances public IPs to oo_nodes_to_config
-      add_host:
-        hostname: "{{ item.0 }}"
-        ansible_ssh_host: "{{ item.1.dns_name }}"
-        groupname: oo_nodes_to_config
-        ec2_private_ip_address: "{{ item.1.private_ip }}"
-        ec2_ip_address: "{{ item.1.public_ip }}"
-      with_together:
-        - oo_new_inst_names
-        - ec2.instances
-
-    - name: Wait for ssh
-      wait_for: port=22 host={{ item.dns_name }}
-      with_items: ec2.instances
-
-    - name: Wait for root user setup
-      command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null root@{{ item.dns_name }} echo root user is setup"
-      register: result
-      until: result.rc == 0
-      retries: 20
-      delay: 10
-      with_items: ec2.instances
-
-# Apply the configs, seprate so that just the configs can be run by themselves
-- include: config.yml

+ 0 - 1
playbooks/aws/openshift-node/roles

@@ -1 +0,0 @@
-../../../roles

+ 0 - 2
playbooks/aws/openshift-node/terminate.yml

@@ -1,2 +0,0 @@
----
-- include: ../terminate.yml

+ 0 - 0
playbooks/aws/terminate.yml


Some files in this diff are not shown because too many files have changed.