Browse Source

fix merge conflicts

Diego Castro 9 years ago
parent
commit
f559eb3146
100 changed files with 2755 additions and 712 deletions
  1. 23 0
      DEPLOYMENT_TYPES.md
  2. 17 0
      Dockerfile
  3. 3 0
      README.md
  4. 15 0
      README_ANSIBLE_CONTAINER.md
  5. 7 15
      README_AWS.md
  6. 14 3
      README_GCE.md
  7. 3 16
      README_OSE.md
  8. 19 11
      README_libvirt.md
  9. 6 6
      README_openstack.md
  10. 2 16
      README_origin.md
  11. 4 5
      Vagrantfile
  12. 35 25
      bin/cluster
  13. 19 1
      bin/openshift-ansible-bin.spec
  14. 2 2
      bin/oscp
  15. 2 2
      bin/ossh
  16. 22 2
      filter_plugins/oo_filters.py
  17. 29 0
      filter_plugins/oo_zabbix_filters.py
  18. 1 1
      git/pylint.sh
  19. 27 6
      inventory/byo/hosts.example
  20. 6 3
      inventory/gce/hosts/gce.py
  21. 27 1
      inventory/openshift-ansible-inventory.spec
  22. 1 1
      inventory/openstack/hosts/nova.py
  23. 29 0
      playbooks/adhoc/atomic_openshift_tutorial_reset.yml
  24. 17 0
      playbooks/adhoc/create_pv/create_pv.yaml
  25. 2 0
      playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup
  26. 142 0
      playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
  27. 104 0
      playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml
  28. 69 0
      playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml
  29. 41 0
      playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
  30. 206 0
      playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
  31. 20 0
      playbooks/adhoc/s3_registry/s3_registry.j2
  32. 71 0
      playbooks/adhoc/s3_registry/s3_registry.yml
  33. 134 0
      playbooks/adhoc/uninstall.yml
  34. 21 0
      playbooks/adhoc/upgrades/README.md
  35. 1 0
      playbooks/adhoc/upgrades/filter_plugins
  36. 1 0
      playbooks/adhoc/upgrades/lookup_plugins
  37. 1 0
      playbooks/adhoc/upgrades/roles
  38. 128 0
      playbooks/adhoc/upgrades/upgrade.yml
  39. 32 25
      playbooks/adhoc/zabbix_setup/clean_zabbix.yml
  40. 0 57
      playbooks/adhoc/zabbix_setup/create_template.yml
  41. 1 1
      playbooks/adhoc/zabbix_setup/filter_plugins
  42. 7 0
      playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
  43. 13 0
      playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
  44. 1 1
      playbooks/adhoc/zabbix_setup/roles
  45. 0 38
      playbooks/adhoc/zabbix_setup/setup_zabbix.yml
  46. 0 11
      playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml
  47. 0 27
      playbooks/adhoc/zabbix_setup/vars/template_host.yml
  48. 0 27
      playbooks/adhoc/zabbix_setup/vars/template_master.yml
  49. 0 27
      playbooks/adhoc/zabbix_setup/vars/template_node.yml
  50. 0 90
      playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml
  51. 0 27
      playbooks/adhoc/zabbix_setup/vars/template_router.yml
  52. 1 1
      playbooks/aws/openshift-cluster/config.yml
  53. 0 5
      playbooks/aws/openshift-cluster/launch.yml
  54. 31 0
      playbooks/aws/openshift-cluster/tasks/launch_instances.yml
  55. 1 1
      playbooks/aws/openshift-cluster/vars.online.int.yml
  56. 1 1
      playbooks/aws/openshift-cluster/vars.online.prod.yml
  57. 1 1
      playbooks/aws/openshift-cluster/vars.online.stage.yml
  58. 1 1
      playbooks/byo/openshift-cluster/config.yml
  59. 1 1
      playbooks/byo/openshift_facts.yml
  60. 0 8
      playbooks/common/openshift-cluster/create_services.yml
  61. 1 0
      playbooks/common/openshift-etcd/config.yml
  62. 36 7
      playbooks/common/openshift-master/config.yml
  63. 2 2
      playbooks/common/openshift-master/service.yml
  64. 8 13
      playbooks/common/openshift-node/config.yml
  65. 2 2
      playbooks/common/openshift-node/service.yml
  66. 6 1
      playbooks/gce/openshift-cluster/config.yml
  67. 49 0
      playbooks/gce/openshift-cluster/join_node.yml
  68. 17 16
      playbooks/gce/openshift-cluster/launch.yml
  69. 2 2
      playbooks/gce/openshift-cluster/list.yml
  70. 26 6
      playbooks/gce/openshift-cluster/tasks/launch_instances.yml
  71. 34 21
      playbooks/gce/openshift-cluster/terminate.yml
  72. 5 3
      playbooks/gce/openshift-cluster/vars.yml
  73. 1 1
      playbooks/libvirt/openshift-cluster/config.yml
  74. 8 0
      playbooks/libvirt/openshift-cluster/launch.yml
  75. 4 3
      playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
  76. 1 1
      playbooks/libvirt/openshift-cluster/templates/network.xml
  77. 1 1
      playbooks/libvirt/openshift-cluster/templates/user-data
  78. 1 1
      playbooks/openstack/openshift-cluster/config.yml
  79. 24 18
      playbooks/openstack/openshift-cluster/files/heat_stack.yaml
  80. 11 22
      playbooks/openstack/openshift-cluster/launch.yml
  81. 0 27
      playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml
  82. 2 6
      playbooks/openstack/openshift-cluster/vars.yml
  83. 1 1
      rel-eng/packages/openshift-ansible-bin
  84. 1 1
      rel-eng/packages/openshift-ansible-inventory
  85. 1 2
      roles/ansible_tower/tasks/main.yaml
  86. 5 0
      roles/cockpit/defaults/main.yml
  87. 15 0
      roles/cockpit/meta/main.yml
  88. 16 0
      roles/cockpit/tasks/main.yml
  89. 1 0
      roles/etcd/tasks/main.yml
  90. 2 0
      roles/etcd_ca/tasks/main.yml
  91. 7 3
      roles/fluentd_master/tasks/main.yml
  92. 38 0
      roles/lib_zabbix/README.md
  93. 3 0
      roles/lib_zabbix/library/__init__.py
  94. 538 0
      roles/lib_zabbix/library/zbx_action.py
  95. 41 59
      roles/os_zabbix/library/zbx_item.py
  96. 205 0
      roles/lib_zabbix/library/zbx_discoveryrule.py
  97. 20 19
      roles/os_zabbix/library/zbx_host.py
  98. 8 8
      roles/os_zabbix/library/zbx_hostgroup.py
  99. 250 0
      roles/lib_zabbix/library/zbx_item.py
  100. 0 0
      roles/lib_zabbix/library/zbx_itemprototype.py

+ 23 - 0
DEPLOYMENT_TYPES.md

@@ -0,0 +1,23 @@
+# Deployment Types
+
+This module supports OpenShift Origin, OpenShift Enterprise, and Atomic
+Enterprise Platform. Each deployment type sets various defaults used throughout
+your environment.
+
+The table below outlines the defaults per `deployment_type`.
+
+| deployment_type                                                 | origin                                   | enterprise (< 3.1)                     | atomic-enterprise                | openshift-enterprise (>= 3.1)    |
+|-----------------------------------------------------------------|------------------------------------------|----------------------------------------|----------------------------------|----------------------------------|
+| **openshift.common.service_type** (also used for package names) | origin                                   | openshift                              | atomic-openshift                 |                                  |
+| **openshift.common.config_base**                                | /etc/origin                              | /etc/openshift                         | /etc/origin                      | /etc/origin                      |
+| **openshift.common.data_dir**                                   | /var/lib/origin                          | /var/lib/openshift                     | /var/lib/origin                  | /var/lib/origin                  |
+| **openshift.master.registry_url openshift.node.registry_url**   | openshift/origin-${component}:${version} | openshift3/ose-${component}:${version} | aos3/aos-${component}:${version} | aos3/aos-${component}:${version} |
+| **Image Streams**                                               | centos                                   | rhel + xpaas                           | N/A                              | rhel                             |
+
+
+**NOTE** The `enterprise` deployment type is used for OpenShift Enterprise version
+3.0.x. OpenShift Enterprise deployments utilizing version 3.1 and later will
+make use of the new `openshift-enterprise` deployment type.  Additional work to
+migrate between the two will be forthcoming.
+
+

+ 17 - 0
Dockerfile

@@ -0,0 +1,17 @@
+FROM rhel7
+
+MAINTAINER Aaron Weitekamp <aweiteka@redhat.com>
+
+RUN yum -y install http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+
+# Not sure if all of these packages are necessary
+# only git and ansible are known requirements
+RUN yum install -y --enablerepo rhel-7-server-extras-rpms net-tools bind-utils git ansible
+
+ADD ./  /opt/openshift-ansible/
+
+ENTRYPOINT ["/usr/bin/ansible-playbook"]
+
+CMD ["/opt/openshift-ansible/playbooks/byo/config.yml"]
+
+LABEL RUN docker run -it --rm --privileged --net=host -v ~/.ssh:/root/.ssh -v /etc/ansible:/etc/ansible --name NAME -e NAME=NAME -e IMAGE=IMAGE IMAGE

+ 3 - 0
README.md

@@ -37,6 +37,9 @@ This repo contains Ansible code for Openshift and Atomic Enterprise.
   - [roles/](roles) - shareable Ansible tasks
 
 ##Contributing
+- [Best Practices Guide](docs/best_practices_guide.adoc)
+- [Core Concepts](docs/core_concepts_guide.adoc)
+- [Style Guide](docs/style_guide.adoc)
 
 ###Feature Roadmap
 Our Feature Roadmap is available on the OpenShift Origin Infrastructure [Trello board](https://trello.com/b/nbkIrqKa/openshift-origin-infrastructure). All ansible items will be tagged with [installv3].

+ 15 - 0
README_ANSIBLE_CONTAINER.md

@@ -0,0 +1,15 @@
+# Running ansible in a docker container
+* Building ansible container:
+
+  ```sh
+  git clone https://github.com/openshift/openshift-ansible.git
+  cd openshift-ansible
+  docker build --rm -t ansible .
+  ```
+* Create /etc/ansible directory on the host machine and copy inventory file (hosts) into it.
+* Copy ssh public key of the host machine to master and nodes machines in the cluster.
+* Running the ansible container:
+
+  ```sh
+  docker run -it --rm --privileged --net=host -v ~/.ssh:/root/.ssh -v /etc/ansible:/etc/ansible ansible
+  ```

+ 7 - 15
README_AWS.md

@@ -154,18 +154,10 @@ Note: If no deployment type is specified, then the default is origin.
 
 
 ## Post-ansible steps
-Create the default router
--------------------------
-On the master host:
-```sh
-oadm router --create=true \
-  --credentials=/etc/openshift/master/openshift-router.kubeconfig
-```
-
-Create the default docker-registry
-----------------------------------
-On the master host:
-```sh
-oadm registry --create=true \
-  --credentials=/etc/openshift/master/openshift-registry.kubeconfig
-```
+
+You should now be ready to follow the **What's Next?** section of the advanced installation guide to deploy your router, registry, and other components.
+
+Refer to the advanced installation guide for your deployment type:
+
+* [OpenShift Enterprise](https://docs.openshift.com/enterprise/3.0/install_config/install/advanced_install.html#what-s-next)
+* [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html#what-s-next)

+ 14 - 3
README_GCE.md

@@ -39,6 +39,13 @@ Create a gce.ini file for GCE
 * gce_service_account_pem_file_path - Full path from previous steps
* gce_project_id - Found in "Projects", it lists all the gce projects you are associated with.  The page lists their "Project Name" and "Project ID".  You want the "Project ID"
 
+Mandatory customization variables (check the values according to your tenant):
+* zone = europe-west1-d
+* network = default
+* gce_machine_type = n1-standard-2
+* gce_machine_image = preinstalled-slave-50g-v5
+
+
 1. vi ~/.gce/gce.ini
 1. make the contents look like this:
 ```
@@ -46,11 +53,15 @@ Create a gce.ini file for GCE
 gce_service_account_email_address = long...@developer.gserviceaccount.com
 gce_service_account_pem_file_path = /full/path/to/project_id-gce_key_hash.pem
 gce_project_id = project_id
+zone = europe-west1-d
+network = default
+gce_machine_type = n1-standard-2
+gce_machine_image = preinstalled-slave-50g-v5
+
 ```
-1. Setup a sym link so that gce.py will pick it up (link must be in same dir as gce.py)
+1. Define the environment variable GCE_INI_PATH so gce.py can pick it up and bin/cluster can also read it
 ```
-  cd openshift-ansible/inventory/gce
-  ln -s ~/.gce/gce.ini gce.ini
+export GCE_INI_PATH=~/.gce/gce.ini
 ```
 
 

+ 3 - 16
README_OSE.md

@@ -80,7 +80,7 @@ ansible_ssh_user=root
 deployment_type=enterprise
 
 # Pre-release registry URL
-oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}
+oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
 
 # Pre-release additional repo
 openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel',
@@ -101,6 +101,7 @@ ose3-master.example.com
 
 # host group for nodes
 [nodes]
+ose3-master.example.com
 ose3-node[1:2].example.com
 ```
 
@@ -116,22 +117,8 @@ ansible-playbook playbooks/byo/config.yml
 inventory file use the -i option for ansible-playbook.
 
 ## Post-ansible steps
-#### Create the default router
-On the master host:
-```sh
-oadm router --create=true \
-  --credentials=/etc/openshift/master/openshift-router.kubeconfig \
-  --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}'
-```
 
-#### Create the default docker-registry
-On the master host:
-```sh
-oadm registry --create=true \
-  --credentials=/etc/openshift/master/openshift-registry.kubeconfig \
-  --images='docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}' \
-  --mount-host=/var/lib/openshift/docker-registry
-```
+You should now be ready to follow the [What's Next?](https://docs.openshift.com/enterprise/3.0/install_config/install/advanced_install.html#what-s-next) section of the advanced installation guide to deploy your router, registry, and other components.
 
 ## Overriding detected ip addresses and hostnames
 Some deployments will require that the user override the detected hostnames

+ 19 - 11
README_libvirt.md

@@ -8,16 +8,18 @@ This makes `libvirt` useful to develop, test and debug OpenShift and openshift-a
 Install dependencies
 --------------------
 
-1.	Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
-2.	Install [ebtables](http://ebtables.netfilter.org/)
-3.	Install [qemu](http://wiki.qemu.org/Main_Page)
-4.	Install [libvirt](http://libvirt.org/)
-5.	Enable and start the libvirt daemon, e.g:
+1.      Install [ansible](http://www.ansible.com/)
+2.	Install [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html)
+3.	Install [ebtables](http://ebtables.netfilter.org/)
+4.	Install [qemu and qemu-system-x86](http://wiki.qemu.org/Main_Page)
+5.	Install [libvirt-python and libvirt](http://libvirt.org/)
+6.	Install [genisoimage](http://cdrkit.org/)
+7.	Enable and start the libvirt daemon, e.g:
 	-	`systemctl enable libvirtd`
 	-	`systemctl start libvirtd`
-6.	[Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
-7.	Check that your `$HOME` is accessible to the qemu user²
-8.	Configure dns resolution on the host³
+8.	[Grant libvirt access to your user¹](https://libvirt.org/aclpolkit.html)
+9.	Check that your `$HOME` is accessible to the qemu user²
+10.	Configure dns resolution on the host³
 
 #### ¹ Depending on your distribution, libvirt access may be denied by default or may require a password at each access.
 
@@ -68,9 +70,14 @@ If your `$HOME` is world readable, everything is fine. If your `$HOME` is privat
 error: Cannot access storage file '$HOME/libvirt-storage-pool-openshift/lenaic-master-216d8.qcow2' (as uid:99, gid:78): Permission denied
 ```
 
-In order to fix that issue, you have several possibilities:* set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory: * backed by a filesystem with a lot of free disk space * writable by your user; * accessible by the qemu user.* Grant the qemu user access to the storage pool.
+In order to fix that issue, you have several possibilities:
+ * set `libvirt_storage_pool_path` inside `playbooks/libvirt/openshift-cluster/launch.yml` and `playbooks/libvirt/openshift-cluster/terminate.yml` to a directory:
+   * backed by a filesystem with a lot of free disk space
+   * writable by your user;
+   * accessible by the qemu user.
+ * Grant the qemu user access to the storage pool.
 
-On Arch:
+On Arch or Fedora 22+:
 
 ```
 setfacl -m g:kvm:--x ~
@@ -89,7 +96,8 @@ dns=dnsmasq
 -	Configure dnsmasq to use the Virtual Network router for example.com:
 
 ```sh
-sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf server=/example.com/192.168.55.1
+sudo vi /etc/NetworkManager/dnsmasq.d/libvirt_dnsmasq.conf
+server=/example.com/192.168.55.1
 ```
 
 Test The Setup

+ 6 - 6
README_openstack.md

@@ -25,20 +25,20 @@ Configuration
 
 The following options can be passed via the `-o` flag of the `create` command:
 
-* `image_name`: Name of the image to use to spawn VMs
-* `keypair` (default to `${LOGNAME}_key`): Name of the ssh key
-* `public_key` (default to `~/.ssh/id_rsa.pub`): filename of the ssh public key
-* `master_flavor` (default to `m1.small`): The ID or name of the flavor for the master
-* `node_flavor` (default to `m1.medium`): The ID or name of the flavor for the nodes
 * `infra_heat_stack` (default to `playbooks/openstack/openshift-cluster/files/heat_stack.yaml`): filename of the HEAT template to use to create the cluster infrastructure
 
 The following options are used only by `heat_stack.yaml`. They are so used only if the `infra_heat_stack` option is left with its default value.
 
+* `image_name`: Name of the image to use to spawn VMs
+* `public_key` (default to `~/.ssh/id_rsa.pub`): filename of the ssh public key
+* `master_flavor` (default to `m1.small`): The ID or name of the flavor for the master
+* `node_flavor` (default to `m1.medium`): The ID or name of the flavor for the compute nodes
+* `infra_flavor` (default to `m1.small`): The ID or name of the flavor for the infrastructure nodes
 * `network_prefix` (default to `openshift-ansible-<cluster_id>`): prefix prepended to all network objects (net, subnet, router, security groups)
 * `dns` (default to `8.8.8.8,8.8.4.4`): comma separated list of DNS to use
 * `net_cidr` (default to `192.168.<rand()>.0/24`): CIDR of the network created by `heat_stack.yaml`
 * `external_net` (default to `external`): Name of the external network to connect to
-* `floating_ip_pools` (default to `external`): comma separated list of floating IP pools
+* `floating_ip_pool` (default to `external`): comma separated list of floating IP pools
 * `ssh_from` (default to `0.0.0.0/0`): IPs authorized to connect to the VMs via ssh
 
 

+ 2 - 16
README_origin.md

@@ -73,6 +73,7 @@ osv3-master.example.com
 
 # host group for nodes
 [nodes]
+osv3-master.example.com
 osv3-node[1:2].example.com
 ```
 
@@ -88,23 +89,8 @@ ansible-playbook playbooks/byo/config.yml
 inventory file use the -i option for ansible-playbook.
 
 ## Post-ansible steps
-#### Create the default router
-On the master host:
-```sh
-oadm router --create=true \
-  --credentials=/etc/openshift/master/openshift-router.kubeconfig
-```
-
-#### Create the default docker-registry
-On the master host:
-```sh
-oadm registry --create=true \
-  --credentials=/etc/openshift/master/openshift-registry.kubeconfig
-```
 
-If you would like persistent storage, refer to the
-[OpenShift documentation](https://docs.openshift.org/latest/admin_guide/install/docker_registry.html)
-for more information on deployment options for the built in docker-registry.
+You should now be ready to follow the [What's Next?](https://docs.openshift.org/latest/install_config/install/advanced_install.html#what-s-next) section of the advanced installation guide to deploy your router, registry, and other components.
 
 ## Overriding detected ip addresses and hostnames
 Some deployments will require that the user override the detected hostnames

+ 4 - 5
Vagrantfile

@@ -38,7 +38,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   end
 
   config.vm.provider "virtualbox" do |vbox, override|
-    override.vm.box = "chef/centos-7.1"
+    override.vm.box = "centos/7"
     vbox.memory = 1024
     vbox.cpus = 2
 
@@ -54,8 +54,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     when "enterprise"
       override.vm.box = "rhel-7"
     when "origin"
-      override.vm.box = "centos-7.1"
-      override.vm.box_url = "https://download.gluster.org/pub/gluster/purpleidea/vagrant/centos-7.1/centos-7.1.box"
+      override.vm.box = "centos/7"
       override.vm.box_download_checksum = "b2a9f7421e04e73a5acad6fbaf4e9aba78b5aeabf4230eebacc9942e577c1e05"
       override.vm.box_download_checksum_type = "sha256"
     end
@@ -66,7 +65,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     config.vm.define "node#{node_index}" do |node|
       node.vm.hostname = "ose3-node#{node_index}.example.com"
       node.vm.network :private_network, ip: "192.168.100.#{200 + n}"
-      config.vm.provision "shell", inline: "nmcli connection reload; systemctl restart network.service"
+      config.vm.provision "shell", inline: "nmcli connection reload; systemctl restart NetworkManager.service"
     end
   end
 
@@ -74,7 +73,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     master.vm.hostname = "ose3-master.example.com"
     master.vm.network :private_network, ip: "192.168.100.100"
     master.vm.network :forwarded_port, guest: 8443, host: 8443
-    config.vm.provision "shell", inline: "nmcli connection reload; systemctl restart network.service"
+    config.vm.provision "shell", inline: "nmcli connection reload; systemctl restart NetworkManager.service"
     master.vm.provision "ansible" do |ansible|
       ansible.limit = 'all'
       ansible.sudo = true

+ 35 - 25
bin/cluster

@@ -5,6 +5,7 @@ import argparse
 import ConfigParser
 import os
 import sys
+import subprocess
 import traceback
 
 
@@ -48,11 +49,11 @@ class Cluster(object):
             deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
         return deployment_type
 
+
     def create(self, args):
         """
         Create an OpenShift cluster for given provider
         :param args: command line arguments provided by user
-        :return: exit status from run command
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
@@ -64,65 +65,60 @@ class Cluster(object):
         env['num_infra'] = args.infra
         env['num_etcd'] = args.etcd
 
-        return self.action(args, inventory, env, playbook)
+        self.action(args, inventory, env, playbook)
 
     def terminate(self, args):
         """
         Destroy OpenShift cluster
         :param args: command line arguments provided by user
-        :return: exit status from run command
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        return self.action(args, inventory, env, playbook)
+        self.action(args, inventory, env, playbook)
 
     def list(self, args):
         """
         List VMs in cluster
         :param args: command line arguments provided by user
-        :return: exit status from run command
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        return self.action(args, inventory, env, playbook)
+        self.action(args, inventory, env, playbook)
 
     def config(self, args):
         """
         Configure or reconfigure OpenShift across clustered VMs
         :param args: command line arguments provided by user
-        :return: exit status from run command
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        return self.action(args, inventory, env, playbook)
+        self.action(args, inventory, env, playbook)
 
     def update(self, args):
         """
         Update to latest OpenShift across clustered VMs
         :param args: command line arguments provided by user
-        :return: exit status from run command
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
         playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        return self.action(args, inventory, env, playbook)
+        self.action(args, inventory, env, playbook)
 
     def service(self, args):
         """
         Make the same service call across all nodes in the cluster
         :param args: command line arguments provided by user
-        :return: exit status from run command
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args),
@@ -131,7 +127,7 @@ class Cluster(object):
         playbook = "playbooks/{}/openshift-cluster/service.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
-        return self.action(args, inventory, env, playbook)
+        self.action(args, inventory, env, playbook)
 
     def setup_provider(self, provider):
         """
@@ -141,10 +137,14 @@ class Cluster(object):
         """
         config = ConfigParser.ConfigParser()
         if 'gce' == provider:
-            config.readfp(open('inventory/gce/hosts/gce.ini'))
+            gce_ini_default_path = os.path.join(
+                'inventory/gce/hosts/gce.ini')
+            gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+            if os.path.exists(gce_ini_path): 
+                config.readfp(open(gce_ini_path))
 
-            for key in config.options('gce'):
-                os.environ[key] = config.get('gce', key)
+                for key in config.options('gce'):
+                    os.environ[key] = config.get('gce', key)
 
             inventory = '-i inventory/gce/hosts'
         elif 'aws' == provider:
@@ -163,7 +163,7 @@ class Cluster(object):
             boto_configs = [conf for conf in boto_conf_files if conf_exists(conf)]
 
             if len(key_missing) > 0 and len(boto_configs) == 0:
-                raise ValueError("PROVIDER aws requires {} environment variable(s). See README_AWS.md".format(missing))
+                raise ValueError("PROVIDER aws requires {} environment variable(s). See README_AWS.md".format(key_missing))
 
         elif 'libvirt' == provider:
             inventory = '-i inventory/libvirt/hosts'
@@ -182,7 +182,6 @@ class Cluster(object):
         :param inventory: derived provider library
         :param env: environment variables for kubernetes
         :param playbook: ansible playbook to execute
-        :return: exit status from ansible-playbook command
         """
 
         verbose = ''
@@ -212,7 +211,18 @@ class Cluster(object):
             sys.stderr.write('RUN [{}]\n'.format(command))
             sys.stderr.flush()
 
-        return os.system(command)
+        try:
+            subprocess.check_call(command, shell=True)
+        except subprocess.CalledProcessError as exc:
+            raise ActionFailed("ACTION [{}] failed: {}"
+                               .format(args.action, exc))
+
+
+class ActionFailed(Exception):
+    """
+    Raised when action failed.
+    """
+    pass
 
 
 if __name__ == '__main__':
@@ -258,6 +268,9 @@ if __name__ == '__main__':
     meta_parser.add_argument('-t', '--deployment-type',
                              choices=['origin', 'online', 'enterprise'],
                              help='Deployment type. (default: origin)')
+    meta_parser.add_argument('-T', '--product-type',
+                             choices=['openshift', 'atomic-enterprise'],
+                             help='Product type. (default: openshift)')
     meta_parser.add_argument('-o', '--option', action='append',
                              help='options')
 
@@ -324,14 +337,11 @@ if __name__ == '__main__':
             sys.stderr.write('\nACTION [update] aborted by user!\n')
             exit(1)
 
-    status = 1
     try:
-        status = args.func(args)
-        if status != 0:
-            sys.stderr.write("ACTION [{}] failed with exit status {}\n".format(args.action, status))
-    except Exception, e:
+        args.func(args)
+    except Exception as exc:
         if args.verbose:
             traceback.print_exc(file=sys.stderr)
         else:
-            sys.stderr.write("{}\n".format(e))
-    exit(status)
+            print >>sys.stderr, exc
+        exit(1)

+ 19 - 1
bin/openshift-ansible-bin.spec

@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Scripts for working with metadata hosts
 Name:          openshift-ansible-bin
-Version:       0.0.18
+Version:       0.0.19
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -42,6 +42,24 @@ cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshif
 %config(noreplace) /etc/openshift_ansible/
 
 %changelog
+* Thu Aug 20 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.19-1
+- Updated to show private ips when doing a list (kwoodson@redhat.com)
+- Updated to read config first and default to users home dir
+  (kwoodson@redhat.com)
+- Prevent Ansible from serializing tasks (lhuard@amadeus.com)
+- Infra node support (whearn@redhat.com)
+- Playbook updates for clustered etcd (jdetiber@redhat.com)
+- bin/cluster supports boto credentials as well as env variables
+  (jdetiber@redhat.com)
+- Merge pull request #291 from lhuard1A/profile
+  (twiest@users.noreply.github.com)
+- Add a generic mechanism for passing options (lhuard@amadeus.com)
+- Infrastructure - Validate AWS environment before calling playbooks
+  (jhonce@redhat.com)
+- Add a --profile option to spot which task takes more time
+  (lhuard@amadeus.com)
+- changed Openshift to OpenShift (twiest@redhat.com)
+
 * Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.18-1
 - Implement OpenStack provider (lhuard@amadeus.com)
 - * Update defaults and examples to track core concepts guide

+ 2 - 2
bin/oscp

@@ -167,7 +167,7 @@ class Oscp(object):
                     name = server_info['ec2_tag_Name']
                     ec2_id = server_info['ec2_id']
                     ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address}'.format(**server_info)
+                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
 
                 if limit:
                     print
@@ -180,7 +180,7 @@ class Oscp(object):
                     name = server_info['ec2_tag_Name']
                     ec2_id = server_info['ec2_id']
                     ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<15} {ec2_ip_address}'.format(**server_info)
+                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
 
     def scp(self):
         '''scp files to or from a specified host

+ 2 - 2
bin/ossh

@@ -156,7 +156,7 @@ class Ossh(object):
                     name = server_info['ec2_tag_Name']
                     ec2_id = server_info['ec2_id']
                     ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address}'.format(**server_info)
+                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
 
                 if limit:
                     print
@@ -169,7 +169,7 @@ class Ossh(object):
                     name = server_info['ec2_tag_Name']
                     ec2_id = server_info['ec2_id']
                     ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<15} {ec2_ip_address}'.format(**server_info)
+                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
 
     def ssh(self):
         '''SSH to a specified host

+ 22 - 2
filter_plugins/oo_filters.py

@@ -73,7 +73,7 @@ class FilterModule(object):
 
         if filters is not None:
             if not issubclass(type(filters), dict):
-                raise errors.AnsibleFilterError("|fialed expects filter to be a"
+                raise errors.AnsibleFilterError("|failed expects filter to be a"
                                                 " dict")
             retval = [FilterModule.get_attr(d, attribute) for d in data if (
                 all([d.get(key, None) == filters[key] for key in filters]))]
@@ -83,6 +83,25 @@ class FilterModule(object):
         return retval
 
     @staticmethod
+    def oo_select_keys_from_list(data, keys):
+        ''' This returns a list, which contains the value portions for the keys
+            Ex: data = { 'a':1, 'b':2, 'c':3 }
+                keys = ['a', 'c']
+                returns [1, 3]
+        '''
+
+        if not issubclass(type(data), list):
+            raise errors.AnsibleFilterError("|failed expects to filter on a list")
+
+        if not issubclass(type(keys), list):
+            raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+        # Gather up the values for the list of keys passed in
+        retval = [FilterModule.oo_select_keys(item, keys) for item in data]
+
+        return FilterModule.oo_flatten(retval)
+
+    @staticmethod
     def oo_select_keys(data, keys):
         ''' This returns a list, which contains the value portions for the keys
             Ex: data = { 'a':1, 'b':2, 'c':3 }
@@ -97,7 +116,7 @@ class FilterModule(object):
             raise errors.AnsibleFilterError("|failed expects first param is a list")
 
         # Gather up the values for the list of keys passed in
-        retval = [data[key] for key in keys]
+        retval = [data[key] for key in keys if data.has_key(key)]
 
         return retval
 
@@ -312,6 +331,7 @@ class FilterModule(object):
         ''' returns a mapping of filters to methods '''
         return {
             "oo_select_keys": self.oo_select_keys,
+            "oo_select_keys_from_list": self.oo_select_keys_from_list,
             "oo_collect": self.oo_collect,
             "oo_flatten": self.oo_flatten,
             "oo_pdb": self.oo_pdb,

+ 29 - 0
filter_plugins/oo_zabbix_filters.py

@@ -60,6 +60,17 @@ class FilterModule(object):
         return None
 
     @staticmethod
+    def oo_build_zabbix_collect(data, string, value):
+        ''' Build a list of dicts from a list of data matched on string attribute
+        '''
+        rval = []
+        for item in data:
+            if item[string] == value:
+                rval.append(item)
+
+        return rval
+
+    @staticmethod
     def oo_build_zabbix_list_dict(values, string):
         ''' Build a list of dicts with string as key for each value
         '''
@@ -68,6 +79,22 @@ class FilterModule(object):
             rval.append({string: value})
         return rval
 
+    @staticmethod
+    def oo_remove_attr_from_list_dict(data, attr):
+        ''' Remove a specific attribute from a dict
+        '''
+        attrs = []
+        if isinstance(attr, str):
+            attrs.append(attr)
+        else:
+            attrs = attr
+
+        for attribute in attrs:
+            for _entry in data:
+                _entry.pop(attribute, None)
+
+        return data
+
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {
@@ -76,4 +103,6 @@ class FilterModule(object):
             "oo_set_zbx_trigger_triggerid": self.oo_set_zbx_trigger_triggerid,
             "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict,
             "create_data": self.create_data,
+            "oo_build_zabbix_collect": self.oo_build_zabbix_collect,
+            "oo_remove_attr_from_list_dict": self.oo_remove_attr_from_list_dict,
         }

+ 1 - 1
git/pylint.sh

@@ -13,7 +13,7 @@ OLDREV=$1
 NEWREV=$2
 #TRG_BRANCH=$3
 
-PYTHON=/var/lib/jenkins/python27/bin/python
+PYTHON=$(which python)
 
 set +e
 PY_DIFF=$(/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | grep ".py$")

+ 27 - 6
inventory/byo/hosts.example

@@ -18,10 +18,13 @@ ansible_ssh_user=root
 #ansible_sudo=true
 
 # deployment type valid values are origin, online and enterprise
-deployment_type=enterprise
+deployment_type=atomic-enterprise
+
+# Enable cluster metrics
+#use_cluster_metrics=true
 
 # Pre-release registry URL
-#oreg_url=docker-buildvm-rhose.usersys.redhat.com:5000/openshift3/ose-${component}:${version}
+#oreg_url=rcm-img-docker01.build.eng.bos.redhat.com:5001/openshift3/ose-${component}:${version}
 
 # Pre-release Dev puddle repo
 #openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://buildvm-devops.usersys.redhat.com/puddle/build/OpenShiftEnterprise/3.0/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
@@ -45,6 +48,15 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #osm_mcs_labels_per_project=5
 #osm_uid_allocator_range='1000000000-1999999999/10000'
 
+# Configure Fluentd
+#use_fluentd=true
+
+# Enable cockpit
+#osm_use_cockpit=true
+#
+# Set cockpit plugins
+#osm_cockpit_plugins=['cockpit-kubernetes']
+
 # master cluster ha variables using pacemaker or RHEL HA
 #openshift_master_cluster_password=openshift_cluster
 #openshift_master_cluster_vip=192.168.133.25
@@ -56,7 +68,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # For installation the value of openshift_master_cluster_hostname must resolve
 # to the first master defined in the inventory.
 # The HA solution must be manually configured after installation and must ensure
-# that openshift-master is running on a single master host.
+# that the master is running on a single master host.
 #openshift_master_cluster_hostname=openshift-ansible.test.example.com
 #openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
 #openshift_master_cluster_defer_ha=True
@@ -65,11 +77,18 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 #osm_default_subdomain=apps.test.example.com
 
 # additional cors origins
-#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] 
+#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
 
 # default project node selector
 #osm_default_node_selector='region=primary'
 
+# default selectors for router and registry services
+# openshift_router_selector='region=infra'
+# openshift_registry_selector='region=infra'
+
+# set RPM version for debugging purposes
+#openshift_pkg_version=-3.0.0.0
+
 # host group for masters
 [masters]
 ose3-master[1:3]-ansible.test.example.com
@@ -77,7 +96,9 @@ ose3-master[1:3]-ansible.test.example.com
 [etcd]
 ose3-etcd[1:3]-ansible.test.example.com
 
-# host group for nodes
+# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
+# However, in order to ensure that your masters are not burdened with running pods you should
+# make them unschedulable by adding openshift_scheduleable=False to any node that's also a master.
 [nodes]
-ose3-master[1:3]-ansible.test.example.com openshift_scheduleable=False
+ose3-master[1:3]-ansible.test.example.com
 ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"

+ 6 - 3
inventory/gce/hosts/gce.py

@@ -120,6 +120,7 @@ class GceInventory(object):
             os.path.dirname(os.path.realpath(__file__)), "gce.ini")
         gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
 
+
         # Create a ConfigParser.
         # This provides empty defaults to each key, so that environment
         # variable configuration (as opposed to INI configuration) is able
@@ -173,6 +174,7 @@ class GceInventory(object):
         args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
         kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
 
+        
         # Retrieve and return the GCE driver.
         gce = get_driver(Provider.GCE)(*args, **kwargs)
         gce.connection.user_agent_append(
@@ -211,7 +213,8 @@ class GceInventory(object):
             'gce_image': inst.image,
             'gce_machine_type': inst.size,
             'gce_private_ip': inst.private_ips[0],
-            'gce_public_ip': inst.public_ips[0],
+            # Hosts don't always have a public IP name
+            #'gce_public_ip': inst.public_ips[0],
             'gce_name': inst.name,
             'gce_description': inst.extra['description'],
             'gce_status': inst.extra['status'],
@@ -219,8 +222,8 @@ class GceInventory(object):
             'gce_tags': inst.extra['tags'],
             'gce_metadata': md,
             'gce_network': net,
-            # Hosts don't have a public name, so we add an IP
-            'ansible_ssh_host': inst.public_ips[0]
+            # Hosts don't always have a public IP name
+            #'ansible_ssh_host': inst.public_ips[0]
         }
 
     def get_instance(self, instance_name):

+ 27 - 1
inventory/openshift-ansible-inventory.spec

@@ -1,6 +1,6 @@
 Summary:       OpenShift Ansible Inventories
 Name:          openshift-ansible-inventory
-Version:       0.0.8
+Version:       0.0.9
 Release:       1%{?dist}
 License:       ASL 2.0
 URL:           https://github.com/openshift/openshift-ansible
@@ -36,6 +36,32 @@ cp -p gce/hosts/gce.py %{buildroot}/usr/share/ansible/inventory/gce
 /usr/share/ansible/inventory/gce/gce.py*
 
 %changelog
+* Thu Aug 20 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.9-1
+- Merge pull request #408 from sdodson/docker-buildvm (bleanhar@redhat.com)
+- Merge pull request #428 from jtslear/issue-383
+  (twiest@users.noreply.github.com)
+- Merge pull request #407 from aveshagarwal/ae-ansible-merge-auth
+  (bleanhar@redhat.com)
+- Enable htpasswd by default in the example hosts file. (avagarwa@redhat.com)
+- Add support for setting default node selector (jdetiber@redhat.com)
+- Merge pull request #429 from spinolacastro/custom_cors (bleanhar@redhat.com)
+- Updated to read config first and default to users home dir
+  (kwoodson@redhat.com)
+- Fix Custom Cors (spinolacastro@gmail.com)
+- Revert "namespace the byo inventory so the group names aren't so generic"
+  (sdodson@redhat.com)
+- Removes hardcoded python2 (jtslear@gmail.com)
+- namespace the byo inventory so the group names aren't so generic
+  (admiller@redhat.com)
+- docker-buildvm-rhose is dead (sdodson@redhat.com)
+- Add support for setting routingConfig:subdomain (jdetiber@redhat.com)
+- Initial HA master (jdetiber@redhat.com)
+- Make it clear that the byo inventory file is just an example
+  (jdetiber@redhat.com)
+- Playbook updates for clustered etcd (jdetiber@redhat.com)
+- Update for RC2 changes (sdodson@redhat.com)
+- Templatize configs and 0.5.2 changes (jdetiber@redhat.com)
+
 * Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.8-1
 - Added more verbosity when error happens.  Also fixed a bug.
   (kwoodson@redhat.com)

+ 1 - 1
inventory/openstack/hosts/nova.py

@@ -34,7 +34,7 @@ except ImportError:
 # executed with no parameters, return the list of
 # all groups and hosts
 
-NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini",
+NOVA_CONFIG_FILES = [os.path.join(os.path.dirname(os.path.realpath(__file__)), "nova.ini"),
                      os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
                      "/etc/ansible/nova.ini"]
 

+ 29 - 0
playbooks/adhoc/atomic_openshift_tutorial_reset.yml

@@ -0,0 +1,29 @@
+# This deletes *ALL* Docker images, and uninstalls OpenShift and
+# Atomic Enterprise RPMs.  It is primarily intended for use
+# with the tutorial as well as for developers to reset state.
+#
+---
+- include: uninstall.yml
+
+- hosts:
+    - OSEv3:children
+
+  sudo: yes
+
+  tasks:
+    - shell: docker ps -a -q | xargs docker stop
+      changed_when: False
+      failed_when: False
+
+    - shell: docker ps -a -q| xargs docker rm
+      changed_when: False
+      failed_when: False
+
+    - shell:  docker images -q |xargs docker rmi
+      changed_when: False
+      failed_when: False
+
+    - user: name={{ item }} state=absent remove=yes
+      with_items:
+        - alice
+        - joe

+ 17 - 0
playbooks/adhoc/create_pv/create_pv.yaml

@@ -50,6 +50,16 @@
 
   - debug: var=vol
 
+  - name: tag the vol with a name
+    ec2_tag: region={{ hostvars[oo_name]['ec2_region'] }} resource={{vol.volume_id}}
+    args:
+      tags:
+        Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}"
+        env: "{{cli_environment}}"
+    register: voltags
+
+  - debug: var=voltags
+
 - name: Configure the drive
   gather_facts: no
   hosts: oo_master
@@ -118,6 +128,13 @@
       state: unmounted
       fstype: ext4
 
+  - name: remove from fstab
+    mount:
+      name: "{{ pv_mntdir }}"
+      src: "{{ cli_device_name }}"
+      state: absent
+      fstype: ext4
+
   - name: detach drive
     delegate_to: localhost
     ec2_vol:

+ 2 - 0
playbooks/adhoc/docker_loopback_to_lvm/docker-storage-setup

@@ -0,0 +1,2 @@
+DEVS=/dev/xvdb
+VG=docker_vg

+ 142 - 0
playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml

@@ -0,0 +1,142 @@
+---
+# This playbook converts docker to go from loopback to direct-lvm (the Red Hat recommended way to run docker)
+#  in AWS.  This adds an additional EBS volume and creates the Volume Group on this EBS volume to use.
+#
+#  To run:
+#  1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+#    export AWS_ACCESS_KEY_ID='XXXXX'
+#    export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+#   ansible-playbook -e 'cli_tag_name=<tag-name>' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml
+#
+#  Example:
+#   ansible-playbook -e 'cli_tag_name=ops-master-12345' -e "cli_volume_size=30" docker_loopback_to_direct_lvm.yml
+#
+#  Notes:
+#  * By default this will do a 30GB volume.
+#  * iops are calculated by Disk Size * 30.  e.g ( 30GB * 30) = 900 iops
+#  * This will remove /var/lib/docker!
+#  * You may need to re-deploy docker images after this is run (like monitoring)
+#
+
+- name: Fix docker to have a provisioned iops drive
+  hosts: "tag_Name_{{ cli_tag_name }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  vars:
+    cli_volume_type: gp2
+    cli_volume_size: 30
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_tag_name
+    - cli_volume_size
+
+  - debug:
+      var: hosts
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if loopback
+    shell: docker info | grep 'Data file:.*loop'
+    register: loop_device_check
+    ignore_errors: yes
+
+  - debug:
+      var: loop_device_check
+
+  - name: fail if we don't detect loopback
+    fail:
+      msg:  loopback not detected! Please investigate manually.
+    when: loop_device_check.rc == 1
+
+  - name: stop zagg client monitoring container
+    service:
+      name: oso-rhel7-zagg-client
+      state: stopped
+    ignore_errors: yes
+
+  - name: stop pcp client monitoring container
+    service:
+      name: oso-f22-host-monitoring
+      state: stopped
+    ignore_errors: yes
+
+  - name: stop docker
+    service:
+      name: docker
+      state: stopped
+
+  - name: delete /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: remove /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: check to see if /dev/xvdb exists
+    command: test -e /dev/xvdb
+    register: xvdb_check
+    ignore_errors: yes
+
+  - debug: var=xvdb_check
+
+  - name: fail if /dev/xvdb already exists
+    fail:
+      msg: /dev/xvdb already exists.  Please investigate
+    when: xvdb_check.rc == 0
+
+  - name: Create a volume and attach it
+    delegate_to: localhost
+    ec2_vol:
+      state: present
+      instance: "{{ ec2_id }}"
+      region: "{{ ec2_region }}"
+      volume_size: "{{ cli_volume_size | default(30, True)}}"
+      volume_type: "{{ cli_volume_type }}"
+      device_name: /dev/xvdb
+    register: vol
+
+  - debug: var=vol
+
+  - name: tag the vol with a name
+    delegate_to: localhost
+    ec2_tag: region={{ ec2_region }} resource={{ vol.volume_id }}
+    args:
+      tags:
+        Name: "{{ ec2_tag_Name }}"
+        env: "{{ ec2_tag_environment }}"
+    register: voltags
+
+  - name: Wait for volume to attach
+    pause:
+      seconds: 30
+
+  - name: copy the docker-storage-setup config file
+    copy:
+      src: docker-storage-setup
+      dest: /etc/sysconfig/docker-storage-setup
+      owner: root
+      group: root
+      mode: 0664
+
+  - name: docker storage setup
+    command: docker-storage-setup
+    register: setup_output
+
+  - debug: var=setup_output
+
+  - name: start docker
+    command: systemctl start docker.service
+    register: dockerstart
+
+  - debug: var=dockerstart
+

+ 104 - 0
playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml

@@ -0,0 +1,104 @@
+#!/usr/bin/ansible-playbook
+---
+# This playbook converts docker to go from loopback to direct-lvm (the Red Hat recommended way to run docker).
+#
+# It requires the block device to be already provisioned and attached to the host. This is a generic playbook,
+# meant to be used for manual conversion. For AWS specific conversions, use the other playbook in this directory.
+#
+#  To run:
+#   ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=<host to run on> -e cli_docker_device=<path to device>
+#
+#  Example:
+#   ./ops-docker-loopback-to-direct-lvm.yml -e cli_host=twiesttest-master-fd32 -e cli_docker_device=/dev/sdb
+#
+#  Notes:
+#  * This will remove /var/lib/docker!
+#  * You may need to re-deploy docker images after this is run (like monitoring)
+
+- name: Fix docker to have a provisioned iops drive
+  hosts: "{{ cli_name }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_docker_device
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if loopback
+    shell: docker info | grep 'Data file:.*loop'
+    register: loop_device_check
+    ignore_errors: yes
+
+  - debug:
+      var: loop_device_check
+
+  - name: fail if we don't detect loopback
+    fail:
+      msg:  loopback not detected! Please investigate manually.
+    when: loop_device_check.rc == 1
+
+  - name: stop zagg client monitoring container
+    service:
+      name: oso-rhel7-zagg-client
+      state: stopped
+    ignore_errors: yes
+
+  - name: stop pcp client monitoring container
+    service:
+      name: oso-f22-host-monitoring
+      state: stopped
+    ignore_errors: yes
+
+  - name: "check to see if {{ cli_docker_device }} exists"
+    command: "test -e {{ cli_docker_device }}"
+    register: docker_dev_check
+    ignore_errors: yes
+
+  - debug: var=docker_dev_check
+
+  - name: "fail if {{ cli_docker_device }} doesn't exist"
+    fail:
+      msg: "{{ cli_docker_device }} doesn't exist. Please investigate"
+    when: docker_dev_check.rc != 0
+
+  - name: stop docker
+    service:
+      name: docker
+      state: stopped
+
+  - name: delete /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: remove /var/lib/docker
+    command: rm -rf /var/lib/docker
+
+  - name: copy the docker-storage-setup config file
+    copy:
+      content: >
+        DEVS={{ cli_docker_device }}
+        VG=docker_vg
+      dest: /etc/sysconfig/docker-storage-setup
+      owner: root
+      group: root
+      mode: 0664
+
+  - name: docker storage setup
+    command: docker-storage-setup
+    register: setup_output
+
+  - debug: var=setup_output
+
+  - name: start docker
+    command: systemctl start docker.service
+    register: dockerstart
+
+  - debug: var=dockerstart

+ 69 - 0
playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml

@@ -0,0 +1,69 @@
+---
+# This playbook attempts to cleanup unwanted docker files to help alleviate docker disk space issues.
+#
+#  To run:
+#
+#  1. run the playbook:
+#
+#   ansible-playbook -e 'cli_tag_name=<tag-name>' docker_storage_cleanup.yml
+#
+#  Example:
+#
+#   ansible-playbook -e 'cli_tag_name=ops-node-compute-12345' docker_storage_cleanup.yml
+#
+#  Notes:
+#  *  This *should* not interfere with running docker images
+#
+
+- name: Clean up Docker Storage
+  gather_facts: no
+  hosts: "tag_Name_{{ cli_tag_name }}"
+  user: root
+  connection: ssh
+
+  pre_tasks:
+
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_tag_name
+
+  - name: Ensure docker is running
+    service:
+      name: docker
+      state: started
+      enabled: yes
+
+  - name: Get docker info
+    command: docker info
+    register: docker_info
+
+  - name: Show docker info
+    debug:
+      var: docker_info.stdout_lines
+
+  - name: Remove exited and dead containers
+    shell: "docker ps -a | awk '/Exited|Dead/ {print $1}' | xargs --no-run-if-empty docker rm"
+    ignore_errors: yes
+
+  - name: Remove dangling docker images
+    shell: "docker images -q -f dangling=true | xargs --no-run-if-empty docker rmi"
+    ignore_errors: yes
+
+  - name: Remove non-running docker images
+    shell: "docker images | grep -v -e registry.access.redhat.com -e docker-registry.usersys.redhat.com -e docker-registry.ops.rhcloud.com | awk '{print $3}' | xargs --no-run-if-empty docker rmi 2>/dev/null"
+    ignore_errors: yes
+
+  # leaving off the '-t' for docker exec.  With it, it doesn't work with ansible and tty support
+  - name: update zabbix docker items
+    command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
+
+  # Get and show docker info again.
+  - name: Get docker info
+    command: docker info
+    register: docker_info
+
+  - name: Show docker info
+    debug:
+      var: docker_info.stdout_lines

+ 41 - 0
playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py

@@ -0,0 +1,41 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
+
+import pdb
+
+
+class FilterModule(object):
+    ''' Custom ansible filters '''
+
+    @staticmethod
+    def oo_pdb(arg):
+        ''' This pops you into a pdb instance where arg is the data passed in
+            from the filter.
+            Ex: "{{ hostvars | oo_pdb }}"
+        '''
+        pdb.set_trace()
+        return arg
+
+    @staticmethod
+    def translate_volume_name(volumes, target_volume):
+        '''
+            This filter matches a device string /dev/sdX to /dev/xvdX
+            It will then return the AWS volume ID
+        '''
+        for vol in volumes:
+            translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd")
+            if target_volume.startswith(translated_name):
+                return vol["id"]
+
+        return None
+
+
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {
+            "translate_volume_name": self.translate_volume_name,
+        }

+ 206 - 0
playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml

@@ -0,0 +1,206 @@
+---
+# This playbook grows the docker VG on a node by:
+#  * add a new volume
+#  * add volume to the existing VG.
+#  * pv move to the new volume.
+#  * remove old volume
+#  * detach volume
+#  * mark old volume in AWS with "REMOVE ME" tag
+#  * grow docker LVM to 90% of the VG
+#
+#  To run:
+#  1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+#    export AWS_ACCESS_KEY_ID='XXXXX'
+#    export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+#   ansible-playbook -e 'cli_tag_name=<tag-name>' grow_docker_vg.yml
+#
+#  Example:
+#   ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
+#
+#  Notes:
+#  * By default this will do a 55GB GP2 volume.  This can be overridden with the "-e 'cli_volume_size=100'" variable
+#  * This does a GP2 by default.  Support for Provisioned IOPS has not been added
+#  * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
+#  * This can be done with NO downtime on the host
+#  * This playbook assumes that there is a Logical Volume that is installed and called "docker-pool".  This is
+#      the LV that gets created via the "docker-storage-setup" command
+#
+
+- name: Grow the docker volume group
+  hosts: "tag_Name_{{ cli_tag_name }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  vars:
+    cli_volume_type: gp2
+    cli_volume_size: 55
+#    cli_volume_iops: "{{ 30 * cli_volume_size }}"
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_tag_name
+    - cli_volume_size
+
+  - debug:
+      var: hosts
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if Storage Driver (docker info) is devicemapper
+    shell: docker info | grep 'Storage Driver:.*devicemapper'
+    register: device_mapper_check
+    ignore_errors: yes
+
+  - debug:
+      var: device_mapper_check
+
+  - name: fail if we don't detect devicemapper
+    fail:
+      msg:  The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
+    when: device_mapper_check.rc == 1
+
+  # docker-storage-setup creates a docker-pool as the lvm.  I am using docker-pool lvm to test
+  # and find the volume group.
+  - name: Attempt to find the Volume Group that docker is using
+    shell: lvs | grep docker-pool | awk '{print $2}'
+    register: docker_vg_name
+    ignore_errors: yes
+
+  - debug:
+      var: docker_vg_name
+
+  - name: fail if we don't find a docker volume group
+    fail:
+      msg:  Unable to find docker volume group. Please investigate manually.
+    when: docker_vg_name.stdout_lines|length != 1
+
+  # docker-storage-setup creates a docker-pool as the lvm.  I am using docker-pool lvm to test
+  # and find the physical volume.
+  - name: Attempt to find the Physical Volume that docker is using
+    shell: "pvs | grep {{ docker_vg_name.stdout }} | awk '{print $1}'"
+    register: docker_pv_name
+    ignore_errors: yes
+
+  - debug:
+      var: docker_pv_name
+
+  - name: fail if we don't find a docker physical volume
+    fail:
+      msg:  Unable to find docker physical volume. Please investigate manually.
+    when: docker_pv_name.stdout_lines|length != 1
+
+
+  - name: get list of volumes from AWS
+    delegate_to: localhost
+    ec2_vol:
+      state: list
+      instance: "{{ ec2_id }}"
+      region: "{{ ec2_region }}"
+    register: attached_volumes
+
+  - debug: var=attached_volumes
+
+  - name: get volume id of current docker volume
+    set_fact:
+      old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}"
+
+  - debug: var=old_docker_volume_id
+
+  - name: check to see if /dev/xvdc exists
+    command: test -e /dev/xvdc
+    register: xvdc_check
+    ignore_errors: yes
+
+  - debug: var=xvdc_check
+
+  - name: fail if /dev/xvdc already exists
+    fail:
+      msg: /dev/xvdc already exists.  Please investigate
+    when: xvdc_check.rc == 0
+
+  - name: Create a volume and attach it
+    delegate_to: localhost
+    ec2_vol:
+      state: present
+      instance: "{{ ec2_id }}"
+      region: "{{ ec2_region }}"
+      volume_size: "{{ cli_volume_size | default(30, True)}}"
+      volume_type: "{{ cli_volume_type }}"
+      device_name: /dev/xvdc
+    register: create_volume
+
+  - debug: var=create_volume
+
+  - name: Fail when problems creating volumes and attaching
+    fail:
+      msg: "Failed to create or attach volume msg: {{ create_volume.msg }}"
+    when: create_volume.msg is defined
+
+  - name: tag the vol with a name
+    delegate_to: localhost
+    ec2_tag: region={{ ec2_region }} resource={{ create_volume.volume_id }}
+    args:
+      tags:
+        Name: "{{ ec2_tag_Name }}"
+        env: "{{ ec2_tag_environment }}"
+    register: voltags
+
+  - name: check for attached drive
+    command: test -b /dev/xvdc
+    register: attachment_check
+    until: attachment_check.rc == 0
+    retries: 30
+    delay: 2
+
+  - name: partition the new drive and make it lvm
+    command: parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm
+
+  - name: pvcreate /dev/xvdc
+    command: pvcreate /dev/xvdc1
+
+  - name: Extend the docker volume group
+    command: vgextend "{{ docker_vg_name.stdout }}" /dev/xvdc1
+
+  - name: pvmove onto new volume
+    command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
+    async: 43200
+    poll: 10
+
+  - name: Remove the old docker drive from the volume group
+    command: "vgreduce {{ docker_vg_name.stdout }} {{ docker_pv_name.stdout }}"
+
+  - name: Remove the pv from the old drive
+    command: "pvremove {{ docker_pv_name.stdout }}"
+
+  - name: Extend the docker lvm
+    command: "lvextend -l '90%VG' /dev/{{ docker_vg_name.stdout }}/docker-pool"
+
+  - name: detach  old docker volume
+    delegate_to: localhost
+    ec2_vol:
+      region: "{{ ec2_region }}"
+      id: "{{ old_docker_volume_id }}"
+      instance: None
+
+  - name: tag the old vol valid label
+    delegate_to: localhost
+    ec2_tag: region={{ ec2_region }} resource={{old_docker_volume_id}}
+    args:
+      tags:
+        Name: "{{ ec2_tag_Name }} REMOVE ME"
+    register: voltags
+
+  - name: Update the /etc/sysconfig/docker-storage-setup with new device
+    lineinfile:
+      dest: /etc/sysconfig/docker-storage-setup
+      regexp: ^DEVS=
+      line: DEVS=/dev/xvdc

+ 20 - 0
playbooks/adhoc/s3_registry/s3_registry.j2

@@ -0,0 +1,20 @@
+version: 0.1
+log:
+  level: debug
+http:
+  addr: :5000
+storage:
+  cache:
+    layerinfo: inmemory
+  s3:
+    accesskey: {{ aws_access_key }}
+    secretkey: {{ aws_secret_key }}
+    region: us-east-1
+    bucket: {{ clusterid }}-docker
+    encrypt: true
+    secure: true
+    v4auth: true
+    rootdirectory: /registry
+middleware:
+  repository:
+    - name: openshift

+ 71 - 0
playbooks/adhoc/s3_registry/s3_registry.yml

@@ -0,0 +1,71 @@
+---
+# This playbook creates an S3 bucket named after your cluster and configures the docker-registry service to use the bucket as its backend storage.
+# Usage:
+#  ansible-playbook s3_registry.yml -e clusterid="mycluster"
+#
+# The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
+# The 'clusterid' is the short name of your cluster.
+
+- hosts: tag_env-host-type_{{ clusterid }}-openshift-master
+  remote_user: root
+  gather_facts: False
+
+  vars:
+    aws_access_key: "{{ lookup('env', 'S3_ACCESS_KEY_ID') }}"
+    aws_secret_key: "{{ lookup('env', 'S3_SECRET_ACCESS_KEY') }}"
+
+  tasks:
+
+  - name: Check for AWS creds
+    fail: 
+      msg: "Couldn't find {{ item }} creds in ENV"
+    when: "{{ item }} == ''"
+    with_items:
+    - aws_access_key
+    - aws_secret_key
+
+  - name: Scale down registry
+    command: oc scale --replicas=0 dc/docker-registry
+
+  - name: Create S3 bucket
+    local_action:
+      module: s3 bucket="{{ clusterid }}-docker" mode=create
+
+  - name: Set up registry environment variable
+    command: oc env dc/docker-registry REGISTRY_CONFIGURATION_PATH=/etc/registryconfig/config.yml
+
+  - name: Generate docker registry config
+    template: src="s3_registry.j2" dest="/root/config.yml" owner=root mode=0600
+
+  - name: Determine if new secrets are needed
+    command: oc get secrets
+    register: secrets
+
+  - name: Create registry secrets
+    command: oc secrets new dockerregistry /root/config.yml
+    when: "'dockerregistry' not in secrets.stdout"
+
+  - name: Determine if service account contains secrets
+    command: oc describe serviceaccount/registry
+    register: serviceaccount
+
+  - name: Add secrets to registry service account
+    command: oc secrets add serviceaccount/registry secrets/dockerregistry
+    when: "'dockerregistry' not in serviceaccount.stdout"
+
+  - name: Determine if deployment config contains secrets
+    command: oc volume dc/docker-registry --list
+    register: dc
+
+  - name: Add secrets to registry deployment config
+    command: oc volume dc/docker-registry --add --name=dockersecrets -m /etc/registryconfig --type=secret --secret-name=dockerregistry
+    when: "'dockersecrets' not in dc.stdout"
+
+  - name: Wait for deployment config to take effect before scaling up
+    pause: seconds=30
+
+  - name: Scale up registry
+    command: oc scale --replicas=1 dc/docker-registry
+
+  - name: Delete temporary config file
+    file: path=/root/config.yml state=absent

+ 134 - 0
playbooks/adhoc/uninstall.yml

@@ -0,0 +1,134 @@
+# This deletes *ALL* Origin, Atomic Enterprise Platform and OpenShift
+# Enterprise content installed by ansible.  This includes:
+# 
+#    configuration
+#    containers
+#    example templates and imagestreams
+#    images
+#    RPMs
+---
+- hosts:
+    - OSEv3:children
+
+  sudo: yes
+
+  tasks:
+    - service: name={{ item }} state=stopped
+      with_items:
+        - atomic-enterprise-master
+        - atomic-enterprise-node
+        - atomic-openshift-master
+        - atomic-openshift-master-api
+        - atomic-openshift-master-controllers
+        - atomic-openshift-node
+        - etcd
+        - openshift-master
+        - openshift-master-api
+        - openshift-master-controllers
+        - openshift-node
+        - openvswitch
+        - origin-master
+        - origin-master-api
+        - origin-master-controllers
+        - origin-node
+
+    - yum: name={{ item }} state=absent
+      with_items:
+        - atomic-enterprise
+        - atomic-enterprise-master
+        - atomic-enterprise-node
+        - atomic-enterprise-sdn-ovs
+        - atomic-openshift
+        - atomic-openshift-clients
+        - atomic-openshift-master
+        - atomic-openshift-node
+        - atomic-openshift-sdn-ovs
+        - etcd
+        - openshift
+        - openshift-master
+        - openshift-node
+        - openshift-sdn
+        - openshift-sdn-ovs
+        - openvswitch
+        - origin
+        - origin-master
+        - origin-node
+        - origin-sdn-ovs
+        - tuned-profiles-atomic-enterprise-node
+        - tuned-profiles-atomic-openshift-node
+        - tuned-profiles-openshift-node
+        - tuned-profiles-origin-node
+
+    - shell: systemctl reset-failed
+      changed_when: False
+
+    - shell: systemctl daemon-reload
+      changed_when: False
+
+    - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+      changed_when: False
+
+    - shell: find /var/lib/atomic-enterprise/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+      changed_when: False
+
+    - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
+      changed_when: False
+
+    - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node 
+      changed_when: False
+      failed_when: False
+      with_items:
+        - openshift-enterprise
+        - atomic-enterprise
+        - origin
+
+    - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}'
+      changed_when: False
+      failed_when: False
+      register: exited_containers_to_delete
+      with_items:
+        - aep3/aep
+        - openshift3/ose
+        - openshift/origin
+
+    - shell: "docker rm {{ item.stdout_lines | join(' ') }}"
+      changed_when: False
+      failed_when: False
+      with_items: "{{ exited_containers_to_delete.results }}"
+
+    - shell: docker images | grep {{ item }} | awk '{ print $3 }'
+      changed_when: False
+      failed_when: False
+      register: images_to_delete
+      with_items:
+        - registry.access.redhat.com/openshift3
+        - registry.access.redhat.com/aep3
+        - docker.io/openshift
+
+    - shell:  "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+      changed_when: False
+      failed_when: False
+      with_items: "{{ images_to_delete.results }}"
+
+    - file: path={{ item }} state=absent
+      with_items:
+        - /etc/ansible/facts.d/openshift.fact
+        - /etc/atomic-enterprise
+        - /etc/etcd
+        - /etc/openshift
+        - /etc/openshift-sdn
+        - /etc/origin
+        - /etc/sysconfig/atomic-enterprise-master
+        - /etc/sysconfig/atomic-enterprise-node
+        - /etc/sysconfig/atomic-openshift-master
+        - /etc/sysconfig/atomic-openshift-node
+        - /etc/sysconfig/openshift-master
+        - /etc/sysconfig/openshift-node
+        - /etc/sysconfig/origin-master
+        - /etc/sysconfig/origin-node
+        - /root/.kube
+        - /usr/share/openshift/examples
+        - /var/lib/atomic-enterprise
+        - /var/lib/etcd
+        - /var/lib/openshift
+        - /var/lib/origin

+ 21 - 0
playbooks/adhoc/upgrades/README.md

@@ -0,0 +1,21 @@
+# [NOTE]
+This playbook will re-run installation steps overwriting any local
+modifications. You should ensure that your inventory has been updated with any
+modifications you've made after your initial installation. If you find any items
+that cannot be configured via ansible please open an issue at
+https://github.com/openshift/openshift-ansible
+
+# Overview
+This playbook is available as a technical preview. It currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Applies latest configuration by re-running the installation playbook
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+# Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml

+ 1 - 0
playbooks/adhoc/upgrades/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins/

+ 1 - 0
playbooks/adhoc/upgrades/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins/

+ 1 - 0
playbooks/adhoc/upgrades/roles

@@ -0,0 +1 @@
+../../../roles/

+ 128 - 0
playbooks/adhoc/upgrades/upgrade.yml

@@ -0,0 +1,128 @@
+---
+- name: Re-Run cluster configuration to apply latest configuration changes
+  include: ../../common/openshift-cluster/config.yml
+  vars:
+    g_etcd_group: "{{ 'etcd' }}"
+    g_masters_group: "{{ 'masters' }}"
+    g_nodes_group: "{{ 'nodes' }}"
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_deployment_type: "{{ deployment_type }}"
+
+- name: Upgrade masters
+  hosts: masters
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+    - name: Upgrade master packages
+      yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest
+    - name: Restart master services
+      service: name="{{ openshift.common.service_type}}-master" state=restarted
+
+- name: Upgrade nodes
+  hosts: nodes
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+    - name: Upgrade node packages
+      yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest
+    - name: Restart node services
+      service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+- name: Determine new master version
+  hosts: oo_first_master
+  tasks:
+    - name: Determine new version
+      command: >
+        rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
+      register: _new_version
+
+- name: Ensure AOS 3.0.2 or Origin 1.0.6
+  hosts: oo_first_master
+  tasks:
+    - fail:
+        msg: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
+      when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )
+
+- name: Update cluster policy
+  hosts: oo_first_master
+  tasks:
+    - name: oadm policy reconcile-cluster-roles --confirm
+      command: >
+        {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+        policy reconcile-cluster-roles --confirm
+
+- name: Update cluster policy bindings
+  hosts: oo_first_master
+  tasks:
+    - name: oadm policy reconcile-cluster-role-bindings --confirm
+      command: >
+        {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+        policy reconcile-cluster-role-bindings
+        --exclude-groups=system:authenticated
+        --exclude-groups=system:unauthenticated
+        --exclude-users=system:anonymous
+        --additive-only=true --confirm
+      when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
+
+- name: Upgrade default router
+  hosts: oo_first_master
+  vars:
+    - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
+    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+  tasks:
+    - name: Check for default router
+      command: >
+        {{ oc_cmd }} get -n default dc/router
+      register: _default_router
+      failed_when: false
+      changed_when: false
+    - name: Check for allowHostNetwork and allowHostPorts
+      when: _default_router.rc == 0
+      shell: >
+        {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
+      register: _scc
+    - name: Grant allowHostNetwork and allowHostPorts
+      when:
+        - _default_router.rc == 0
+        - "'false' in _scc.stdout"
+      command: >
+        {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
+    - name: Update deployment config to 1.0.4/3.0.1 spec
+      when: _default_router.rc == 0
+      command: >
+        {{ oc_cmd }} patch dc/router -p
+        '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+    - name: Switch to hostNetwork=true
+      when: _default_router.rc == 0
+      command: >
+        {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+    - name: Update router image to current version
+      when: _default_router.rc == 0
+      command: >
+        {{ oc_cmd }} patch dc/router -p
+        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+
+- name: Upgrade default registry
+  hosts: oo_first_master
+  vars:
+    - registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + _new_version.stdout  ) }}"
+    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+  tasks:
+    - name: Check for default registry
+      command: >
+          {{ oc_cmd }} get -n default dc/docker-registry
+      register: _default_registry
+      failed_when: false
+      changed_when: false
+    - name: Update registry image to current version
+      when: _default_registry.rc == 0
+      command: >
+        {{ oc_cmd }} patch dc/docker-registry -p
+        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+
+- name: Update image streams and templates
+  hosts: oo_first_master
+  vars:
+    openshift_examples_import_command: "update"
+    openshift_deployment_type: "{{ deployment_type }}"
+  roles:
+    - openshift_examples

+ 32 - 25
playbooks/adhoc/zabbix_setup/clean_zabbix.yml

@@ -2,50 +2,57 @@
 - hosts: localhost
   gather_facts: no
   vars:
-    g_zserver: http://localhost/zabbix/api_jsonrpc.php
-    g_zuser: Admin
-    g_zpassword: zabbix
+    g_server: http://localhost:8080/zabbix/api_jsonrpc.php
+    g_user: ''
+    g_password: ''
+
   roles:
-  - ../../../roles/os_zabbix
-  post_tasks:
+  - lib_zabbix
 
-  - zbx_template:
-      server: "{{ g_zserver }}"
-      user: "{{ g_zuser }}"
-      password: "{{ g_zpassword }}"
+  post_tasks:
+  - name: CLEAN List template for heartbeat
+    zbx_template:
+      zbx_server: "{{ g_server }}"
+      zbx_user: "{{ g_user }}"
+      zbx_password: "{{ g_password }}"
       state: list
       name: 'Template Heartbeat'
     register: templ_heartbeat
 
-  - zbx_template:
-      server: "{{ g_zserver }}"
-      user: "{{ g_zuser }}"
-      password: "{{ g_zpassword }}"
+  - name: CLEAN List template app zabbix server
+    zbx_template:
+      zbx_server: "{{ g_server }}"
+      zbx_user: "{{ g_user }}"
+      zbx_password: "{{ g_password }}"
       state: list
       name: 'Template App Zabbix Server'
     register: templ_zabbix_server
 
-  - zbx_template:
-      server: "{{ g_zserver }}"
-      user: "{{ g_zuser }}"
-      password: "{{ g_zpassword }}"
+  - name: CLEAN List template app zabbix agent
+    zbx_template:
+      zbx_server: "{{ g_server }}"
+      zbx_user: "{{ g_user }}"
+      zbx_password: "{{ g_password }}"
       state: list
       name: 'Template App Zabbix Agent'
     register: templ_zabbix_agent
 
-  - zbx_template:
-      server: "{{ g_zserver }}"
-      user: "{{ g_zuser }}"
-      password: "{{ g_zpassword }}"
+  - name: CLEAN List all templates
+    zbx_template:
+      zbx_server: "{{ g_server }}"
+      zbx_user: "{{ g_user }}"
+      zbx_password: "{{ g_password }}"
       state: list
     register: templates
 
   - debug: var=templ_heartbeat.results
 
-  - zbx_template:
-      server: "{{ g_zserver }}"
-      user: "{{ g_zuser }}"
-      password: "{{ g_zpassword }}"
+  - name: Remove templates if heartbeat template is missing
+    zbx_template:
+      zbx_server: "{{ g_server }}"
+      zbx_user: "{{ g_user }}"
+      zbx_password: "{{ g_password }}"
+      name: "{{ item }}"
       state: absent
     with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
     when:  templ_heartbeat.results | length == 0

+ 0 - 57
playbooks/adhoc/zabbix_setup/create_template.yml

@@ -1,57 +0,0 @@
----
-- debug: var=ctp_template
-
-- name: Create Template
-  zbx_template:
-    server: "{{ ctp_zserver }}"
-    user: "{{ ctp_zuser }}"
-    password: "{{ ctp_zpassword }}"
-    name: "{{ ctp_template.name }}"
-  register: ctp_created_template
-
-- debug: var=ctp_created_template
-
-#- name: Create Application
-#  zbxapi:
-#    server: "{{ ctp_zserver }}"
-#    user: "{{ ctp_zuser }}"
-#    password: "{{ ctp_zpassword }}"
-#    zbx_class: Application
-#    state: present
-#    params:
-#      name: "{{ ctp_template.application.name}}"
-#      hostid: "{{ ctp_created_template.results[0].templateid }}"
-#      search:
-#        name: "{{ ctp_template.application.name}}"
-#  register: ctp_created_application
-
-#- debug: var=ctp_created_application
-
-- name: Create Items
-  zbx_item:
-    server: "{{ ctp_zserver }}"
-    user: "{{ ctp_zuser }}"
-    password: "{{ ctp_zpassword }}"
-    key: "{{ item.key }}"
-    name: "{{ item.name | default(item.key, true) }}"
-    value_type: "{{ item.value_type | default('int') }}"
-    template_name: "{{ ctp_template.name }}"
-  with_items: ctp_template.zitems
-  register: ctp_created_items
-
-#- debug: var=ctp_created_items
-
-- name: Create Triggers
-  zbx_trigger:
-    server: "{{ ctp_zserver }}"
-    user: "{{ ctp_zuser }}"
-    password: "{{ ctp_zpassword }}"
-    description: "{{ item.description }}"
-    expression: "{{ item.expression }}"
-    priority: "{{ item.priority }}"
-  with_items: ctp_template.ztriggers
-  when: ctp_template.ztriggers is defined
-
-#- debug: var=ctp_created_triggers
-
-

+ 1 - 1
playbooks/adhoc/zabbix_setup/filter_plugins

@@ -1 +1 @@
-../../../filter_plugins
+../../../filter_plugins/

+ 7 - 0
playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml

@@ -0,0 +1,7 @@
+#!/usr/bin/env ansible-playbook
+---
+- include: clean_zabbix.yml
+  vars:
+    g_server: http://localhost/zabbix/api_jsonrpc.php
+    g_user: Admin
+    g_password: zabbix

+ 13 - 0
playbooks/adhoc/zabbix_setup/oo-config-zaio.yml

@@ -0,0 +1,13 @@
+#!/usr/bin/ansible-playbook
+---
+- hosts: localhost
+  gather_facts: no
+  vars:
+    g_server: http://localhost/zabbix/api_jsonrpc.php
+    g_user: Admin
+    g_password: zabbix
+  roles:
+  - role: os_zabbix
+    ozb_server: "{{ g_server }}"
+    ozb_user: "{{ g_user }}"
+    ozb_password: "{{ g_password }}"

+ 1 - 1
playbooks/adhoc/zabbix_setup/roles

@@ -1 +1 @@
-../../../roles/
+../../../roles

+ 0 - 38
playbooks/adhoc/zabbix_setup/setup_zabbix.yml

@@ -1,38 +0,0 @@
----
-- hosts: localhost
-  gather_facts: no
-  vars_files:
-  - vars/template_heartbeat.yml
-  - vars/template_os_linux.yml
-  vars:
-    g_zserver: http://localhost/zabbix/api_jsonrpc.php
-    g_zuser: Admin
-    g_zpassword: zabbix
-  roles:
-  - ../../../roles/os_zabbix
-  post_tasks:
-  - zbx_template:
-      server: "{{ g_zserver }}"
-      user: "{{ g_zuser }}"
-      password: "{{ g_zpassword }}"
-      state: list
-    register: templates
-
-  - debug: var=templates
-
-  - name: Include Template
-    include: create_template.yml
-    vars:
-      ctp_template: "{{ g_template_heartbeat }}"
-      ctp_zserver: "{{ g_zserver }}"
-      ctp_zuser: "{{ g_zuser }}"
-      ctp_zpassword: "{{ g_zpassword }}"
-
-  - name: Include Template
-    include: create_template.yml
-    vars:
-      ctp_template: "{{ g_template_os_linux }}"
-      ctp_zserver: "{{ g_zserver }}"
-      ctp_zuser: "{{ g_zuser }}"
-      ctp_zpassword: "{{ g_zpassword }}"
-

+ 0 - 11
playbooks/adhoc/zabbix_setup/vars/template_heartbeat.yml

@@ -1,11 +0,0 @@
----
-g_template_heartbeat:
-  name: Template Heartbeat
-  zitems:
-  - name: Heartbeat Ping
-    hostid:
-    key: heartbeat.ping
-  ztriggers:
-  - description: 'Heartbeat.ping has failed on {HOST.NAME}'
-    expression: '{Template Heartbeat:heartbeat.ping.last()}<>0'
-    priority: avg

+ 0 - 27
playbooks/adhoc/zabbix_setup/vars/template_host.yml

@@ -1,27 +0,0 @@
----
-g_template_host:
-  params:
-    name: Template Host
-    host: Template Host
-    groups:
-    - groupid: 1 # FIXME (not real)
-    output: extend
-    search:
-      name: Template Host
-  zitems:
-  - name: Host Ping
-    hostid: 
-    key_: host.ping
-    type: 2
-    value_type: 0
-    output: extend
-    search:
-      key_: host.ping
-  ztriggers:
-  - description: 'Host ping has failed on {HOST.NAME}'
-    expression: '{Template Host:host.ping.last()}<>0'
-    priority: 3
-    searchWildcardsEnabled: True
-    search:
-      description: 'Host ping has failed on*'
-    expandExpression: True

+ 0 - 27
playbooks/adhoc/zabbix_setup/vars/template_master.yml

@@ -1,27 +0,0 @@
----
-g_template_master:
-  params:
-    name: Template Master
-    host: Template Master
-    groups:
-    - groupid: 1 # FIXME (not real)
-    output: extend
-    search:
-      name: Template Master
-  zitems:
-  - name: Master Etcd Ping
-    hostid: 
-    key_: master.etcd.ping
-    type: 2
-    value_type: 0
-    output: extend
-    search:
-      key_: master.etcd.ping
-  ztriggers:
-  - description: 'Master Etcd ping has failed on {HOST.NAME}'
-    expression: '{Template Master:master.etcd.ping.last()}<>0'
-    priority: 3
-    searchWildcardsEnabled: True
-    search:
-      description: 'Master Etcd ping has failed on*'
-    expandExpression: True

+ 0 - 27
playbooks/adhoc/zabbix_setup/vars/template_node.yml

@@ -1,27 +0,0 @@
----
-g_template_node:
-  params:
-    name: Template Node
-    host: Template Node
-    groups:
-    - groupid: 1 # FIXME (not real)
-    output: extend
-    search:
-      name: Template Node
-  zitems:
-  - name: Kubelet Ping
-    hostid: 
-    key_: kubelet.ping
-    type: 2
-    value_type: 0
-    output: extend
-    search:
-      key_: kubelet.ping
-  ztriggers:
-  - description: 'Kubelet ping has failed on {HOST.NAME}'
-    expression: '{Template Node:kubelet.ping.last()}<>0'
-    priority: 3
-    searchWildcardsEnabled: True
-    search:
-      description: 'Kubelet ping has failed on*'
-    expandExpression: True

+ 0 - 90
playbooks/adhoc/zabbix_setup/vars/template_os_linux.yml

@@ -1,90 +0,0 @@
----
-g_template_os_linux:
-  name: Template OS Linux
-  zitems:
-  - key: kernel.uname.sysname
-    value_type: string
-
-  - key: kernel.all.cpu.wait.total
-    value_type: int
-
-  - key: kernel.all.cpu.irq.hard
-    value_type: int
-
-  - key: kernel.all.cpu.idle
-    value_type: int
-
-  - key: kernel.uname.distro
-    value_type: string
-
-  - key: kernel.uname.nodename
-    value_type: string
-
-  - key: kernel.all.cpu.irq.soft
-    value_type: int
-
-  - key: kernel.all.load.15_minute
-    value_type: float
-
-  - key: kernel.all.cpu.sys
-    value_type: int
-
-  - key: kernel.all.load.5_minute
-    value_type: float
-
-  - key: mem.freemem
-    value_type: int
-
-  - key: kernel.all.cpu.nice
-    value_type: int
-
-  - key: mem.util.bufmem
-    value_type: int
-
-  - key: swap.used
-    value_type: int
-
-  - key: kernel.all.load.1_minute
-    value_type: float
-
-  - key: kernel.uname.version
-    value_type: string
-
-  - key: swap.length
-    value_type: int
-
-  - key: mem.physmem
-    value_type: int
-
-  - key: kernel.all.uptime
-    value_type: int
-
-  - key: swap.free
-    value_type: int
-
-  - key: mem.util.used
-    value_type: int
-
-  - key: kernel.all.cpu.user
-    value_type: int
-
-  - key: kernel.uname.machine
-    value_type: string
-
-  - key: hinv.ncpu
-    value_type: int
-
-  - key: mem.util.cached
-    value_type: int
-
-  - key: kernel.all.cpu.steal
-    value_type: int
-
-  - key: kernel.all.pswitch
-    value_type: int
-
-  - key: kernel.uname.release
-    value_type: string
-
-  - key: proc.nprocs
-    value_type: int

+ 0 - 27
playbooks/adhoc/zabbix_setup/vars/template_router.yml

@@ -1,27 +0,0 @@
----
-g_template_router:
-  params:
-    name: Template Router
-    host: Template Router
-    groups:
-    - groupid: 1 # FIXME (not real)
-    output: extend
-    search:
-      name: Template Router
-  zitems:
-  - name: Router Backends down
-    hostid: 
-    key_: router.backends.down
-    type: 2
-    value_type: 0
-    output: extend
-    search:
-      key_: router.backends.down
-  ztriggers:
-  - description: 'Number of router backends down on {HOST.NAME}'
-    expression: '{Template Router:router.backends.down.last()}<>0'
-    priority: 3
-    searchWildcardsEnabled: True
-    search:
-      description: 'Number of router backends down on {HOST.NAME}'
-    expandExpression: True

+ 1 - 1
playbooks/aws/openshift-cluster/config.yml

@@ -17,7 +17,7 @@
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     g_nodeonmaster: true
     openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 4
+    openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ ec2_private_ip_address }}"
     openshift_public_hostname: "{{ ec2_ip_address }}"

+ 0 - 5
playbooks/aws/openshift-cluster/launch.yml

@@ -55,9 +55,4 @@
     when: master_names is defined and master_names.0 is defined
 
 - include: update.yml
-
-- include: ../../common/openshift-cluster/create_services.yml
-  vars:
-     g_svc_master: "{{ service_master }}"
-
 - include: list.yml

+ 31 - 0
playbooks/aws/openshift-cluster/tasks/launch_instances.yml

@@ -147,6 +147,35 @@
                     tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }},
                     tag_sub-host-type_{{ sub_host_type }}"
 
+- set_fact:
+    node_label:
+      region: "{{ec2_region}}"
+      type: "{{sub_host_type}}"
+  when: host_type == "node"
+
+- set_fact:
+    node_label:
+      region: "{{ec2_region}}"
+      type: "{{host_type}}"
+  when: host_type != "node"
+
+- set_fact:
+    logrotate:
+        - name: syslog
+          path: "/var/log/cron
+                 \n/var/log/maillog
+                 \n/var/log/messages
+                 \n/var/log/secure
+                 \n/var/log/spooler \n"
+          options:
+            - daily
+            - rotate 7
+            - compress
+            - sharedscripts
+            - missingok
+          scripts:
+            postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
+
 - name: Add new instances groups and variables
   add_host:
     hostname: "{{ item.0 }}"
@@ -156,6 +185,8 @@
     groups: "{{ instance_groups }}"
     ec2_private_ip_address: "{{ item.1.private_ip }}"
     ec2_ip_address: "{{ item.1.public_ip }}"
+    openshift_node_labels: "{{ node_label }}"
+    logrotate_scripts: "{{ logrotate }}"
   with_together:
   - instances
   - ec2.instances

+ 1 - 1
playbooks/aws/openshift-cluster/vars.online.int.yml

@@ -3,7 +3,7 @@ ec2_image: ami-9101c8fa
 ec2_image_name: libra-ops-rhel7*
 ec2_region: us-east-1
 ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.small
+ec2_master_instance_type: t2.medium
 ec2_master_security_groups: [ 'integration', 'integration-master' ]
 ec2_infra_instance_type: c4.large
 ec2_infra_security_groups: [ 'integration', 'integration-infra' ]

+ 1 - 1
playbooks/aws/openshift-cluster/vars.online.prod.yml

@@ -3,7 +3,7 @@ ec2_image: ami-9101c8fa
 ec2_image_name: libra-ops-rhel7*
 ec2_region: us-east-1
 ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.small
+ec2_master_instance_type: t2.medium
 ec2_master_security_groups: [ 'production', 'production-master' ]
 ec2_infra_instance_type: c4.large
 ec2_infra_security_groups: [ 'production', 'production-infra' ]

+ 1 - 1
playbooks/aws/openshift-cluster/vars.online.stage.yml

@@ -3,7 +3,7 @@ ec2_image: ami-9101c8fa
 ec2_image_name: libra-ops-rhel7*
 ec2_region: us-east-1
 ec2_keypair: mmcgrath_libra
-ec2_master_instance_type: t2.small
+ec2_master_instance_type: t2.medium
 ec2_master_security_groups: [ 'stage', 'stage-master' ]
 ec2_infra_instance_type: c4.large
 ec2_infra_security_groups: [ 'stage', 'stage-infra' ]

+ 1 - 1
playbooks/byo/openshift-cluster/config.yml

@@ -5,5 +5,5 @@
     g_masters_group: "{{ 'masters' }}"
     g_nodes_group: "{{ 'nodes' }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
-    openshift_debug_level: 4
+    openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"

+ 1 - 1
playbooks/byo/openshift_facts.yml

@@ -1,5 +1,5 @@
 ---
-- name: Gather OpenShift facts
+- name: Gather Cluster facts
   hosts: all
   gather_facts: no
   roles:

+ 0 - 8
playbooks/common/openshift-cluster/create_services.yml

@@ -1,8 +0,0 @@
----
-- name: Deploy OpenShift Services
-  hosts: "{{ g_svc_master }}"
-  connection: ssh
-  gather_facts: yes
-  roles:
-  - openshift_registry
-  - openshift_router

+ 1 - 0
playbooks/common/openshift-etcd/config.yml

@@ -85,6 +85,7 @@
     when: etcd_server_certs_missing
   roles:
   - etcd
+  - role: nickhammond.logrotate
 
 - name: Delete temporary directory on localhost
   hosts: localhost

+ 36 - 7
playbooks/common/openshift-master/config.yml

@@ -37,7 +37,7 @@
           public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
   - name: Check status of external etcd certificatees
     stat:
-      path: "/etc/openshift/master/{{ item }}"
+      path: "{{ openshift.common.config_base }}/master/{{ item }}"
     with_items:
     - master.etcd-client.crt
     - master.etcd-ca.crt
@@ -47,7 +47,7 @@
                                     | map(attribute='stat.exists')
                                     | list | intersect([false])}}"
       etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
-      etcd_cert_config_dir: /etc/openshift/master
+      etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
       etcd_cert_prefix: master.etcd-
     when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
 
@@ -96,7 +96,7 @@
   tasks:
   - name: Ensure certificate directory exists
     file:
-      path: /etc/openshift/master
+      path: "{{ openshift.common.config_base }}/master"
       state: directory
     when: etcd_client_certs_missing is defined and etcd_client_certs_missing
   - name: Unarchive the tarball on the master
@@ -134,7 +134,7 @@
 
   - name: Check status of master certificates
     stat:
-      path: "/etc/openshift/master/{{ item }}"
+      path: "{{ openshift.common.config_base }}/master/{{ item }}"
     with_items: openshift_master_certs
     register: g_master_cert_stat_result
   - set_fact:
@@ -142,12 +142,12 @@
                                 | map(attribute='stat.exists')
                                 | list | intersect([false])}}"
       master_cert_subdir: master-{{ openshift.common.hostname }}
-      master_cert_config_dir: /etc/openshift/master
+      master_cert_config_dir: "{{ openshift.common.config_base }}/master"
 
 - name: Configure master certificates
   hosts: oo_first_master
   vars:
-    master_generated_certs_dir: /etc/openshift/generated-configs
+    master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
     masters_needing_certs: "{{ hostvars
                                | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
                                | oo_filter_list(filter_attr='master_certs_missing') }}"
@@ -186,10 +186,11 @@
   vars:
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
     openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
   pre_tasks:
   - name: Ensure certificate directory exists
     file:
-      path: /etc/openshift/master
+      path: "{{ openshift.common.config_base }}/master"
       state: directory
     when: master_certs_missing and 'oo_first_master' not in group_names
   - name: Unarchive the tarball on the master
@@ -199,6 +200,7 @@
     when: master_certs_missing and 'oo_first_master' not in group_names
   roles:
   - openshift_master
+  - role: nickhammond.logrotate
   - role: fluentd_master
     when: openshift.common.use_fluentd | bool
   post_tasks:
@@ -215,6 +217,17 @@
   - role: openshift_master_cluster
     when: openshift_master_ha | bool
   - openshift_examples
+  - role: openshift_cluster_metrics
+    when: openshift.common.use_cluster_metrics | bool
+
+- name: Enable cockpit
+  hosts: oo_first_master
+  vars:
+    cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
+  roles:
+  - role: cockpit
+    when: ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
+      (osm_use_cockpit | bool or osm_use_cockpit is undefined )
 
 # Additional instance config for online deployments
 - name: Additional instance config
@@ -231,3 +244,19 @@
   tasks:
   - file: name={{ g_master_mktemp.stdout }} state=absent
     changed_when: False
+
+- name: Configure service accounts
+  hosts: oo_first_master
+
+  vars:
+    accounts: ["router", "registry"]
+
+  roles:
+  - openshift_serviceaccounts
+
+- name: Create services
+  hosts: oo_first_master
+  roles:
+  - role: openshift_router
+    when: openshift.master.infra_nodes is defined
+  #- role: openshift_registry

+ 2 - 2
playbooks/common/openshift-master/service.yml

@@ -10,9 +10,9 @@
     add_host: name={{ item }} groups=g_service_masters
     with_items: oo_host_group_exp | default([])
 
-- name: Change openshift-master state on master instance(s)
+- name: Change state on master instance(s)
   hosts: g_service_masters
   connection: ssh
   gather_facts: no
   tasks:
-    - service: name=openshift-master state="{{ new_cluster_state }}"
+    - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"

+ 8 - 13
playbooks/common/openshift-node/config.yml

@@ -20,9 +20,10 @@
         local_facts:
           labels: "{{ openshift_node_labels | default(None) }}"
           annotations: "{{ openshift_node_annotations | default(None) }}"
+          schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
   - name: Check status of node certificates
     stat:
-      path: "/etc/openshift/node/{{ item }}"
+      path: "{{ openshift.common.config_base }}/node/{{ item }}"
     with_items:
     - "system:node:{{ openshift.common.hostname }}.crt"
     - "system:node:{{ openshift.common.hostname }}.key"
@@ -35,8 +36,8 @@
       certs_missing: "{{ stat_result.results | map(attribute='stat.exists')
                          | list | intersect([false])}}"
       node_subdir: node-{{ openshift.common.hostname }}
-      config_dir: /etc/openshift/generated-configs/node-{{ openshift.common.hostname }}
-      node_cert_dir: /etc/openshift/node
+      config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
+      node_cert_dir: "{{ openshift.common.config_base }}/node"
 
 - name: Create temp directory for syncing certs
   hosts: localhost
@@ -89,9 +90,9 @@
       path: "{{ node_cert_dir }}"
       state: directory
 
-  # TODO: notify restart openshift-node
+  # TODO: notify restart node
   # possibly test service started time against certificate/config file
-  # timestamps in openshift-node to trigger notify
+  # timestamps in node to trigger notify
   - name: Unarchive the tarball on the node
     unarchive:
       src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
@@ -99,6 +100,7 @@
     when: certs_missing
   roles:
   - openshift_node
+  - role: nickhammond.logrotate
   - role: fluentd_node
     when: openshift.common.use_fluentd | bool
   tasks:
@@ -123,21 +125,14 @@
   - os_env_extras
   - os_env_extras_node
 
-- name: Set scheduleability
+- name: Set schedulability
   hosts: oo_first_master
   vars:
     openshift_nodes: "{{ hostvars
                          | oo_select_keys(groups['oo_nodes_to_config'])
                          | oo_collect('openshift.common.hostname') }}"
-    openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | default([]))
-                                      | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}"
     openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
   pre_tasks:
-  - set_fact:
-      openshift_scheduleable_nodes: "{{ hostvars
-                                      | oo_select_keys(groups['oo_nodes_to_config'] | default([]))
-                                      | oo_collect('openshift.common.hostname')
-                                      | difference(openshift_unscheduleable_nodes) }}"
 
   roles:
   - openshift_manage_node

+ 2 - 2
playbooks/common/openshift-node/service.yml

@@ -10,9 +10,9 @@
     add_host: name={{ item }} groups=g_service_nodes
     with_items: oo_host_group_exp | default([])
 
-- name: Change openshift-node state on node instance(s)
+- name: Change state on node instance(s)
   hosts: g_service_nodes
   connection: ssh
   gather_facts: no
   tasks:
-    - service: name=openshift-node state="{{ new_cluster_state }}"
+    - service: name={{ service_type }}-node state="{{ new_cluster_state }}"

+ 6 - 1
playbooks/gce/openshift-cluster/config.yml

@@ -10,6 +10,8 @@
   - set_fact:
       g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
       g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
+      use_sdn: "{{ do_we_use_openshift_sdn }}"
+      sdn_plugin: "{{ sdn_network_plugin }}"
 
 - include: ../../common/openshift-cluster/config.yml
   vars:
@@ -18,7 +20,10 @@
     g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+    g_nodeonmaster: true
     openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 4
+    openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ gce_private_ip }}"
+    openshift_use_openshift_sdn: "{{ hostvars.localhost.use_sdn  }}"
+    os_sdn_network_plugin_name: "{{ hostvars.localhost.sdn_plugin }}"

+ 49 - 0
playbooks/gce/openshift-cluster/join_node.yml

@@ -0,0 +1,49 @@
+---
+- name: Populate oo_hosts_to_update group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_hosts_to_update
+    add_host:
+      name: "{{ node_ip }}"
+      groups: oo_hosts_to_update
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- name: Populate oo_masters_to_config host group
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+  - name: Evaluate oo_nodes_to_config
+    add_host:
+      name: "{{ node_ip }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_nodes_to_config
+
+  - name: Evaluate oo_first_master
+    add_host:
+      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+      groups: oo_first_master
+    when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+
+#- include: config.yml
+- include: ../../common/openshift-node/config.yml
+  vars:
+    openshift_cluster_id: "{{ cluster_id }}"
+    openshift_debug_level: 4
+    openshift_deployment_type: "{{ deployment_type }}"
+    openshift_hostname: "{{ ansible_default_ipv4.address }}"
+    openshift_use_openshift_sdn: true
+    openshift_node_labels: "{{ lookup('oo_option', 'openshift_node_labels') }} "
+    os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
+    osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
+    osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"

+ 17 - 16
playbooks/gce/openshift-cluster/launch.yml

@@ -34,27 +34,28 @@
       count: "{{ num_infra }}"
   - include: tasks/launch_instances.yml
     vars:
-      instances: "{{ infra_names }}"
+      instances: "{{ node_names }}"
       cluster: "{{ cluster_id }}"
       type: "{{ k8s_type }}"
       g_sub_host_type: "{{ sub_host_type }}"
 
-  - set_fact:
-      a_infra: "{{ infra_names[0] }}"
-  - add_host: name={{ a_infra }} groups=service_master
+  - add_host:
+      name: "{{ master_names.0 }}"
+      groups: service_master
+    when: master_names is defined and master_names.0 is defined
 
 - include: update.yml
-
-- name: Deploy OpenShift Services
-  hosts: service_master
-  connection: ssh
-  gather_facts: yes
-  roles:
-  - openshift_registry
-  - openshift_router
-
-- include: ../../common/openshift-cluster/create_services.yml
-  vars:
-     g_svc_master: "{{ service_master }}"
+#
+#- name: Deploy OpenShift Services
+#  hosts: service_master
+#  connection: ssh
+#  gather_facts: yes
+#  roles:
+#  - openshift_registry
+#  - openshift_router
+#
+#- include: ../../common/openshift-cluster/create_services.yml
+#  vars:
+#     g_svc_master: "{{ service_master }}"
 
 - include: list.yml

+ 2 - 2
playbooks/gce/openshift-cluster/list.yml

@@ -14,11 +14,11 @@
       groups: oo_list_hosts
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+    with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
 
 - name: List instance(s)
   hosts: oo_list_hosts
   gather_facts: no
   tasks:
   - debug:
-      msg: "public ip:{{ hostvars[inventory_hostname].gce_public_ip }} private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"
+      msg: "private ip:{{ hostvars[inventory_hostname].gce_private_ip }}"

+ 26 - 6
playbooks/gce/openshift-cluster/tasks/launch_instances.yml

@@ -10,14 +10,33 @@
     service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
     pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
     project_id: "{{ lookup('env', 'gce_project_id') }}"
+    zone: "{{ lookup('env', 'zone') }}"
+    network: "{{ lookup('env', 'network') }}"
+# unsupported in 1.9.+
+    #service_account_permissions: "datastore,logging-write"
     tags:
       - created-by-{{ lookup('env', 'LOGNAME') |default(cluster, true) }}
       - env-{{ cluster }}
       - host-type-{{ type }}
-      - sub-host-type-{{ sub_host_type }}
+      - sub-host-type-{{ g_sub_host_type }}
       - env-host-type-{{ cluster }}-openshift-{{ type }}
+  when: instances |length > 0
   register: gce
 
+- set_fact:
+    node_label:
+      # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+      region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+      type: "{{ g_sub_host_type }}"
+  when: instances |length > 0 and type == "node"
+
+- set_fact:
+    node_label:
+      # There doesn't seem to be a way to get the region directly, so parse it out of the zone.
+      region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}"
+      type: "{{ type }}"
+  when: instances |length > 0 and type != "node"
+
 - name: Add new instances to groups and set variables needed
   add_host:
     hostname: "{{ item.name }}"
@@ -27,16 +46,17 @@
     groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}"
     gce_public_ip: "{{ item.public_ip }}"
     gce_private_ip: "{{ item.private_ip }}"
-  with_items: gce.instance_data
+    openshift_node_labels: "{{ node_label }}"
+  with_items: gce.instance_data | default([], true)
 
 - name: Wait for ssh
   wait_for: port=22 host={{ item.public_ip }}
-  with_items: gce.instance_data
+  with_items: gce.instance_data | default([], true)
 
 - name: Wait for user setup
   command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup"
   register: result
   until: result.rc == 0
-  retries: 20
-  delay: 10
-  with_items: gce.instance_data
+  retries: 30
+  delay: 5
+  with_items: gce.instance_data | default([], true)

+ 34 - 21
playbooks/gce/openshift-cluster/terminate.yml

@@ -1,25 +1,18 @@
 ---
 - name: Terminate instance(s)
   hosts: localhost
+  connection: local
   gather_facts: no
   vars_files:
   - vars.yml
   tasks:
-  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
+  - set_fact: scratch_group=tag_env-{{ cluster_id }}
   - add_host:
       name: "{{ item }}"
-      groups: oo_hosts_to_terminate, oo_nodes_to_terminate
+      groups: oo_hosts_to_terminate
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
-
-  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
-  - add_host:
-      name: "{{ item }}"
-      groups: oo_hosts_to_terminate, oo_masters_to_terminate
-      ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
-      ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+    with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true))
 
 - name: Unsubscribe VMs
   hosts: oo_hosts_to_terminate
@@ -32,14 +25,34 @@
           lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
             default('no', True) | lower in ['no', 'false']
 
-- include: ../openshift-node/terminate.yml
-  vars:
-    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+- name: Terminate instances(s)
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  vars_files:
+  - vars.yml
+  tasks:
+
+    - name: Terminate instances that were previously launched
+      local_action:
+        module: gce
+        state: 'absent'
+        name: "{{ item }}"
+        service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+        pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+        project_id: "{{ lookup('env', 'gce_project_id') }}"
+        zone: "{{ lookup('env', 'zone') }}"
+      with_items: groups['oo_hosts_to_terminate'] | default([], true)
+      when: item is defined
 
-- include: ../openshift-master/terminate.yml
-  vars:
-    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#- include: ../openshift-node/terminate.yml
+#  vars:
+#    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+#    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+#    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"
+#
+#- include: ../openshift-master/terminate.yml
+#  vars:
+#    gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+#    gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+#    gce_project_id: "{{ lookup('env', 'gce_project_id') }}"

+ 5 - 3
playbooks/gce/openshift-cluster/vars.yml

@@ -1,8 +1,11 @@
 ---
+do_we_use_openshift_sdn: true
+sdn_network_plugin: redhat/openshift-ovs-subnet 
+# os_sdn_network_plugin_name can be ovssubnet or multitenant, see https://docs.openshift.org/latest/architecture/additional_concepts/sdn.html#ovssubnet-plugin-operation
 deployment_vars:
   origin:
-    image: centos-7
-    ssh_user:
+    image: preinstalled-slave-50g-v5
+    ssh_user: root
     sudo: yes
   online:
     image: libra-rhel7
@@ -12,4 +15,3 @@ deployment_vars:
     image: rhel-7
     ssh_user:
     sudo: yes
-

+ 1 - 1
playbooks/libvirt/openshift-cluster/config.yml

@@ -20,5 +20,5 @@
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 4
+    openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"

+ 8 - 0
playbooks/libvirt/openshift-cluster/launch.yml

@@ -17,6 +17,14 @@
 
   - include: tasks/configure_libvirt.yml
 
+  - include: ../../common/openshift-cluster/set_etcd_launch_facts_tasks.yml
+  - include: tasks/launch_instances.yml
+    vars:
+      instances: "{{ etcd_names }}"
+      cluster: "{{ cluster_id }}"
+      type: "{{ k8s_type }}"
+      g_sub_host_type: "default"
+
   - include: ../../common/openshift-cluster/set_master_launch_facts_tasks.yml
   - include: tasks/launch_instances.yml
     vars:

+ 4 - 3
playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml

@@ -63,8 +63,9 @@
   shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | egrep -c ''{{ instances | join("|") }}'''
   register: nb_allocated_ips
   until: nb_allocated_ips.stdout == '{{ instances | length }}'
-  retries: 30
-  delay: 1
+  retries: 60
+  delay: 3
+  when: instances | length != 0
 
 - name: Collect IP addresses of the VMs
   shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases openshift-ansible | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
@@ -72,7 +73,7 @@
   with_items: instances
 
 - set_fact:
-    ips: "{{ scratch_ip.results | oo_collect('stdout') }}"
+    ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}"
 
 - name: Add new instances
   add_host:

+ 1 - 1
playbooks/libvirt/openshift-cluster/templates/network.xml

@@ -8,7 +8,7 @@
   <!-- TODO: query for first available virbr interface available -->
   <bridge name='virbr3' stp='on' delay='0'/>
   <!-- TODO: make overridable -->
-  <domain name='example.com'/>
+  <domain name='example.com' localOnly='yes' />
   <dns>
     <!-- TODO: automatically add host entries -->
   </dns>

+ 1 - 1
playbooks/libvirt/openshift-cluster/templates/user-data

@@ -19,5 +19,5 @@ system_info:
 ssh_authorized_keys:
   - {{ lookup('file', '~/.ssh/id_rsa.pub') }}
 
-bootcmd:
+runcmd:
   - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart

+ 1 - 1
playbooks/openstack/openshift-cluster/config.yml

@@ -15,6 +15,6 @@
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: 4
+    openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_hostname: "{{ ansible_default_ipv4.address }}"

+ 24 - 18
playbooks/openstack/openshift-cluster/files/heat_stack.yaml

@@ -9,21 +9,6 @@ parameters:
     label: Cluster ID
     description: Identifier of the cluster
 
-  num_masters:
-    type: number
-    label: Number of masters
-    description: Number of masters
-
-  num_nodes:
-    type: number
-    label: Number of compute nodes
-    description: Number of compute nodes
-
-  num_infra:
-    type: number
-    label: Number of infrastructure nodes
-    description: Number of infrastructure nodes
-
   cidr:
     type: string
     label: CIDR
@@ -40,6 +25,12 @@ parameters:
     description: Name of the external network
     default: external
 
+  floating_ip_pool:
+    type: string
+    label: Floating IP pool
+    description: Floating IP pool
+    default: external
+
   ssh_public_key:
     type: string
     label: SSH public key
@@ -52,6 +43,21 @@ parameters:
     description: Source of legitimate ssh connections
     default: 0.0.0.0/0
 
+  num_masters:
+    type: number
+    label: Number of masters
+    description: Number of masters
+
+  num_nodes:
+    type: number
+    label: Number of compute nodes
+    description: Number of compute nodes
+
+  num_infra:
+    type: number
+    label: Number of infrastructure nodes
+    description: Number of infrastructure nodes
+
   master_image:
     type: string
     label: Master image
@@ -290,7 +296,7 @@ resources:
           subnet:     { get_resource: subnet }
           secgrp:
             - { get_resource: master-secgrp }
-          floating_network: { get_param: external_net }
+          floating_network: { get_param: floating_ip_pool }
           net_name:
             str_replace:
               template: openshift-ansible-cluster_id-net
@@ -322,7 +328,7 @@ resources:
           subnet:     { get_resource: subnet }
           secgrp:
             - { get_resource: node-secgrp }
-          floating_network: { get_param: external_net }
+          floating_network: { get_param: floating_ip_pool }
           net_name:
             str_replace:
               template: openshift-ansible-cluster_id-net
@@ -355,7 +361,7 @@ resources:
           secgrp:
             - { get_resource: node-secgrp }
             - { get_resource: infra-secgrp }
-          floating_network: { get_param: external_net }
+          floating_network: { get_param: floating_ip_pool }
           net_name:
             str_replace:
               template: openshift-ansible-cluster_id-net

+ 11 - 22
playbooks/openstack/openshift-cluster/launch.yml

@@ -19,30 +19,21 @@
     changed_when: false
     failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
 
-  - name: Create OpenStack Stack
-    command: 'heat stack-create -f {{ openstack_infra_heat_stack }}
-             -P cluster_id={{ cluster_id }}
-             -P dns_nameservers={{ openstack_network_dns | join(",") }}
-             -P cidr={{ openstack_network_cidr }}
-             -P ssh_incoming={{ openstack_ssh_access_from }}
-             -P num_masters={{ num_masters }}
-             -P num_nodes={{ num_nodes }}
-             -P num_infra={{ num_infra }}
-             -P master_image={{ deployment_vars[deployment_type].image }}
-             -P node_image={{ deployment_vars[deployment_type].image }}
-             -P infra_image={{ deployment_vars[deployment_type].image }}
-             -P master_flavor={{ openstack_flavor["master"] }}
-             -P node_flavor={{ openstack_flavor["node"] }}
-             -P infra_flavor={{ openstack_flavor["infra"] }}
-             -P ssh_public_key="{{ openstack_ssh_public_key }}"
-             openshift-ansible-{{ cluster_id }}-stack'
+  - set_fact:
+      heat_stack_action: 'stack-create'
     when: stack_show_result.rc == 1
+  - set_fact:
+      heat_stack_action: 'stack-update'
+    when: stack_show_result.rc == 0
 
-  - name: Update OpenStack Stack
-    command: 'heat stack-update -f {{ openstack_infra_heat_stack }}
+  - name: Create or Update OpenStack Stack
+    command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }}
              -P cluster_id={{ cluster_id }}
-             -P dns_nameservers={{ openstack_network_dns | join(",") }}
              -P cidr={{ openstack_network_cidr }}
+             -P dns_nameservers={{ openstack_network_dns | join(",") }}
+             -P external_net={{ openstack_network_external_net }}
+             -P floating_ip_pool={{ openstack_floating_ip_pool }}
+             -P ssh_public_key="{{ openstack_ssh_public_key }}"
              -P ssh_incoming={{ openstack_ssh_access_from }}
              -P num_masters={{ num_masters }}
              -P num_nodes={{ num_nodes }}
@@ -53,9 +44,7 @@
              -P master_flavor={{ openstack_flavor["master"] }}
              -P node_flavor={{ openstack_flavor["node"] }}
              -P infra_flavor={{ openstack_flavor["infra"] }}
-             -P ssh_public_key="{{ openstack_ssh_public_key }}"
              openshift-ansible-{{ cluster_id }}-stack'
-    when: stack_show_result.rc == 0
 
   - name: Wait for OpenStack Stack readiness
     shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''

+ 0 - 27
playbooks/openstack/openshift-cluster/tasks/configure_openstack.yml

@@ -1,27 +0,0 @@
----
-- name: Check infra
-  command: 'heat stack-show {{ openstack_network_prefix }}-stack'
-  register: stack_show_result
-  changed_when: false
-  failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr
-
-- name: Create infra
-  command: 'heat stack-create -f {{ openstack_infra_heat_stack }} -P cluster-id={{ cluster_id }} -P network-prefix={{ openstack_network_prefix }} -P dns-nameservers={{ openstack_network_dns | join(",") }} -P cidr={{ openstack_network_cidr }} -P ssh-incoming={{ openstack_ssh_access_from }} {{ openstack_network_prefix }}-stack'
-  when: stack_show_result.rc == 1
-
-- name: Update infra
-  command: 'heat stack-update -f {{ openstack_infra_heat_stack }} -P cluster-id={{ cluster_id }} -P network-prefix={{ openstack_network_prefix }} -P dns-nameservers={{ openstack_network_dns | join(",") }} -P cidr={{ openstack_network_cidr }} -P ssh-incoming={{ openstack_ssh_access_from }} {{ openstack_network_prefix }}-stack'
-  when: stack_show_result.rc == 0
-
-- name: Wait for infra readiness
-  shell: 'heat stack-show {{ openstack_network_prefix }}-stack | awk ''$2 == "stack_status" {print $4}'''
-  register: stack_show_status_result
-  until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
-  retries: 30
-  delay: 1
-  failed_when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']
-
-- name: Create ssh keypair
-  nova_keypair:
-    name: "{{ openstack_ssh_keypair }}"
-    public_key: "{{ openstack_ssh_public_key }}"

+ 2 - 6
playbooks/openstack/openshift-cluster/vars.yml

@@ -1,18 +1,14 @@
 ---
 openstack_infra_heat_stack:     "{{ lookup('oo_option', 'infra_heat_stack' ) |
                                     default('files/heat_stack.yaml',         True) }}"
-openstack_network_prefix:       "{{ lookup('oo_option', 'network_prefix'   ) |
-                                    default('openshift-ansible-'+cluster_id, True) }}"
 openstack_network_cidr:         "{{ lookup('oo_option', 'net_cidr'         ) |
                                     default('192.168.' + ( ( 1048576 | random % 256 ) | string() ) + '.0/24', True) }}"
 openstack_network_external_net: "{{ lookup('oo_option', 'external_net'     ) |
                                     default('external',                      True) }}"
-openstack_floating_ip_pools:    "{{ lookup('oo_option', 'floating_ip_pools') |
-                                    default('external',                      True) | oo_split() }}"
+openstack_floating_ip_pool:     "{{ lookup('oo_option', 'floating_ip_pool' ) |
+                                    default('external',                      True) }}"
 openstack_network_dns:          "{{ lookup('oo_option', 'dns'              ) |
                                     default('8.8.8.8,8.8.4.4',               True) | oo_split() }}"
-openstack_ssh_keypair:          "{{ lookup('oo_option', 'keypair'          ) |
-                                    default(lookup('env', 'LOGNAME')+'_key', True) }}"
 openstack_ssh_public_key:       "{{ lookup('file', lookup('oo_option', 'public_key') |
                                     default('~/.ssh/id_rsa.pub',             True)) }}"
 openstack_ssh_access_from:      "{{ lookup('oo_option', 'ssh_from')          |

+ 1 - 1
rel-eng/packages/openshift-ansible-bin

@@ -1 +1 @@
-0.0.18-1 bin/
+0.0.19-1 bin/

+ 1 - 1
rel-eng/packages/openshift-ansible-inventory

@@ -1 +1 @@
-0.0.8-1 inventory/
+0.0.9-1 inventory/

+ 1 - 2
roles/ansible_tower/tasks/main.yaml

@@ -9,6 +9,7 @@
   - ansible
   - telnet
   - ack
+  - pylint
 
 - name: download Tower setup
   get_url: url=http://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-2.1.1.tar.gz dest=/opt/ force=no
@@ -38,5 +39,3 @@
     regexp: "^({{ item.option }})( *)="
     line: '\1\2= {{ item.value }}'
   with_items: config_changes | default([], true)
-    
-

+ 5 - 0
roles/cockpit/defaults/main.yml

@@ -0,0 +1,5 @@
+---
+os_firewall_use_firewalld: false
+os_firewall_allow:
+- service: cockpit-ws
+  port: 9090/tcp

+ 15 - 0
roles/cockpit/meta/main.yml

@@ -0,0 +1,15 @@
+---
+galaxy_info:
+  author: Scott Dodson
+  description: Deploy and Enable cockpit-ws plus optional plugins
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.7
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+dependencies:
+  - { role: os_firewall }

+ 16 - 0
roles/cockpit/tasks/main.yml

@@ -0,0 +1,16 @@
+---
+- name: Install cockpit-ws
+  yum:
+    name: "{{ item }}"
+    state: present
+  with_items:
+    - cockpit-ws
+    - cockpit-shell
+    - cockpit-bridge
+    - "{{ cockpit_plugins }}"
+
+- name: Enable cockpit-ws
+  service:
+    name: cockpit.socket
+    enabled: true
+    state: started

+ 1 - 0
roles/etcd/tasks/main.yml

@@ -38,6 +38,7 @@
   template:
     src: etcd.conf.j2
     dest: /etc/etcd/etcd.conf
+    backup: true
   notify:
     - restart etcd
 

+ 2 - 0
roles/etcd_ca/tasks/main.yml

@@ -18,6 +18,7 @@
 - template:
     dest: "{{ etcd_ca_dir }}/fragments/openssl_append.cnf"
     src: openssl_append.j2
+    backup: true
 
 - assemble:
     src: "{{ etcd_ca_dir }}/fragments"
@@ -37,6 +38,7 @@
     openssl req -config openssl.cnf -newkey rsa:4096
     -keyout ca.key -new -out ca.crt -x509 -extensions etcd_v3_ca_self
     -batch -nodes -subj /CN=etcd-signer@{{ ansible_date_time.epoch }}
+    -days 365
   args:
     chdir: "{{ etcd_ca_dir }}"
     creates: "{{ etcd_ca_dir }}/ca.crt"

+ 7 - 3
roles/fluentd_master/tasks/main.yml

@@ -39,12 +39,16 @@
     owner: 'td-agent'
     mode: 0444
 
-- name: "Pause before restarting td-agent and openshift-master, depending on the number of nodes."
-  pause: seconds={{ ( num_nodes|int < 3 ) | ternary(15, (num_nodes|int * 5)) }}
+- name: wait for etcd to start up
+  wait_for: port=4001 delay=10
+  when: embedded_etcd | bool
+
+- name: wait for etcd peer to start up
+  wait_for: port=7001 delay=10
+  when: embedded_etcd | bool
 
 - name: ensure td-agent is running
   service:
     name: 'td-agent'
     state: started
     enabled: yes
-

+ 38 - 0
roles/lib_zabbix/README.md

@@ -0,0 +1,38 @@
+zabbix
+=========
+
+Automate zabbix tasks.
+
+Requirements
+------------
+
+This requires the openshift_tools rpm be installed for the zbxapi.py library.  It can be found here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now.
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+This depends on the zbxapi.py library located here: https://github.com/openshift/openshift-tools under openshift_tools/monitoring/zbxapi.py for now.
+
+Example Playbook
+----------------
+
+  - zbx_host:
+      server: zab_server
+      user: zab_user
+      password: zab_password
+      name: 'myhost'
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+OpenShift operations, Red Hat, Inc

+ 3 - 0
roles/lib_zabbix/library/__init__.py

@@ -0,0 +1,3 @@
+'''
+ZabbixAPI ansible module
+'''

+ 538 - 0
roles/lib_zabbix/library/zbx_action.py

@@ -0,0 +1,538 @@
+#!/usr/bin/env python
+'''
+ Ansible module for zabbix actions
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+#   Zabbix action ansible module
+#
+#
+#   Copyright 2015 Red Hat Inc.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection, ZabbixAPIError
+
+def exists(content, key='result'):
+    ''' Check if key exists in content or the size of content[key] > 0
+    '''
+    if not content.has_key(key):
+        return False
+
+    if not content[key]:
+        return False
+
+    return True
+
+def conditions_equal(zab_conditions, user_conditions):
+    '''Compare two lists of conditions'''
+    c_type = 'conditiontype'
+    _op = 'operator'
+    val = 'value'
+    if len(user_conditions) != len(zab_conditions):
+        return False
+
+    for zab_cond, user_cond in zip(zab_conditions, user_conditions):
+        if zab_cond[c_type] != str(user_cond[c_type]) or zab_cond[_op] != str(user_cond[_op]) or \
+           zab_cond[val] != str(user_cond[val]):
+            return False
+
+    return True
+
+def filter_differences(zabbix_filters, user_filters):
+    '''Determine the differences from user and zabbix for operations'''
+    rval = {}
+    for key, val in user_filters.items():
+
+        if key == 'conditions':
+            if not conditions_equal(zabbix_filters[key], val):
+                rval[key] = val
+
+        elif zabbix_filters[key] != str(val):
+            rval[key] = val
+
+    return rval
+
+# This logic is quite complex.  We are comparing two lists of dictionaries.
+# The outer for-loops allow us to descend down into both lists at the same time
+# and then walk over the key,val pairs of the incoming user dict's changes
+# or updates.  The if-statements are looking at different sub-object types and
+# comparing them.  The other suggestion on how to write this is to write a recursive
+# compare function but for the time constraints and for complexity I decided to go
+# this route.
+# pylint: disable=too-many-branches
+def operation_differences(zabbix_ops, user_ops):
+    '''Determine the differences from user and zabbix for operations'''
+
+    # if they don't match, take the user options
+    if len(zabbix_ops) != len(user_ops):
+        return user_ops
+
+    rval = {}
+    for zab, user in zip(zabbix_ops, user_ops):
+        for key, val in user.items():
+            if key == 'opconditions':
+                for z_cond, u_cond in zip(zab[key], user[key]):
+                    if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \
+                                ['conditiontype', 'operator', 'value']]):
+                        rval[key] = val
+                        break
+            elif key == 'opmessage':
+                # Verify each passed param matches
+                for op_msg_key, op_msg_val in val.items():
+                    if zab[key][op_msg_key] != str(op_msg_val):
+                        rval[key] = val
+                        break
+
+            elif key == 'opmessage_grp':
+                zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab[key]])
+                usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in val])
+                if usr_grp_ids != zab_grp_ids:
+                    rval[key] = val
+
+            elif key == 'opmessage_usr':
+                zab_usr_ids = set([usr['userid'] for usr in zab[key]])
+                usr_ids = set([usr['userid'] for usr in val])
+                if usr_ids != zab_usr_ids:
+                    rval[key] = val
+
+            elif zab[key] != str(val):
+                rval[key] = val
+    return rval
+
+def get_users(zapi, users):
+    '''get user ids for the given list of user aliases'''
+    rval_users = []
+
+    for user in users:
+        content = zapi.get_content('user',
+                                   'get',
+                                   {'filter': {'alias': user}})
+        rval_users.append({'userid': content['result'][0]['userid']})
+
+    return rval_users
+
+def get_user_groups(zapi, groups):
+    '''get usergroup ids for the given group names'''
+    user_groups = []
+
+    content = zapi.get_content('usergroup',
+                               'get',
+                               {'search': {'name': groups}})
+
+    for usr_grp in content['result']:
+        user_groups.append({'usrgrpid': usr_grp['usrgrpid']})
+
+    return user_groups
+
+def get_mediatype_id_by_name(zapi, m_name):
+    '''get the mediatype id from the mediatype name'''
+    content = zapi.get_content('mediatype',
+                               'get',
+                               {'filter': {'description': m_name}})
+
+    return content['result'][0]['mediatypeid']
+
+def get_priority(priority):
+    ''' determine priority
+    '''
+    prior = 0
+    if 'info' in priority:
+        prior = 1
+    elif 'warn' in priority:
+        prior = 2
+    elif 'avg' == priority or 'ave' in priority:
+        prior = 3
+    elif 'high' in priority:
+        prior = 4
+    elif 'dis' in priority:
+        prior = 5
+
+    return prior
+
+def get_event_source(from_src):
+    '''Translate event source string into its integer value'''
+    choices = ['trigger', 'discovery', 'auto', 'internal']
+    rval = 0
+    try:
+        rval = choices.index(from_src)
+    except ValueError as _:
+        ZabbixAPIError('Value not found for event source [%s]' % from_src)
+
+    return rval
+
+def get_status(inc_status):
+    '''determine status for action'''
+    rval = 1
+    if inc_status == 'enabled':
+        rval = 0
+
+    return rval
+
+def get_condition_operator(inc_operator):
+    ''' determine the condition operator'''
+    vals = {'=': 0,
+            '<>': 1,
+            'like': 2,
+            'not like': 3,
+            'in': 4,
+            '>=': 5,
+            '<=': 6,
+            'not in': 7,
+           }
+
+    return vals[inc_operator]
+
+def get_host_id_by_name(zapi, host_name):
+    '''Get host id by name'''
+    content = zapi.get_content('host',
+                               'get',
+                               {'filter': {'name': host_name}})
+
+    return content['result'][0]['hostid']
+
+def get_trigger_value(inc_trigger):
+    '''determine the proper trigger value'''
+    rval = 1
+    if inc_trigger == 'PROBLEM':
+        rval = 1
+    else:
+        rval = 0
+
+    return rval
+
+def get_template_id_by_name(zapi, t_name):
+    '''get the template id by name'''
+    content = zapi.get_content('template',
+                               'get',
+                               {'filter': {'host': t_name}})
+
+    return content['result'][0]['templateid']
+
+
+def get_host_group_id_by_name(zapi, hg_name):
+    '''Get hostgroup id by name'''
+    content = zapi.get_content('hostgroup',
+                               'get',
+                               {'filter': {'name': hg_name}})
+
+    return content['result'][0]['groupid']
+
+def get_condition_type(event_source, inc_condition):
+    '''determine the condition type'''
+    c_types = {}
+    if event_source == 'trigger':
+        c_types = {'host group': 0,
+                   'host': 1,
+                   'trigger': 2,
+                   'trigger name': 3,
+                   'trigger severity': 4,
+                   'trigger value': 5,
+                   'time period': 6,
+                   'host template': 13,
+                   'application': 15,
+                   'maintenance status': 16,
+                  }
+
+    elif event_source == 'discovery':
+        c_types = {'host IP': 7,
+                   'discovered service type': 8,
+                   'discovered service port': 9,
+                   'discovery status': 10,
+                   'uptime or downtime duration': 11,
+                   'received value': 12,
+                   'discovery rule': 18,
+                   'discovery check': 19,
+                   'proxy': 20,
+                   'discovery object': 21,
+                  }
+
+    elif event_source == 'auto':
+        c_types = {'proxy': 20,
+                   'host name': 22,
+                   'host metadata': 24,
+                  }
+
+    elif event_source == 'internal':
+        c_types = {'host group': 0,
+                   'host': 1,
+                   'host template': 13,
+                   'application': 15,
+                   'event type': 23,
+                  }
+    else:
+        raise ZabbixAPIError('Unkown event source %s' % event_source)
+
+    return c_types[inc_condition]
+
+def get_operation_type(inc_operation):
+    ''' determine the correct operation type'''
+    o_types = {'send message': 0,
+               'remote command': 1,
+               'add host': 2,
+               'remove host': 3,
+               'add to host group': 4,
+               'remove from host group': 5,
+               'link to template': 6,
+               'unlink from template': 7,
+               'enable host': 8,
+               'disable host': 9,
+              }
+
+    return o_types[inc_operation]
+
+def get_action_operations(zapi, inc_operations):
+    '''Convert the operations into syntax for api'''
+    for operation in inc_operations:
+        operation['operationtype'] = get_operation_type(operation['operationtype'])
+        if operation['operationtype'] == 0: # send message.  Need to fix the
+            operation['opmessage']['mediatypeid'] = \
+             get_mediatype_id_by_name(zapi, operation['opmessage']['mediatypeid'])
+            operation['opmessage_grp'] = get_user_groups(zapi, operation.get('opmessage_grp', []))
+            operation['opmessage_usr'] = get_users(zapi, operation.get('opmessage_usr', []))
+            if operation['opmessage']['default_msg']:
+                operation['opmessage']['default_msg'] = 1
+            else:
+                operation['opmessage']['default_msg'] = 0
+
+        # NOT supported for remote commands
+        elif operation['operationtype'] == 1:
+            continue
+
+        # Handle Operation conditions:
+        # Currently there is only 1 available which
+        # is 'event acknowledged'.  In the future
+        # if there are any added we will need to pass this
+        # option to a function and return the correct conditiontype
+        if operation.has_key('opconditions'):
+            for condition in operation['opconditions']:
+                if condition['conditiontype'] == 'event acknowledged':
+                    condition['conditiontype'] = 14
+
+                if condition['operator'] == '=':
+                    condition['operator'] = 0
+
+                if condition['value'] == 'acknowledged':
+                    condition['operator'] = 1
+                else:
+                    condition['operator'] = 0
+
+
+    return inc_operations
+
+def get_operation_evaltype(inc_type):
+    '''get the operation evaltype'''
+    rval = 0
+    if inc_type == 'and/or':
+        rval = 0
+    elif inc_type == 'and':
+        rval = 1
+    elif inc_type == 'or':
+        rval = 2
+    elif inc_type == 'custom':
+        rval = 3
+
+    return rval
+
+def get_action_conditions(zapi, event_source, inc_conditions):
+    '''Convert the conditions into syntax for api'''
+
+    calc_type = inc_conditions.pop('calculation_type')
+    inc_conditions['evaltype'] = get_operation_evaltype(calc_type)
+    for cond in inc_conditions['conditions']:
+
+        cond['operator'] = get_condition_operator(cond['operator'])
+        # Based on conditiontype we need to set the proper value
+        # e.g. conditiontype = hostgroup then the value needs to be a hostgroup id
+        # e.g. conditiontype = host the value needs to be a host id
+        cond['conditiontype'] = get_condition_type(event_source, cond['conditiontype'])
+        if cond['conditiontype'] == 0:
+            cond['value'] = get_host_group_id_by_name(zapi, cond['value'])
+        elif cond['conditiontype'] == 1:
+            cond['value'] = get_host_id_by_name(zapi, cond['value'])
+        elif cond['conditiontype'] == 4:
+            cond['value'] = get_priority(cond['value'])
+
+        elif cond['conditiontype'] == 5:
+            cond['value'] = get_trigger_value(cond['value'])
+        elif cond['conditiontype'] == 13:
+            cond['value'] = get_template_id_by_name(zapi, cond['value'])
+        elif cond['conditiontype'] == 16:
+            cond['value'] = ''
+
+    return inc_conditions
+
+
+def get_send_recovery(send_recovery):
+    '''Get the integer value'''
+    rval = 0
+    if send_recovery:
+        rval = 1
+
+    return rval
+
+# The branches are needed for CRUD and error handling
+# pylint: disable=too-many-branches
+def main():
+    '''
+    ansible zabbix module for zbx_item
+    '''
+
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+            zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+            zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+            zbx_debug=dict(default=False, type='bool'),
+
+            name=dict(default=None, type='str'),
+            event_source=dict(default='trigger', choices=['trigger', 'discovery', 'auto', 'internal'], type='str'),
+            action_subject=dict(default="{TRIGGER.NAME}: {TRIGGER.STATUS}", type='str'),
+            action_message=dict(default="{TRIGGER.NAME}: {TRIGGER.STATUS}\r\n" +
+                                "Last value: {ITEM.LASTVALUE}\r\n\r\n{TRIGGER.URL}", type='str'),
+            reply_subject=dict(default="{TRIGGER.NAME}: {TRIGGER.STATUS}", type='str'),
+            reply_message=dict(default="Trigger: {TRIGGER.NAME}\r\nTrigger status: {TRIGGER.STATUS}\r\n" +
+                               "Trigger severity: {TRIGGER.SEVERITY}\r\nTrigger URL: {TRIGGER.URL}\r\n\r\n" +
+                               "Item values:\r\n\r\n1. {ITEM.NAME1} ({HOST.NAME1}:{ITEM.KEY1}): " +
+                               "{ITEM.VALUE1}\r\n2. {ITEM.NAME2} ({HOST.NAME2}:{ITEM.KEY2}): " +
+                               "{ITEM.VALUE2}\r\n3. {ITEM.NAME3} ({HOST.NAME3}:{ITEM.KEY3}): " +
+                               "{ITEM.VALUE3}", type='str'),
+            send_recovery=dict(default=False, type='bool'),
+            status=dict(default=None, type='str'),
+            escalation_time=dict(default=60, type='int'),
+            conditions_filter=dict(default=None, type='dict'),
+            operations=dict(default=None, type='list'),
+            state=dict(default='present', type='str'),
+        ),
+        #supports_check_mode=True
+    )
+
+    zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+                                      module.params['zbx_user'],
+                                      module.params['zbx_password'],
+                                      module.params['zbx_debug']))
+
+    #Set the instance and the template for the rest of the calls
+    zbx_class_name = 'action'
+    state = module.params['state']
+
+    content = zapi.get_content(zbx_class_name,
+                               'get',
+                               {'search': {'name': module.params['name']},
+                                'selectFilter': 'extend',
+                                'selectOperations': 'extend',
+                               })
+
+    #******#
+    # GET
+    #******#
+    if state == 'list':
+        module.exit_json(changed=False, results=content['result'], state="list")
+
+    #******#
+    # DELETE
+    #******#
+    if state == 'absent':
+        if not exists(content):
+            module.exit_json(changed=False, state="absent")
+
+        content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['itemid']])
+        module.exit_json(changed=True, results=content['result'], state="absent")
+
+    # Create and Update
+    if state == 'present':
+
+        conditions = get_action_conditions(zapi, module.params['event_source'], module.params['conditions_filter'])
+        operations = get_action_operations(zapi, module.params['operations'])
+        params = {'name': module.params['name'],
+                  'esc_period': module.params['escalation_time'],
+                  'eventsource': get_event_source(module.params['event_source']),
+                  'status': get_status(module.params['status']),
+                  'def_shortdata': module.params['action_subject'],
+                  'def_longdata': module.params['action_message'],
+                  'r_shortdata': module.params['reply_subject'],
+                  'r_longdata': module.params['reply_message'],
+                  'recovery_msg': get_send_recovery(module.params['send_recovery']),
+                  'filter': conditions,
+                  'operations': operations,
+                 }
+
+        # Remove any None valued params
+        _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+        #******#
+        # CREATE
+        #******#
+        if not exists(content):
+            content = zapi.get_content(zbx_class_name, 'create', params)
+
+            if content.has_key('error'):
+                module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+            module.exit_json(changed=True, results=content['result'], state='present')
+
+
+        ########
+        # UPDATE
+        ########
+        _ = params.pop('hostid', None)
+        differences = {}
+        zab_results = content['result'][0]
+        for key, value in params.items():
+
+            if key == 'operations':
+                ops = operation_differences(zab_results[key], value)
+                if ops:
+                    differences[key] = ops
+
+            elif key == 'filter':
+                filters = filter_differences(zab_results[key], value)
+                if filters:
+                    differences[key] = filters
+
+            elif zab_results[key] != value and zab_results[key] != str(value):
+                differences[key] = value
+
+        if not differences:
+            module.exit_json(changed=False, results=zab_results, state="present")
+
+        # We have differences and need to update.
+        # action update requires an id, filters, and operations
+        differences['actionid'] = zab_results['actionid']
+        differences['operations'] = params['operations']
+        differences['filter'] = params['filter']
+        content = zapi.get_content(zbx_class_name, 'update', differences)
+
+        if content.has_key('error'):
+            module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
+        module.exit_json(changed=True, results=content['result'], state="present")
+
+    module.exit_json(failed=True,
+                     changed=False,
+                     results='Unknown state passed. %s' % state,
+                     state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets.  These are required
+from ansible.module_utils.basic import *
+
+main()

+ 41 - 59
roles/os_zabbix/library/zbx_item.py

@@ -1,10 +1,10 @@
 #!/usr/bin/env python
 '''
- Ansible module for zabbix items
+Ansible module for application
 '''
 # vim: expandtab:tabstop=4:shiftwidth=4
 #
-#   Zabbix item ansible module
+#   Zabbix application ansible module
 #
 #
 #   Copyright 2015 Red Hat Inc.
@@ -41,72 +41,52 @@ def exists(content, key='result'):
 
     return True
 
-def get_value_type(value_type):
+def get_template_ids(zapi, template_name):
     '''
-    Possible values:
-    0 - numeric float;
-    1 - character;
-    2 - log;
-    3 - numeric unsigned;
-    4 - text
+    get related templates
     '''
-    vtype = 0
-    if 'int' in value_type:
-        vtype = 3
-    elif 'char' in value_type:
-        vtype = 1
-    elif 'str' in value_type:
-        vtype = 4
-
-    return vtype
+    template_ids = []
+    # Fetch templates by name
+    content = zapi.get_content('template',
+                               'get',
+                               {'search': {'host': template_name}})
+    if content.has_key('result'):
+        template_ids.append(content['result'][0]['templateid'])
+    return template_ids
 
 def main():
-    '''
-    ansible zabbix module for zbx_item
+    ''' Ansible module for application
     '''
 
     module = AnsibleModule(
         argument_spec=dict(
-            server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
-            user=dict(default=None, type='str'),
-            password=dict(default=None, type='str'),
-            name=dict(default=None, type='str'),
-            key=dict(default=None, type='str'),
+            zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+            zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+            zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+            zbx_debug=dict(default=False, type='bool'),
+            name=dict(default=None, type='str', required=True),
             template_name=dict(default=None, type='str'),
-            zabbix_type=dict(default=2, type='int'),
-            value_type=dict(default='int', type='str'),
-            applications=dict(default=[], type='list'),
-            debug=dict(default=False, type='bool'),
             state=dict(default='present', type='str'),
         ),
         #supports_check_mode=True
     )
 
-    user = module.params.get('user', os.environ['ZABBIX_USER'])
-    passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD'])
-
-    zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+    zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+                                      module.params['zbx_user'],
+                                      module.params['zbx_password'],
+                                      module.params['zbx_debug']))
 
-    #Set the instance and the template for the rest of the calls
-    zbx_class_name = 'item'
-    idname = "itemid"
+    #Set the instance and the application for the rest of the calls
+    zbx_class_name = 'application'
+    idname = 'applicationid'
+    aname = module.params['name']
     state = module.params['state']
-    key = module.params['key']
-    template_name = module.params['template_name']
-
-    content = zapi.get_content('template', 'get', {'search': {'host': template_name}})
-    templateid = None
-    if content['result']:
-        templateid = content['result'][0]['templateid']
-    else:
-        module.exit_json(changed=False,
-                         results='Error: Could find template with name %s for item.' % template_name,
-                         state="Unkown")
-
+    # get a applicationid, see if it exists
+    tids = get_template_ids(zapi, module.params['template_name'])
     content = zapi.get_content(zbx_class_name,
                                'get',
-                               {'search': {'key_': key},
-                                'selectApplications': 'applicationid',
+                               {'search': {'name': aname},
+                                'templateids': tids[0],
                                })
     if state == 'list':
         module.exit_json(changed=False, results=content['result'], state="list")
@@ -119,12 +99,8 @@ def main():
         module.exit_json(changed=True, results=content['result'], state="absent")
 
     if state == 'present':
-        params = {'name': module.params.get('name', module.params['key']),
-                  'key_': key,
-                  'hostid': templateid,
-                  'type': module.params['zabbix_type'],
-                  'value_type': get_value_type(module.params['value_type']),
-                  'applications': module.params['applications'],
+        params = {'hostid': tids[0],
+                  'name': aname,
                  }
 
         if not exists(content):
@@ -136,16 +112,22 @@ def main():
         differences = {}
         zab_results = content['result'][0]
         for key, value in params.items():
-
-            if zab_results[key] != value and zab_results[key] != str(value):
+            if key == 'templates' and zab_results.has_key('parentTemplates'):
+                if zab_results['parentTemplates'] != value:
+                    differences[key] = value
+            elif zab_results[key] != str(value) and zab_results[key] != value:
                 differences[key] = value
 
         if not differences:
-            module.exit_json(changed=False, results=zab_results, state="present")
+            module.exit_json(changed=False, results=content['result'], state="present")
 
         # We have differences and need to update
         differences[idname] = zab_results[idname]
         content = zapi.get_content(zbx_class_name, 'update', differences)
+
+        if content.has_key('error'):
+            module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
         module.exit_json(changed=True, results=content['result'], state="present")
 
     module.exit_json(failed=True,

+ 205 - 0
roles/lib_zabbix/library/zbx_discoveryrule.py

@@ -0,0 +1,205 @@
+#!/usr/bin/env python
+'''
+Zabbix discovery rule ansible module
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+#   Copyright 2015 Red Hat Inc.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# This is in place because each module looks similar to each other.
+# These need duplicate code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+    ''' Check if key exists in content or the size of content[key] > 0
+    '''
+    if not content.has_key(key):
+        return False
+
+    if not content[key]:
+        return False
+
+    return True
+
+def get_template(zapi, template_name):
+    '''get a template by name
+    '''
+    content = zapi.get_content('template',
+                               'get',
+                               {'search': {'host': template_name},
+                                'output': 'extend',
+                                'selectInterfaces': 'interfaceid',
+                               })
+    if not content['result']:
+        return None
+    return content['result'][0]
+
+def get_type(vtype):
+    '''
+    Determine which type of discovery rule this is
+    '''
+    _types = {'agent': 0,
+              'SNMPv1': 1,
+              'trapper': 2,
+              'simple': 3,
+              'SNMPv2': 4,
+              'internal': 5,
+              'SNMPv3': 6,
+              'active': 7,
+              'external': 10,
+              'database monitor': 11,
+              'ipmi': 12,
+              'ssh': 13,
+              'telnet': 14,
+              'JMX': 16,
+             }
+
+    for typ in _types.keys():
+        if vtype in typ or vtype == typ:
+            _vtype = _types[typ]
+            break
+    else:
+        _vtype = 2
+
+    return _vtype
+
+def main():
+    '''
+    Ansible module for zabbix discovery rules
+    '''
+
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+            zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+            zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+            zbx_debug=dict(default=False, type='bool'),
+            name=dict(default=None, type='str'),
+            key=dict(default=None, type='str'),
+            description=dict(default=None, type='str'),
+            interfaceid=dict(default=None, type='int'),
+            ztype=dict(default='trapper', type='str'),
+            delay=dict(default=60, type='int'),
+            lifetime=dict(default=30, type='int'),
+            template_name=dict(default=[], type='list'),
+            state=dict(default='present', type='str'),
+        ),
+        #supports_check_mode=True
+    )
+
+    zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+                                      module.params['zbx_user'],
+                                      module.params['zbx_password'],
+                                      module.params['zbx_debug']))
+
+    #Set the instance and the template for the rest of the calls
+    zbx_class_name = 'discoveryrule'
+    idname = "itemid"
+    dname = module.params['name']
+    state = module.params['state']
+    template = get_template(zapi, module.params['template_name'])
+
+    # selectInterfaces doesn't appear to be working but is needed.
+    content = zapi.get_content(zbx_class_name,
+                               'get',
+                               {'search': {'name': dname},
+                                'templateids': template['templateid'],
+                                #'selectDServices': 'extend',
+                                #'selectDChecks': 'extend',
+                                #'selectDhosts': 'dhostid',
+                               })
+
+    #******#
+    # GET
+    #******#
+    if state == 'list':
+        module.exit_json(changed=False, results=content['result'], state="list")
+
+    #******#
+    # DELETE
+    #******#
+    if state == 'absent':
+        if not exists(content):
+            module.exit_json(changed=False, state="absent")
+
+        content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
+        module.exit_json(changed=True, results=content['result'], state="absent")
+
+
+    # Create and Update
+    if state == 'present':
+        params = {'name': dname,
+                  'key_':  module.params['key'],
+                  'hostid':  template['templateid'],
+                  'interfaceid': module.params['interfaceid'],
+                  'lifetime': module.params['lifetime'],
+                  'type': get_type(module.params['ztype']),
+                  'description': module.params['description'],
+                 }
+        if params['type'] in [2, 5, 7, 11]:
+            params.pop('interfaceid')
+
+        # Remove any None valued params
+        _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+        #******#
+        # CREATE
+        #******#
+        if not exists(content):
+            content = zapi.get_content(zbx_class_name, 'create', params)
+
+            if content.has_key('error'):
+                module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+            module.exit_json(changed=True, results=content['result'], state='present')
+
+        ########
+        # UPDATE
+        ########
+        differences = {}
+        zab_results = content['result'][0]
+        for key, value in params.items():
+
+            if zab_results[key] != value and zab_results[key] != str(value):
+                differences[key] = value
+
+        if not differences:
+            module.exit_json(changed=False, results=zab_results, state="present")
+
+        # We have differences and need to update
+        differences[idname] = zab_results[idname]
+        content = zapi.get_content(zbx_class_name, 'update', differences)
+
+        if content.has_key('error'):
+            module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
+        module.exit_json(changed=True, results=content['result'], state="present")
+
+    module.exit_json(failed=True,
+                     changed=False,
+                     results='Unknown state passed. %s' % state,
+                     state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets.  This are required
+from ansible.module_utils.basic import *
+
+main()

+ 20 - 19
roles/os_zabbix/library/zbx_host.py

@@ -60,7 +60,7 @@ def get_template_ids(zapi, template_names):
     for template_name in template_names:
         content = zapi.get_content('template', 'get', {'search': {'host': template_name}})
         if content.has_key('result'):
-            template_ids.append({'templateid': content['results'][0]['templateid']})
+            template_ids.append({'templateid': content['result'][0]['templateid']})
     return template_ids
 
 def main():
@@ -70,23 +70,23 @@ def main():
 
     module = AnsibleModule(
         argument_spec=dict(
-            server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
-            user=dict(default=None, type='str'),
-            password=dict(default=None, type='str'),
+            zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+            zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+            zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+            zbx_debug=dict(default=False, type='bool'),
             name=dict(default=None, type='str'),
             hostgroup_names=dict(default=[], type='list'),
             template_names=dict(default=[], type='list'),
-            debug=dict(default=False, type='bool'),
             state=dict(default='present', type='str'),
-            interfaces=dict(default=[], type='list'),
+            interfaces=dict(default=None, type='list'),
         ),
         #supports_check_mode=True
     )
 
-    user = module.params.get('user', os.environ['ZABBIX_USER'])
-    passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD'])
-
-    zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+    zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+                                      module.params['zbx_user'],
+                                      module.params['zbx_password'],
+                                      module.params['zbx_debug']))
 
     #Set the instance and the template for the rest of the calls
     zbx_class_name = 'host'
@@ -113,16 +113,17 @@ def main():
         module.exit_json(changed=True, results=content['result'], state="absent")
 
     if state == 'present':
+        ifs = module.params['interfaces'] or [{'type':  1,         # interface type, 1 = agent
+                                               'main':  1,         # default interface? 1 = true
+                                               'useip':  1,        # connect via ip instead of dns? 1 = true
+                                               'ip':  '127.0.0.1', # ip address for the interface
+                                               'dns':  '',         # dns for host
+                                               'port':  '10050',   # port for interface? 10050
+                                              }]
         params = {'host': hname,
-                  'groups':  get_group_ids(zapi, module.params('hostgroup_names')),
-                  'templates':  get_template_ids(zapi, module.params('template_names')),
-                  'interfaces': module.params.get('interfaces', [{'type':  1,         # interface type, 1 = agent
-                                                                  'main':  1,         # default interface? 1 = true
-                                                                  'useip':  1,        # default interface? 1 = true
-                                                                  'ip':  '127.0.0.1', # default interface? 1 = true
-                                                                  'dns':  '',         # dns for host
-                                                                  'port':  '10050',   # port for interface? 10050
-                                                                 }])
+                  'groups':  get_group_ids(zapi, module.params['hostgroup_names']),
+                  'templates':  get_template_ids(zapi, module.params['template_names']),
+                  'interfaces': ifs,
                  }
 
         if not exists(content):

+ 8 - 8
roles/os_zabbix/library/zbx_hostgroup.py

@@ -46,20 +46,20 @@ def main():
 
     module = AnsibleModule(
         argument_spec=dict(
-            server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
-            user=dict(default=None, type='str'),
-            password=dict(default=None, type='str'),
+            zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+            zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+            zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+            zbx_debug=dict(default=False, type='bool'),
             name=dict(default=None, type='str'),
-            debug=dict(default=False, type='bool'),
             state=dict(default='present', type='str'),
         ),
         #supports_check_mode=True
     )
 
-    user = module.params.get('user', os.environ['ZABBIX_USER'])
-    passwd = module.params.get('password', os.environ['ZABBIX_PASSWORD'])
-
-    zapi = ZabbixAPI(ZabbixConnection(module.params['server'], user, passwd, module.params['debug']))
+    zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+                                      module.params['zbx_user'],
+                                      module.params['zbx_password'],
+                                      module.params['zbx_debug']))
 
     #Set the instance and the template for the rest of the calls
     zbx_class_name = 'hostgroup'

+ 250 - 0
roles/lib_zabbix/library/zbx_item.py

@@ -0,0 +1,250 @@
+#!/usr/bin/env python
+'''
+ Ansible module for zabbix items
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+#   Zabbix item ansible module
+#
+#
+#   Copyright 2015 Red Hat Inc.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+
+# This is in place because the modules look very similar to each other.
+# They need duplicated code as their behavior is very similar
+# but different for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+    ''' Check if key exists in content or the size of content[key] > 0
+    '''
+    if not content.has_key(key):
+        return False
+
+    if not content[key]:
+        return False
+
+    return True
+
+def get_value_type(value_type):
+    '''
+    Possible values:
+    0 - numeric float;
+    1 - character;
+    2 - log;
+    3 - numeric unsigned;
+    4 - text
+    '''
+    vtype = 0
+    if 'int' in value_type:
+        vtype = 3
+    elif 'log' in value_type:
+        vtype = 2
+    elif 'char' in value_type:
+        vtype = 1
+    elif 'str' in value_type:
+        vtype = 4
+
+    return vtype
+
+def get_app_ids(application_names, app_name_ids):
+    ''' get application ids from names
+    '''
+    applications = []
+    if application_names:
+        for app in application_names:
+            applications.append(app_name_ids[app])
+
+    return applications
+
+def get_template_id(zapi, template_name):
+    '''
+    get related templates
+    '''
+    template_ids = []
+    app_ids = {}
+    # Fetch templates by name
+    content = zapi.get_content('template',
+                               'get',
+                               {'search': {'host': template_name},
+                                'selectApplications': ['applicationid', 'name']})
+    if content.has_key('result'):
+        template_ids.append(content['result'][0]['templateid'])
+        for app in content['result'][0]['applications']:
+            app_ids[app['name']] = app['applicationid']
+
+    return template_ids, app_ids
+
+def get_multiplier(inval):
+    ''' Determine the multiplier
+    '''
+    if inval == None or inval == '':
+        return None, 0
+
+    rval = None
+    try:
+        rval = int(inval)
+    except ValueError:
+        pass
+
+    if rval:
+        return rval, 1
+
+    return rval, 0
+
+# The branches are needed for CRUD and error handling
+# pylint: disable=too-many-branches
+def main():
+    '''
+    ansible zabbix module for zbx_item
+    '''
+
+    module = AnsibleModule(
+        argument_spec=dict(
+            zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+            zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+            zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+            zbx_debug=dict(default=False, type='bool'),
+            name=dict(default=None, type='str'),
+            key=dict(default=None, type='str'),
+            template_name=dict(default=None, type='str'),
+            zabbix_type=dict(default=2, type='int'),
+            value_type=dict(default='int', type='str'),
+            interval=dict(default=60, type='int'),
+            delta=dict(default=0, type='int'),
+            multiplier=dict(default=None, type='str'),
+            description=dict(default=None, type='str'),
+            units=dict(default=None, type='str'),
+            applications=dict(default=None, type='list'),
+            state=dict(default='present', type='str'),
+        ),
+        #supports_check_mode=True
+    )
+
+    zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+                                      module.params['zbx_user'],
+                                      module.params['zbx_password'],
+                                      module.params['zbx_debug']))
+
+    #Set the instance and the template for the rest of the calls
+    zbx_class_name = 'item'
+    state = module.params['state']
+
+    templateid, app_name_ids = get_template_id(zapi, module.params['template_name'])
+
+    # Fail if a template was not found matching the name
+    if not templateid:
+        module.exit_json(failed=True,
+                         changed=False,
+                         results='Error: Could find template with name %s for item.' % module.params['template_name'],
+                         state="Unkown")
+
+    content = zapi.get_content(zbx_class_name,
+                               'get',
+                               {'search': {'key_': module.params['key']},
+                                'selectApplications': 'applicationid',
+                                'templateids': templateid,
+                               })
+
+    #******#
+    # GET
+    #******#
+    if state == 'list':
+        module.exit_json(changed=False, results=content['result'], state="list")
+
+    #******#
+    # DELETE
+    #******#
+    if state == 'absent':
+        if not exists(content):
+            module.exit_json(changed=False, state="absent")
+
+        content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['itemid']])
+        module.exit_json(changed=True, results=content['result'], state="absent")
+
+    # Create and Update
+    if state == 'present':
+
+        formula, use_multiplier = get_multiplier(module.params['multiplier'])
+        params = {'name': module.params.get('name', module.params['key']),
+                  'key_': module.params['key'],
+                  'hostid': templateid[0],
+                  'type': module.params['zabbix_type'],
+                  'value_type': get_value_type(module.params['value_type']),
+                  'applications': get_app_ids(module.params['applications'], app_name_ids),
+                  'formula': formula,
+                  'multiplier': use_multiplier,
+                  'description': module.params['description'],
+                  'units': module.params['units'],
+                  'delay': module.params['interval'],
+                  'delta': module.params['delta'],
+                 }
+
+        # Remove any None valued params
+        _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+        #******#
+        # CREATE
+        #******#
+        if not exists(content):
+            content = zapi.get_content(zbx_class_name, 'create', params)
+
+            if content.has_key('error'):
+                module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+            module.exit_json(changed=True, results=content['result'], state='present')
+
+
+        ########
+        # UPDATE
+        ########
+        _ = params.pop('hostid', None)
+        differences = {}
+        zab_results = content['result'][0]
+        for key, value in params.items():
+
+            if key == 'applications':
+                app_ids = [item['applicationid'] for item in zab_results[key]]
+                if set(app_ids) != set(value):
+                    differences[key] = value
+
+            elif zab_results[key] != value and zab_results[key] != str(value):
+                differences[key] = value
+
+        if not differences:
+            module.exit_json(changed=False, results=zab_results, state="present")
+
+        # We have differences and need to update
+        differences['itemid'] = zab_results['itemid']
+        content = zapi.get_content(zbx_class_name, 'update', differences)
+
+        if content.has_key('error'):
+            module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
+        module.exit_json(changed=True, results=content['result'], state="present")
+
+    module.exit_json(failed=True,
+                     changed=False,
+                     results='Unknown state passed. %s' % state,
+                     state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets.  These are required
+from ansible.module_utils.basic import *
+
+main()

+ 0 - 0
roles/lib_zabbix/library/zbx_itemprototype.py


Some files were not shown because too many files changed in this diff