
Merge pull request #1059 from twiest/master

sync master -> prod branch
Thomas Wiest 9 years ago
commit 4dfe16e0e5
100 changed files with 522 additions and 389 deletions
  1. 1 1
      .tito/packages/openshift-ansible
  2. 3 2
      README_libvirt.md
  3. 1 1
      README_origin.md
  4. 6 6
      bin/openshift_ansible/awsutil.py
  5. 4 4
      bin/oscp
  6. 4 4
      bin/ossh
  7. 3 3
      bin/ossh_bash_completion
  8. 3 3
      bin/ossh_zsh_completion
  9. 1 1
      bin/zsh_functions/_ossh
  10. 47 0
      docs/best_practices_guide.adoc
  11. 1 0
      filter_plugins/oo_filters.py
  12. 3 0
      inventory/byo/hosts.aep.example
  13. 3 0
      inventory/byo/hosts.origin.example
  14. 3 0
      inventory/byo/hosts.ose.example
  15. 17 1
      openshift-ansible.spec
  16. 11 9
      playbooks/adhoc/create_pv/create_pv.yaml
  17. 1 1
      playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
  18. 1 1
      playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
  19. 4 0
      playbooks/adhoc/noc/create_host.yml
  20. 2 0
      playbooks/adhoc/noc/create_maintenance.yml
  21. 2 0
      playbooks/adhoc/noc/get_zabbix_problems.yml
  22. 1 1
      playbooks/adhoc/s3_registry/s3_registry.yml
  23. 2 34
      playbooks/adhoc/uninstall.yml
  24. 2 0
      playbooks/adhoc/zabbix_setup/clean_zabbix.yml
  25. 2 0
      playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
  26. 2 0
      playbooks/aws/ansible-tower/config.yml
  27. 3 2
      playbooks/aws/ansible-tower/launch.yml
  28. 1 0
      playbooks/aws/openshift-cluster/addNodes.yml
  29. 6 4
      playbooks/aws/openshift-cluster/config.yml
  30. 1 0
      playbooks/aws/openshift-cluster/launch.yml
  31. 2 0
      playbooks/aws/openshift-cluster/list.yml
  32. 6 4
      playbooks/aws/openshift-cluster/scaleup.yml
  33. 4 2
      playbooks/aws/openshift-cluster/service.yml
  34. 1 6
      playbooks/aws/openshift-cluster/tasks/launch_instances.yml
  35. 3 1
      playbooks/aws/openshift-cluster/terminate.yml
  36. 10 5
      playbooks/aws/openshift-cluster/update.yml
  37. 4 4
      playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
  38. 4 4
      playbooks/byo/openshift-cluster/config.yml
  39. 4 4
      playbooks/byo/openshift-cluster/scaleup.yml
  40. 4 4
      playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
  41. 4 4
      playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
  42. 23 21
      playbooks/common/openshift-cluster/evaluate_groups.yml
  43. 2 2
      playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
  44. 23 13
      playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
  45. 2 2
      playbooks/common/openshift-etcd/config.yml
  46. 2 0
      playbooks/common/openshift-etcd/service.yml
  47. 8 5
      playbooks/common/openshift-master/config.yml
  48. 2 0
      playbooks/common/openshift-master/service.yml
  49. 2 2
      playbooks/common/openshift-node/config.yml
  50. 2 0
      playbooks/common/openshift-node/service.yml
  51. 6 4
      playbooks/gce/openshift-cluster/config.yml
  52. 11 2
      playbooks/gce/openshift-cluster/join_node.yml
  53. 1 0
      playbooks/gce/openshift-cluster/launch.yml
  54. 2 0
      playbooks/gce/openshift-cluster/list.yml
  55. 4 4
      playbooks/gce/openshift-cluster/service.yml
  56. 0 1
      playbooks/gce/openshift-cluster/tasks/launch_instances.yml
  57. 2 0
      playbooks/gce/openshift-cluster/terminate.yml
  58. 8 3
      playbooks/gce/openshift-cluster/update.yml
  59. 2 1
      playbooks/gce/openshift-cluster/wip.yml
  60. 6 4
      playbooks/libvirt/openshift-cluster/config.yml
  61. 3 0
      playbooks/libvirt/openshift-cluster/launch.yml
  62. 4 0
      playbooks/libvirt/openshift-cluster/list.yml
  63. 4 2
      playbooks/libvirt/openshift-cluster/service.yml
  64. 9 2
      playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
  65. 0 1
      playbooks/libvirt/openshift-cluster/templates/domain.xml
  66. 4 0
      playbooks/libvirt/openshift-cluster/terminate.yml
  67. 9 3
      playbooks/libvirt/openshift-cluster/update.yml
  68. 4 2
      playbooks/libvirt/openshift-cluster/vars.yml
  69. 6 4
      playbooks/openstack/openshift-cluster/config.yml
  70. 0 6
      playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml
  71. 5 4
      playbooks/openstack/openshift-cluster/launch.yml
  72. 4 0
      playbooks/openstack/openshift-cluster/list.yml
  73. 2 0
      playbooks/openstack/openshift-cluster/terminate.yml
  74. 9 3
      playbooks/openstack/openshift-cluster/update.yml
  75. 1 10
      roles/ansible/tasks/main.yml
  76. 1 1
      roles/ansible_tower/tasks/main.yaml
  77. 1 1
      roles/ansible_tower_cli/tasks/main.yml
  78. 1 15
      roles/cockpit/tasks/main.yml
  79. 1 9
      roles/copr_cli/tasks/main.yml
  80. 2 7
      roles/docker/tasks/main.yml
  81. 1 6
      roles/etcd/tasks/main.yml
  82. 1 7
      roles/flannel/tasks/main.yml
  83. 1 10
      roles/fluentd_master/tasks/main.yml
  84. 1 10
      roles/fluentd_node/tasks/main.yml
  85. 1 10
      roles/haproxy/tasks/main.yml
  86. 1 6
      roles/kube_nfs_volumes/tasks/main.yml
  87. 2 7
      roles/kube_nfs_volumes/tasks/nfs.yml
  88. 120 6
      roles/lib_zabbix/library/zbx_action.py
  89. 1 1
      roles/nickhammond.logrotate/tasks/main.yml
  90. 1 13
      roles/openshift_ansible_inventory/tasks/main.yml
  91. 7 0
      roles/openshift_common/tasks/main.yml
  92. 8 1
      roles/openshift_examples/README.md
  93. 1 6
      roles/openshift_expand_partition/tasks/main.yml
  94. 1 0
      roles/openshift_facts/library/openshift_facts.py
  95. 1 10
      roles/openshift_facts/tasks/main.yml
  96. 4 21
      roles/openshift_master/tasks/main.yml
  97. 1 1
      roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2
  98. 1 8
      roles/openshift_master_ca/tasks/main.yml
  99. 3 16
      roles/openshift_node/tasks/main.yml
  100. 0 0
      roles/openshift_node/tasks/storage_plugins/ceph.yml

+ 1 - 1
.tito/packages/openshift-ansible

@@ -1 +1 @@
-3.0.19-1 ./
+3.0.20-1 ./

+ 3 - 2
README_libvirt.md

@@ -115,9 +115,10 @@ Configuration
 
 The following options can be passed via the `-o` flag of the `create` command or as environment variables:
 
-* `image_url` (default to `http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2`): URL of the QCOW2 image to download
+* `image_url` (default to `http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz`): URL of the QCOW2 image to download
 * `image_name` (default to `CentOS-7-x86_64-GenericCloud.qcow2`): Name of the QCOW2 image to boot the VMs on
-* `image_sha256` (default to `e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab`): Expected SHA256 checksum of the downloaded image
+* `image_compression` (default to `xz`): Source QCOW2 compression (only xz supported at this time)
+* `image_sha256` (default to `9461006300d65172f5668d8875f2aad7b54f7ba4e9c5435d65a84a5a2d66e39b`): Expected SHA256 checksum of the downloaded image
 * `skip_image_download` (default to `no`): Skip QCOW2 image download. This requires the `image_name` QCOW2 image to be already present in `$HOME/libvirt-storage-pool-openshift-ansible`
 
 Creating a cluster

+ 1 - 1
README_origin.md

@@ -15,7 +15,7 @@
   * There is currently a known issue with ansible-1.9.0, you can downgrade to 1.8.4 on Fedora by installing one of the builds from Koji: http://koji.fedoraproject.org/koji/packageinfo?packageID=13842
   * Available in Fedora channels
   * Available for EL with EPEL and Optional channel
-* One or more RHEL 7.1 or CentOS 7.1 VMs
+* One or more RHEL 7.1+, CentOS 7.1+, or Fedora 23+ VMs
 * Either ssh key based auth for the root user or ssh key based auth for a user
   with sudo access (no password)
 * A checkout of openshift-ansible from https://github.com/openshift/openshift-ansible/

+ 6 - 6
bin/openshift_ansible/awsutil.py

@@ -58,7 +58,7 @@ class AwsUtil(object):
 
     def get_environments(self):
         """Searches for env tags in the inventory and returns all of the envs found."""
-        pattern = re.compile(r'^tag_environment_(.*)')
+        pattern = re.compile(r'^tag_env_(.*)')
 
         envs = []
         inv = self.get_inventory()
@@ -106,13 +106,13 @@ class AwsUtil(object):
         inst_by_env = {}
         for _, host in inv['_meta']['hostvars'].items():
             # If you don't have an environment tag, we're going to ignore you
-            if 'ec2_tag_environment' not in host:
+            if 'ec2_tag_env' not in host:
                 continue
 
-            if host['ec2_tag_environment'] not in inst_by_env:
-                inst_by_env[host['ec2_tag_environment']] = {}
+            if host['ec2_tag_env'] not in inst_by_env:
+                inst_by_env[host['ec2_tag_env']] = {}
             host_id = "%s:%s" % (host['ec2_tag_Name'], host['ec2_id'])
-            inst_by_env[host['ec2_tag_environment']][host_id] = host
+            inst_by_env[host['ec2_tag_env']][host_id] = host
 
         return inst_by_env
 
@@ -154,7 +154,7 @@ class AwsUtil(object):
     def gen_env_tag(env):
         """Generate the environment tag
         """
-        return "tag_environment_%s" % env
+        return "tag_env_%s" % env
 
     def gen_host_type_tag(self, host_type):
         """Generate the host type tag

+ 4 - 4
bin/oscp

@@ -138,7 +138,7 @@ class Oscp(object):
 
         # attempt to select the correct environment if specified
         if self.env:
-            results = filter(lambda result: result[1]['ec2_tag_environment'] == self.env, results)
+            results = filter(lambda result: result[1]['ec2_tag_env'] == self.env, results)
 
         if results:
             return results
@@ -167,7 +167,7 @@ class Oscp(object):
                     name = server_info['ec2_tag_Name']
                     ec2_id = server_info['ec2_id']
                     ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+                    print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
 
                 if limit:
                     print
@@ -180,7 +180,7 @@ class Oscp(object):
                     name = server_info['ec2_tag_Name']
                     ec2_id = server_info['ec2_id']
                     ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+                    print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
 
     def scp(self):
         '''scp files to or from a specified host
@@ -209,7 +209,7 @@
             if len(results) > 1:
                 print "Multiple results found for %s." % self.host
                 for result in results:
-                    print "{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<10}".format(**result[1])
+                    print "{ec2_tag_Name:<35} {ec2_tag_env:<5} {ec2_id:<10}".format(**result[1])
                 return # early exit, too many results
 
             # Assume we have one and only one.

+ 4 - 4
bin/ossh

@@ -127,7 +127,7 @@ class Ossh(object):
 
         # attempt to select the correct environment if specified
         if self.env:
-            results = filter(lambda result: result[1]['ec2_tag_environment'] == self.env, results)
+            results = filter(lambda result: result[1]['ec2_tag_env'] == self.env, results)
 
         if results:
             return results
@@ -156,7 +156,7 @@ class Ossh(object):
                     name = server_info['ec2_tag_Name']
                     ec2_id = server_info['ec2_id']
                     ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+                    print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
 
                 if limit:
                     print
@@ -169,7 +169,7 @@ class Ossh(object):
                     name = server_info['ec2_tag_Name']
                     ec2_id = server_info['ec2_id']
                     ip = server_info['ec2_ip_address']
-                    print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
+                    print '{ec2_tag_Name:<35} {ec2_tag_env:<8} {ec2_id:<15} {ec2_ip_address:<18} {ec2_private_ip_address}'.format(**server_info)
 
     def ssh(self):
         '''SSH to a specified host
@@ -195,7 +195,7 @@
             if len(results) > 1:
                 print "Multiple results found for %s." % self.host
                 for result in results:
-                    print "{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<10}".format(**result[1])
+                    print "{ec2_tag_Name:<35} {ec2_tag_env:<5} {ec2_id:<10}".format(**result[1])
                 return # early exit, too many results
 
             # Assume we have one and only one.

+ 3 - 3
bin/ossh_bash_completion

@@ -1,12 +1,12 @@
 __ossh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+      /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
 
 
     elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
 
 
     elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
 
 
     fi
 }

+ 3 - 3
bin/ossh_zsh_completion

@@ -2,13 +2,13 @@
 
 
 _ossh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+      print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
 
 
     elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
-      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
 
 
     elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])')
+      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
 
 
     fi
 
 

+ 1 - 1
bin/zsh_functions/_ossh

@@ -2,7 +2,7 @@
 
 
 _ossh_known_hosts(){
   if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
+    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items()])')
   fi
 }
 
 

+ 47 - 0
docs/best_practices_guide.adoc

@@ -466,3 +466,50 @@ If you want to use default with variables that evaluate to false you have to set
 In other words, normally the `default` filter will only replace the value if it's undefined. By setting the second parameter to `true`, it will also replace the value if it defaults to a false value in python, so None, empty list, empty string, etc.
 
 This is almost always more desirable than an empty list, string, etc.
+
+=== Yum and DNF
+'''
+[cols="2v,v"]
+|===
+| **Rule**
+| Package installation MUST use ansible action module to abstract away dnf/yum.
+| Package installation MUST use name= and state=present rather than pkg= and state=installed respectively.
+|===
+[cols="2v,v"]
+|===
+| **Rule**
+| Package installation MUST use name= and state=present rather than pkg= and state=installed respectively.
+|===
+
+This is done primarily because if you're registering the result of the
+installation and you have two conditional tasks based on whether yum or
+dnf is in use, you'll end up inadvertently overwriting the value. It also
+reduces duplication. name= and state=present are common between the dnf and
+yum modules.
+
+.Bad:
+[source,yaml]
+----
+---
+# tasks.yml
+- name: Install etcd (for etcdctl)
+  yum: name=etcd state=latest
+  when: "ansible_pkg_mgr == 'yum'"
+  register: install_result
+
+- name: Install etcd (for etcdctl)
+  dnf: name=etcd state=latest
+  when: "ansible_pkg_mgr == 'dnf'"
+  register: install_result
+----
+
+
+.Good:
+[source,yaml]
+----
+---
+# tasks.yml
+- name: Install etcd (for etcdctl)
+  action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+  register: install_result
+----
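
A single registered variable also means any follow-up task can key off one result, no matter which package manager ran. A minimal sketch of such a follow-up (hypothetical task, not part of this commit; assumes the "Good" pattern above):

---
# tasks.yml (hypothetical follow-on to the "Good" example)
- name: Install etcd (for etcdctl)
  action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
  register: install_result

- name: Restart etcd if the package changed
  service: name=etcd state=restarted
  when: install_result.changed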

+ 1 - 0
filter_plugins/oo_filters.py

@@ -433,6 +433,7 @@ class FilterModule(object):
             '''
             for tag in tags:
                 # Skip tag_env-host-type to avoid ambiguity with tag_env
+                # Removing env-host-type tag but leaving this here
                 if tag[:17] == 'tag_env-host-type':
                     continue
                 if tag[:len(key)+4] == 'tag_' + key:

+ 3 - 0
inventory/byo/hosts.aep.example

@@ -21,6 +21,9 @@ ansible_ssh_user=root
 # deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise
 deployment_type=atomic-enterprise
 
+# Install the openshift examples
+#openshift_install_examples=true
+
 # Enable cluster metrics
 #use_cluster_metrics=true
 

+ 3 - 0
inventory/byo/hosts.origin.example

@@ -21,6 +21,9 @@ ansible_ssh_user=root
 # deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise
 deployment_type=origin
 
+# Install the openshift examples
+#openshift_install_examples=true
+
 # Enable cluster metrics
 #use_cluster_metrics=true
 

+ 3 - 0
inventory/byo/hosts.ose.example

@@ -21,6 +21,9 @@ ansible_ssh_user=root
 # deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise
 deployment_type=openshift-enterprise
 
+# Install the openshift examples
+#openshift_install_examples=true
+
 # Enable cluster metrics
 #use_cluster_metrics=true
 

+ 17 - 1
openshift-ansible.spec

@@ -5,7 +5,7 @@
 }
 
 Name:           openshift-ansible
-Version:        3.0.19
+Version:        3.0.20
 Release:        1%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
@@ -259,6 +259,22 @@ Atomic OpenShift Utilities includes
 
 
 %changelog
+* Thu Dec 10 2015 Thomas Wiest <twiest@redhat.com> 3.0.20-1
+- Revert "Automatic commit of package [openshift-ansible] release [3.0.20-1]."
+  (twiest@redhat.com)
+- Automatic commit of package [openshift-ansible] release [3.0.20-1].
+  (twiest@redhat.com)
+- Install base package in openshift_common for version facts
+  (abutcher@redhat.com)
+- Make the install of openshift_examples optional (jtslear@gmail.com)
+- add support for remote command actions no support for anything but custom
+  scripts at this time (jdiaz@redhat.com)
+- Remove yum / dnf duplication (sdodson@redhat.com)
+- Remove hacluster user during uninstall. (abutcher@redhat.com)
+- Simplify session secrets overrides. (abutcher@redhat.com)
+- Squash pcs install into one task. (abutcher@redhat.com)
+- Bump ansible requirement to 1.9.4 (sdodson@redhat.com)
+
 * Wed Dec 09 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.19-1
 - Fix version dependent image streams (sdodson@redhat.com)
 - atomic-openshift-installer: Error handling on yaml loading

+ 11 - 9
playbooks/adhoc/create_pv/create_pv.yaml

@@ -1,20 +1,22 @@
 ---
 ---
-#example run: 
+#example run:
 # ansible-playbook -e "cli_volume_size=1" \
 # ansible-playbook -e "cli_volume_size=1" \
 #                  -e "cli_device_name=/dev/xvdf" \
 #                  -e "cli_device_name=/dev/xvdf" \
 #                  -e "cli_hosttype=master" \
 #                  -e "cli_hosttype=master" \
-#                  -e "cli_environment=ops" \
+#                  -e "cli_env=ops" \
 #                  create_pv.yaml
 #                  create_pv.yaml
-# FIXME: we need to change "environment" to "clusterid" as that's what it really is now.
+# FIXME: we need to change "env" to "clusterid" as that's what it really is now.
 #
 #
 - name: Create a volume and attach it to master
 - name: Create a volume and attach it to master
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   vars:
   vars:
     cli_volume_type: gp2
     cli_volume_type: gp2
     cli_volume_iops: ''
     cli_volume_iops: ''
     oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] |
     oo_name: "{{ groups['tag_host-type_' ~ cli_hosttype] |
-                 intersect(groups['tag_environment_' ~ cli_environment]) |
+                 intersect(groups['tag_env_' ~ cli_env]) |
                  first }}"
                  first }}"
   pre_tasks:
   pre_tasks:
   - fail:
   - fail:
@@ -24,7 +26,7 @@
     - cli_volume_size
     - cli_volume_size
     - cli_device_name
     - cli_device_name
     - cli_hosttype
     - cli_hosttype
-    - cli_environment
+    - cli_env
 
 
   - name: set oo_name fact
   - name: set oo_name fact
     set_fact:
     set_fact:
@@ -55,7 +57,7 @@
     args:
     args:
       tags:
       tags:
         Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}"
         Name: "pv-{{ hostvars[oo_name]['ec2_tag_Name'] }}"
-        env: "{{cli_environment}}"
+        env: "{{cli_env}}"
     register: voltags
     register: voltags
 
 
   - debug: var=voltags
   - debug: var=voltags
@@ -103,7 +105,7 @@
     filesystem:
     filesystem:
       dev: "{{ cli_device_name }}"
       dev: "{{ cli_device_name }}"
       fstype: ext4
       fstype: ext4
-    
+
   - name: Mount the dev
   - name: Mount the dev
     mount:
     mount:
       name: "{{ pv_mntdir }}"
       name: "{{ pv_mntdir }}"
@@ -112,7 +114,7 @@
       state: mounted
       state: mounted
 
 
   - name: chgrp g+rwXs
   - name: chgrp g+rwXs
-    file: 
+    file:
       path: "{{ pv_mntdir }}"
       path: "{{ pv_mntdir }}"
       mode: 'g+rwXs'
       mode: 'g+rwXs'
       recurse: yes
       recurse: yes
@@ -154,6 +156,6 @@
 
 
   - debug: var=oc_output
   - debug: var=oc_output
 
 
-  - fail: 
+  - fail:
       msg: "Failed to add {{ pv_template }} to master."
       msg: "Failed to add {{ pv_template }} to master."
     when: oc_output.rc != 0
     when: oc_output.rc != 0

+ 1 - 1
playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml

@@ -113,7 +113,7 @@
     args:
     args:
       tags:
       tags:
         Name: "{{ ec2_tag_Name }}"
         Name: "{{ ec2_tag_Name }}"
-        env: "{{ ec2_tag_environment }}"
+        env: "{{ ec2_tag_env}}"
     register: voltags
     register: voltags
 
 
   - name: Wait for volume to attach
   - name: Wait for volume to attach

+ 1 - 1
playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml

@@ -151,7 +151,7 @@
     args:
     args:
       tags:
       tags:
         Name: "{{ ec2_tag_Name }}"
         Name: "{{ ec2_tag_Name }}"
-        env: "{{ ec2_tag_environment }}"
+        env: "{{ ec2_tag_env }}"
     register: voltags
     register: voltags
 
 
   - name: check for attached drive
   - name: check for attached drive

+ 4 - 0
playbooks/adhoc/noc/create_host.yml

@@ -1,6 +1,8 @@
 ---
 ---
 - name: 'Create a host object in zabbix'
 - name: 'Create a host object in zabbix'
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   roles:
   roles:
     - os_zabbix
     - os_zabbix
@@ -23,6 +25,8 @@
 #ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
 #ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
 - name: 'Create a host object in zabbix'
 - name: 'Create a host object in zabbix'
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   roles:
   roles:
     - os_zabbix
     - os_zabbix

+ 2 - 0
playbooks/adhoc/noc/create_maintenance.yml

@@ -2,6 +2,8 @@
 #ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
 - name: 'Create a maintenance object in zabbix'
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   roles:
     - os_zabbix

+ 2 - 0
playbooks/adhoc/noc/get_zabbix_problems.yml

@@ -1,6 +1,8 @@
 ---
 ---
 - name: 'Get current hosts who have triggers that are alerting by trigger description'
 - name: 'Get current hosts who have triggers that are alerting by trigger description'
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   roles:
   roles:
     - os_zabbix
     - os_zabbix

+ 1 - 1
playbooks/adhoc/s3_registry/s3_registry.yml

@@ -6,7 +6,7 @@
 # The AWS access/secret keys should be the keys of a separate user (not your main user), containing only the necessary S3 access role.
 # The 'clusterid' is the short name of your cluster.
 
-- hosts: tag_env-host-type_{{ clusterid }}-openshift-master
+- hosts: tag_env_{{ clusterid }}:&tag_host-type_openshift-master
   remote_user: root
   gather_facts: False
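
The new host pattern relies on Ansible's group-intersection syntax: "groupA:&groupB" matches only hosts present in both groups, replacing the combined tag_env-host-type_* groups that this commit removes. A minimal sketch (hypothetical clusterid, not part of this commit):

---
# hypothetical illustration of the tag_env_<id>:&tag_host-type_<type> pattern
- hosts: tag_env_mycluster:&tag_host-type_openshift-master
  remote_user: root
  gather_facts: no
  tasks:
  - debug:
      msg: "{{ inventory_hostname }} is in both tag_env_mycluster and tag_host-type_openshift-master"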
 
 

+ 2 - 34
playbooks/adhoc/uninstall.yml

@@ -47,40 +47,8 @@
         - origin-node
         - origin-node
         - pcsd
         - pcsd
 
 
-    - yum: name={{ item }} state=absent
-      when: ansible_pkg_mgr == "yum" and not is_atomic | bool
-      with_items:
-        - atomic-enterprise
-        - atomic-enterprise-master
-        - atomic-enterprise-node
-        - atomic-enterprise-sdn-ovs
-        - atomic-openshift
-        - atomic-openshift-clients
-        - atomic-openshift-master
-        - atomic-openshift-node
-        - atomic-openshift-sdn-ovs
-        - corosync
-        - etcd
-        - openshift
-        - openshift-master
-        - openshift-node
-        - openshift-sdn
-        - openshift-sdn-ovs
-        - openvswitch
-        - origin
-        - origin-clients
-        - origin-master
-        - origin-node
-        - origin-sdn-ovs
-        - pacemaker
-        - pcs
-        - tuned-profiles-atomic-enterprise-node
-        - tuned-profiles-atomic-openshift-node
-        - tuned-profiles-openshift-node
-        - tuned-profiles-origin-node
-
-    - dnf: name={{ item }} state=absent
-      when: ansible_pkg_mgr == "dnf" and not is_atomic | bool
+    - action: "{{ ansible_pkg_mgr }} name={{ item }} state=absent"
+      when: not is_atomic | bool
       with_items:
       with_items:
         - atomic-enterprise
         - atomic-enterprise
         - atomic-enterprise-master
         - atomic-enterprise-master

+ 2 - 0
playbooks/adhoc/zabbix_setup/clean_zabbix.yml

@@ -1,6 +1,8 @@
 ---
 ---
 - hosts: localhost
 - hosts: localhost
   gather_facts: no
   gather_facts: no
+  connection: local
+  become: no
   vars:
   vars:
     g_server: http://localhost:8080/zabbix/api_jsonrpc.php
     g_server: http://localhost:8080/zabbix/api_jsonrpc.php
     g_user: ''
     g_user: ''

+ 2 - 0
playbooks/adhoc/zabbix_setup/oo-config-zaio.yml

@@ -2,6 +2,8 @@
 ---
 ---
 - hosts: localhost
 - hosts: localhost
   gather_facts: no
   gather_facts: no
+  connection: local
+  become: no
   vars:
   vars:
     g_server: http://localhost/zabbix/api_jsonrpc.php
     g_server: http://localhost/zabbix/api_jsonrpc.php
     g_user: Admin
     g_user: Admin

+ 2 - 0
playbooks/aws/ansible-tower/config.yml

@@ -2,6 +2,8 @@
 - name: "populate oo_hosts_to_config host group if needed"
 - name: "populate oo_hosts_to_config host group if needed"
   hosts: localhost
   hosts: localhost
   gather_facts: no
   gather_facts: no
+  connection: local
+  become: no
   tasks:
   tasks:
   - name: Evaluate oo_host_group_exp if it's set
   - name: Evaluate oo_host_group_exp if it's set
     add_host: "name={{ item }} groups=oo_hosts_to_config"
     add_host: "name={{ item }} groups=oo_hosts_to_config"

+ 3 - 2
playbooks/aws/ansible-tower/launch.yml

@@ -2,6 +2,7 @@
 - name: Launch instance(s)
 - name: Launch instance(s)
   hosts: localhost
   hosts: localhost
   connection: local
   connection: local
+  become: no
   gather_facts: no
   gather_facts: no
 
 
   vars:
   vars:
@@ -71,8 +72,8 @@
 
 
   tasks:
   tasks:
 
 
-    - name: Yum update
-      yum: name=* state=latest
+    - name: Update All Things
+      action: "{{ ansible_pkg_mgr }} name=* state=latest"
 
 
 # Apply the configs, separate so that just the configs can be run by themselves
 - include: config.yml
 - include: config.yml

+ 1 - 0
playbooks/aws/openshift-cluster/addNodes.yml

@@ -2,6 +2,7 @@
 - name: Launch instance(s)
 - name: Launch instance(s)
   hosts: localhost
   hosts: localhost
   connection: local
   connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml

+ 6 - 4
playbooks/aws/openshift-cluster/config.yml

@@ -1,6 +1,8 @@
 ---
 - hosts: localhost
   gather_facts: no
+  connection: local
+  become: no
   vars_files:
   - vars.yml
   tasks:
@@ -10,10 +12,10 @@
 
 - include: ../../common/openshift-cluster/config.yml
   vars:
-    g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
-    g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
-    g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
-    g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
+    g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+    g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([]))     | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+    g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+    g_node_hosts:   "{{ (groups['tag_host-type_node']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     g_nodeonmaster: true
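
Each g_*_hosts value above intersects a host-type group with the cluster's tag_env_ group from the dynamic inventory. A minimal local sketch (hypothetical group contents, not part of this commit) of how the expression resolves:

---
# hypothetical demo of the intersect() expression used for g_master_hosts
- hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars:
    cluster_id: mycluster
  tasks:
  # prints only hosts tagged both host-type=master and env=mycluster
  - debug:
      msg: "{{ (groups['tag_host-type_master']|default([])) | intersect(groups['tag_env_' ~ cluster_id]|default([])) }}"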

+ 1 - 0
playbooks/aws/openshift-cluster/launch.yml

@@ -2,6 +2,7 @@
 - name: Launch instance(s)
 - name: Launch instance(s)
   hosts: localhost
   hosts: localhost
   connection: local
   connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml

+ 2 - 0
playbooks/aws/openshift-cluster/list.yml

@@ -2,6 +2,8 @@
 - name: Generate oo_list_hosts group
 - name: Generate oo_list_hosts group
   hosts: localhost
   hosts: localhost
   gather_facts: no
   gather_facts: no
+  connection: local
+  become: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
   tasks:
   tasks:

+ 6 - 4
playbooks/aws/openshift-cluster/scaleup.yml

@@ -2,6 +2,8 @@
 
 
 - hosts: localhost
 - hosts: localhost
   gather_facts: no
   gather_facts: no
+  connection: local
+  become: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
   tasks:
   tasks:
@@ -20,10 +22,10 @@
 
 
 - include: ../../common/openshift-cluster/scaleup.yml
 - include: ../../common/openshift-cluster/scaleup.yml
   vars:
   vars:
-    g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
-    g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
-    g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
-    g_new_nodes_group: 'nodes_to_add'
+    g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+    g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([]))     | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+    g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+    g_new_node_hosts: "{{ groups.nodes_to_add }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     g_nodeonmaster: true
     g_nodeonmaster: true

+ 4 - 2
playbooks/aws/openshift-cluster/service.yml

@@ -1,6 +1,8 @@
 ---
 ---
 - name: Call same systemctl command for openshift on all instance(s)
 - name: Call same systemctl command for openshift on all instance(s)
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
@@ -14,7 +16,7 @@
       groups: g_service_masters
       groups: g_service_masters
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([])
+    with_items: "{{ g_master_hosts | default([]) }}"
 
 
   - name: Evaluate g_service_nodes
   - name: Evaluate g_service_nodes
     add_host:
     add_host:
@@ -22,7 +24,7 @@
       groups: g_service_nodes
       groups: g_service_nodes
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([])
+    with_items: "{{ g_node_hosts | default([]) }}"
 
 
 - include: ../../common/openshift-node/service.yml
 - include: ../../common/openshift-node/service.yml
 - include: ../../common/openshift-master/service.yml
 - include: ../../common/openshift-master/service.yml

+ 1 - 6
playbooks/aws/openshift-cluster/tasks/launch_instances.yml

@@ -3,7 +3,6 @@
     created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
     created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}"
     docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
     docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}"
     env: "{{ cluster }}"
     env: "{{ cluster }}"
-    env_host_type: "{{ cluster }}-openshift-{{ type }}"
     host_type: "{{ type }}"
     host_type: "{{ type }}"
     sub_host_type: "{{ g_sub_host_type }}"
     sub_host_type: "{{ g_sub_host_type }}"
 
 
@@ -124,10 +123,8 @@
     wait: yes
     wait: yes
     instance_tags:
     instance_tags:
       created-by: "{{ created_by }}"
       created-by: "{{ created_by }}"
-      environment: "{{ env }}"
       env: "{{ env }}"
       env: "{{ env }}"
       host-type: "{{ host_type }}"
       host-type: "{{ host_type }}"
-      env-host-type: "{{ env_host_type }}"
       sub-host-type: "{{ sub_host_type }}"
       sub-host-type: "{{ sub_host_type }}"
     volumes: "{{ volumes }}"
     volumes: "{{ volumes }}"
   register: ec2
   register: ec2
@@ -142,9 +139,7 @@
       Name: "{{ item.0 }}"
       Name: "{{ item.0 }}"
 
 
 - set_fact:
 - set_fact:
-    instance_groups: "tag_created-by_{{ created_by }}, tag_env_{{ env }},
-                    tag_host-type_{{ host_type }}, tag_env-host-type_{{ env_host_type }},
-                    tag_sub-host-type_{{ sub_host_type }}"
+    instance_groups: "tag_created-by_{{ created_by }}, tag_env_{{ env }}, tag_host-type_{{ host_type }}, tag_sub-host-type_{{ sub_host_type }}"
 
 
 - set_fact:
 - set_fact:
     node_label:
     node_label:

+ 3 - 1
playbooks/aws/openshift-cluster/terminate.yml

@@ -1,6 +1,8 @@
 ---
 ---
 - name: Terminate instance(s)
 - name: Terminate instance(s)
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
@@ -25,6 +27,7 @@
 - name: Terminate instances
 - name: Terminate instances
   hosts: localhost
   hosts: localhost
   connection: local
   connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   vars:
   vars:
     host_vars: "{{ hostvars
     host_vars: "{{ hostvars
@@ -36,7 +39,6 @@
         tags:
         tags:
           env: "{{ item['ec2_tag_env'] }}"
           env: "{{ item['ec2_tag_env'] }}"
           host-type: "{{ item['ec2_tag_host-type'] }}"
           host-type: "{{ item['ec2_tag_host-type'] }}"
-          env-host-type: "{{ item['ec2_tag_env-host-type'] }}"
           sub_host_type: "{{ item['ec2_tag_sub-host-type'] }}"
           sub_host_type: "{{ item['ec2_tag_sub-host-type'] }}"
       with_items: host_vars
       with_items: host_vars
       when: "'oo_hosts_to_terminate' in groups"
       when: "'oo_hosts_to_terminate' in groups"

+ 10 - 5
playbooks/aws/openshift-cluster/update.yml

@@ -1,19 +1,24 @@
 ---
 ---
-- name: Populate oo_hosts_to_update group
+- name: Update - Populate oo_hosts_to_update group
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
+  vars:
+    g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}"
+    g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}"
+    g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}"
+    g_node_hosts:   "{{ (groups['tag_host-type_node']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
   tasks:
   tasks:
-  - name: Evaluate oo_hosts_to_update
+  - name: Update - Evaluate oo_hosts_to_update
     add_host:
     add_host:
       name: "{{ item }}"
       name: "{{ item }}"
       groups: oo_hosts_to_update
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]))
-                | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]))
-                | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([]))
+    with_items: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}"
 
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 
 

+ 4 - 4
playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -19,10 +19,10 @@
 
 
 - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
 - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
   vars:
   vars:
-    g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
-    g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
-    g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
-    g_nodes_group: "{{ tmp_nodes_group | default('') }}"
+    g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+    g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([]))     | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+    g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+    g_node_hosts:   "{{ (groups['tag_host-type_node']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     g_nodeonmaster: true
     g_nodeonmaster: true

+ 4 - 4
playbooks/byo/openshift-cluster/config.yml

@@ -1,10 +1,10 @@
 ---
 ---
 - include: ../../common/openshift-cluster/config.yml
 - include: ../../common/openshift-cluster/config.yml
   vars:
   vars:
-    g_etcd_group: "{{ 'etcd' }}"
-    g_masters_group: "{{ 'masters' }}"
-    g_nodes_group: "{{ 'nodes' }}"
-    g_lb_group: "{{ 'lb' }}"
+    g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+    g_master_hosts: "{{ groups.masters | default([]) }}"
+    g_node_hosts: "{{ groups.nodes | default([]) }}"
+    g_lb_hosts: "{{ groups.lb | default([]) }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
     openshift_debug_level: 2
     openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_deployment_type: "{{ deployment_type }}"

+ 4 - 4
playbooks/byo/openshift-cluster/scaleup.yml

@@ -1,10 +1,10 @@
 ---
 ---
 - include: ../../common/openshift-cluster/scaleup.yml
 - include: ../../common/openshift-cluster/scaleup.yml
   vars:
   vars:
-    g_etcd_group: "{{ 'etcd' }}"
-    g_masters_group: "{{ 'masters' }}"
-    g_new_nodes_group: "{{ 'new_nodes' }}"
-    g_lb_group: "{{ 'lb' }}"
+    g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+    g_master_hosts: "{{ groups.masters | default([]) }}"
+    g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
+    g_lb_hosts: "{{ groups.lb | default([]) }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
     openshift_debug_level: 2
     openshift_debug_level: 2
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_deployment_type: "{{ deployment_type }}"

+ 4 - 4
playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml

@@ -1,9 +1,9 @@
 ---
 ---
 - include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
 - include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
   vars:
   vars:
-    g_etcd_group: "{{ 'etcd' }}"
-    g_masters_group: "{{ 'masters' }}"
-    g_nodes_group: "{{ 'nodes' }}"
-    g_lb_group: "{{ 'lb' }}"
+    g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+    g_master_hosts: "{{ groups.masters | default([]) }}"
+    g_node_hosts: "{{ groups.nodes | default([]) }}"
+    g_lb_hosts: "{{ groups.lb | default([]) }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_deployment_type: "{{ deployment_type }}"

+ 4 - 4
playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -1,9 +1,9 @@
 ---
 ---
 - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
 - include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
   vars:
   vars:
-    g_etcd_group: "{{ 'etcd' }}"
-    g_masters_group: "{{ 'masters' }}"
-    g_nodes_group: "{{ 'nodes' }}"
-    g_lb_group: "{{ 'lb' }}"
+    g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+    g_master_hosts: "{{ groups.masters | default([]) }}"
+    g_node_hosts: "{{ groups.nodes | default([]) }}"
+    g_lb_hosts: "{{ groups.lb | default([]) }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
     openshift_deployment_type: "{{ deployment_type }}"
     openshift_deployment_type: "{{ deployment_type }}"

+ 23 - 21
playbooks/common/openshift-cluster/evaluate_groups.yml

@@ -1,23 +1,25 @@
 ---
 - name: Populate config host groups
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   tasks:
   - fail:
-      msg: This playbook requires g_etcd_group to be set
-    when: g_etcd_group is not defined
+      msg: This playbook requires g_etcd_hosts to be set
+    when: g_etcd_hosts is not defined

   - fail:
-      msg: This playbook requires g_masters_group to be set
-    when: g_masters_group is not defined
+      msg: This playbook requires g_master_hosts to be set
+    when: g_master_hosts is not defined

   - fail:
-      msg: This playbook requires g_nodes_group or g_new_nodes_group to be set
-    when: g_nodes_group is not defined and g_new_nodes_group is not defined
+      msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
+    when: g_node_hosts is not defined and g_new_node_hosts is not defined

   - fail:
-      msg: This playbook requires g_lb_group to be set
-    when: g_lb_group is not defined
+      msg: This playbook requires g_lb_hosts to be set
+    when: g_lb_hosts is not defined

   - name: Evaluate oo_etcd_to_config
     add_host:
@@ -25,7 +27,7 @@
       groups: oo_etcd_to_config
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_etcd_group] | default([])
+    with_items: "{{ g_etcd_hosts | default([]) }}"

   - name: Evaluate oo_masters_to_config
     add_host:
@@ -33,11 +35,11 @@
       groups: oo_masters_to_config
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_masters_group] | default([])
+    with_items: "{{ g_master_hosts | default([]) }}"

-  # Use g_new_nodes_group if it exists otherwise g_nodes_group
+  # Use g_new_node_hosts if it exists otherwise g_node_hosts
   - set_fact:
-      g_nodes_to_config: "{{ g_new_nodes_group | default(g_nodes_group | default([])) }}"
+      g_node_hosts_to_config: "{{ g_new_node_hosts | default(g_node_hosts | default([])) }}"

   - name: Evaluate oo_nodes_to_config
     add_host:
@@ -45,32 +47,32 @@
       groups: oo_nodes_to_config
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_nodes_to_config] | default([])
+    with_items: "{{ g_node_hosts_to_config | default([]) }}"

-  # Skip adding the master to oo_nodes_to_config when g_new_nodes_group is
+  # Skip adding the master to oo_nodes_to_config when g_new_node_hosts is
   - name: Evaluate oo_nodes_to_config
     add_host:
       name: "{{ item }}"
       groups: oo_nodes_to_config
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_masters_group] | default([])
-    when: g_nodeonmaster | default(false) == true and g_new_nodes_group is not defined
+    with_items: "{{ g_master_hosts | default([]) }}"
+    when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined

   - name: Evaluate oo_first_etcd
     add_host:
-      name: "{{ groups[g_etcd_group][0] }}"
+      name: "{{ g_etcd_hosts[0] }}"
       groups: oo_first_etcd
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-    when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
+    when: g_etcd_hosts|length > 0

   - name: Evaluate oo_first_master
     add_host:
-      name: "{{ groups[g_masters_group][0] }}"
+      name: "{{ g_master_hosts[0] }}"
       groups: oo_first_master
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_sudo: "{{ g_sudo | default(omit) }}"
-    when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
+    when: g_master_hosts|length > 0

   - name: Evaluate oo_lb_to_config
     add_host:
@@ -78,4 +80,4 @@
       groups: oo_lb_to_config
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_sudo: "{{ g_sudo | default(omit) }}"
-    with_items: groups[g_lb_group] | default([])
+    with_items: "{{ g_lb_hosts | default([]) }}"

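The group-name variables (g_etcd_group, g_masters_group, ...) are replaced here by resolved host lists (g_etcd_hosts, g_master_hosts, ...), so callers now hand evaluate_groups.yml actual hostnames rather than inventory group names. A minimal sketch of a byo-style caller, assuming conventional etcd/masters/nodes/lb inventory groups (the include path and group names are illustrative, not taken from this commit):

---
# Sketch: resolve inventory groups into explicit host lists and pass
# them down to the common cluster playbooks.
- include: ../../common/openshift-cluster/config.yml
  vars:
    g_etcd_hosts: "{{ groups.etcd | default([]) }}"
    g_master_hosts: "{{ groups.masters | default([]) }}"
    g_node_hosts: "{{ groups.nodes | default([]) }}"
    g_lb_hosts: "{{ groups.lb | default([]) }}"

Because each list defaults to [], the fail: guards above catch a truly missing variable while an empty tier simply contributes no members to the oo_* groups.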
+ 2 - 2
playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml

@@ -11,7 +11,7 @@
     openshift_version: "{{ openshift_pkg_version | default('') }}"
   tasks:
     - name: Upgrade master packages
-      yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest
+      action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest"
     - name: Restart master services
       service: name="{{ openshift.common.service_type}}-master" state=restarted

@@ -21,7 +21,7 @@
     openshift_version: "{{ openshift_pkg_version | default('') }}"
   tasks:
     - name: Upgrade node packages
-      yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest
+      action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest"
     - name: Restart node services
       service: name="{{ openshift.common.service_type }}-node" state=restarted

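Swapping the hard-coded yum tasks for action: "{{ ansible_pkg_mgr }} ..." lets one task serve both yum-based (RHEL/CentOS) and dnf-based (Fedora) hosts: ansible_pkg_mgr is a gathered fact holding the package-module name, and the Ansible versions this repo targeted allow the module name to be templated through action. A standalone sketch of the pattern, with an illustrative package name:

---
- hosts: all
  tasks:
  # ansible_pkg_mgr resolves to "yum" or "dnf" from gathered facts,
  # so a single task covers both package managers.
  - name: Install a package with whichever manager the host uses
    action: "{{ ansible_pkg_mgr }} name=htop state=present"

gather_facts must remain enabled (or the fact injected some other way) for ansible_pkg_mgr to be defined when the task runs.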
+ 23 - 13
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -12,6 +12,8 @@
 
 
 - name: Evaluate additional groups for upgrade
 - name: Evaluate additional groups for upgrade
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   tasks:
   tasks:
   - name: Evaluate etcd_hosts_to_backup
   - name: Evaluate etcd_hosts_to_backup
     add_host:
     add_host:
@@ -54,8 +56,8 @@
 - name: Verify upgrade can proceed
 - name: Verify upgrade can proceed
   hosts: oo_masters_to_config:oo_nodes_to_config
   hosts: oo_masters_to_config:oo_nodes_to_config
   tasks:
   tasks:
-  - name: Clean yum cache
-    command: yum clean all
+  - name: Clean package cache
+    command: "{{ ansible_pkg_mgr }} clean all"
 
 
   - set_fact:
   - set_fact:
       g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
       g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
@@ -87,6 +89,8 @@
 ##############################################################################
 ##############################################################################
 - name: Gate on pre-upgrade checks
 - name: Gate on pre-upgrade checks
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   vars:
   vars:
     pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
     pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
   tasks:
   tasks:
@@ -149,9 +153,7 @@
     when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
     when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
 
 
   - name: Install etcd (for etcdctl)
   - name: Install etcd (for etcdctl)
-    yum:
-      pkg: etcd
-      state: latest
+    action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
 
 
   - name: Generate etcd backup
   - name: Generate etcd backup
     command: >
     command: >
@@ -171,6 +173,8 @@
 ##############################################################################
 ##############################################################################
 - name: Gate on etcd backup
 - name: Gate on etcd backup
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   tasks:
   tasks:
   - set_fact:
   - set_fact:
       etcd_backup_completed: "{{ hostvars
       etcd_backup_completed: "{{ hostvars
@@ -189,6 +193,8 @@
 ###############################################################################
 ###############################################################################
 - name: Create temp directory for syncing certs
 - name: Create temp directory for syncing certs
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
   - name: Create local temp directory for syncing certs
   - name: Create local temp directory for syncing certs
@@ -222,17 +228,13 @@
     openshift_version: "{{ openshift_pkg_version | default('') }}"
     openshift_version: "{{ openshift_pkg_version | default('') }}"
   tasks:
   tasks:
   - name: Upgrade to latest available kernel
   - name: Upgrade to latest available kernel
-    yum:
-      pkg: kernel
-      state: latest
+    action: "{{ ansible_pkg_mgr}} name=kernel state=latest"
 
 
   - name: Upgrade master packages
   - name: Upgrade master packages
-    command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
+    command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
 
 
   - name: Ensure python-yaml present for config upgrade
   - name: Ensure python-yaml present for config upgrade
-    yum:
-      pkg: PyYAML
-      state: installed
+    action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
 
 
   - name: Upgrade master configuration
   - name: Upgrade master configuration
     openshift_upgrade_config:
     openshift_upgrade_config:
@@ -339,6 +341,8 @@
 
 
 - name: Delete temporary directory on localhost
 - name: Delete temporary directory on localhost
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
   - file: name={{ g_master_mktemp.stdout }} state=absent
   - file: name={{ g_master_mktemp.stdout }} state=absent
@@ -357,6 +361,8 @@
 ##############################################################################
 ##############################################################################
 - name: Gate on master update
 - name: Gate on master update
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   tasks:
   tasks:
   - set_fact:
   - set_fact:
       master_update_completed: "{{ hostvars
       master_update_completed: "{{ hostvars
@@ -380,7 +386,7 @@
   - openshift_facts
   - openshift_facts
   tasks:
   tasks:
   - name: Upgrade node packages
   - name: Upgrade node packages
-    command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }}
+    command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
 
 
   - name: Restart node service
   - name: Restart node service
     service: name="{{ openshift.common.service_type }}-node" state=restarted
     service: name="{{ openshift.common.service_type }}-node" state=restarted
@@ -397,6 +403,8 @@
 ##############################################################################
 ##############################################################################
 - name: Gate on nodes update
 - name: Gate on nodes update
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   tasks:
   tasks:
   - set_fact:
   - set_fact:
       node_update_completed: "{{ hostvars
       node_update_completed: "{{ hostvars
@@ -464,6 +472,8 @@
 ##############################################################################
 ##############################################################################
 - name: Gate on reconcile
 - name: Gate on reconcile
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   tasks:
   tasks:
   - set_fact:
   - set_fact:
       reconcile_completed: "{{ hostvars
       reconcile_completed: "{{ hostvars

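Several "Gate on ..." plays above run on localhost (now pinned to connection: local and become: no so they neither SSH to nor escalate on the control host) and abort the upgrade unless every host recorded completion of the previous step. A simplified sketch of that gating idiom using only core modules; the fact name, group, and play name are illustrative, not the playbook's own:

---
- name: Gate on example step
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars:
    gated_hosts: "{{ groups.oo_masters_to_config | default([]) }}"
  tasks:
  # Hosts that finished the previous step set step_done on themselves;
  # any host missing that fact stops the run here.
  - name: Fail for any host that did not report completion
    fail:
      msg: "{{ item }} did not complete the previous step"
    when: not (hostvars[item].step_done | default(false) | bool)
    with_items: "{{ gated_hosts }}"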
+ 2 - 2
playbooks/common/openshift-etcd/config.yml

@@ -33,7 +33,7 @@
 - name: Create temp directory for syncing certs
   hosts: localhost
   connection: local
-  sudo: false
+  become: no
   gather_facts: no
   tasks:
   - name: Create local temp directory for syncing certs
@@ -92,7 +92,7 @@
 - name: Delete temporary directory on localhost
   hosts: localhost
   connection: local
-  sudo: false
+  become: no
   gather_facts: no
   tasks:
   - file: name={{ g_etcd_mktemp.stdout }} state=absent

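The sudo: false → become: no edits move these localhost plays to the current privilege-escalation keyword; together with connection: local they keep the cert-syncing temp-directory plays from escalating or connecting over SSH. A trimmed sketch of the overall mktemp/cleanup pattern, with an illustrative register name:

---
- name: Create temp directory for syncing certs
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
  - name: Create local temp directory
    command: mktemp -d /tmp/openshift-ansible-XXXXXXX
    register: g_example_mktemp
    changed_when: False

# ...intervening plays copy certificates through {{ g_example_mktemp.stdout }}...

- name: Delete temporary directory on localhost
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
  - file: name={{ g_example_mktemp.stdout }} state=absent

Facts registered on localhost persist across plays, which is why the final play can still reference the registered path.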
+ 2 - 0
playbooks/common/openshift-etcd/service.yml

@@ -1,6 +1,8 @@
 ---
 - name: Populate g_service_masters host group if needed
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   tasks:
   - fail: msg="new_cluster_state is required to be injected in this playbook"

+ 8 - 5
playbooks/common/openshift-master/config.yml

@@ -70,7 +70,7 @@
 - name: Create temp directory for syncing certs
 - name: Create temp directory for syncing certs
   hosts: localhost
   hosts: localhost
   connection: local
   connection: local
-  sudo: false
+  become: no
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
   - name: Create local temp directory for syncing certs
   - name: Create local temp directory for syncing certs
@@ -207,7 +207,7 @@
 - name: Compute haproxy_backend_servers
 - name: Compute haproxy_backend_servers
   hosts: localhost
   hosts: localhost
   connection: local
   connection: local
-  sudo: false
+  become: no
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
   - set_fact:
   - set_fact:
@@ -245,7 +245,7 @@
       msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length"
       msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length"
     when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length)
     when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length)
   - name: Install OpenSSL package
   - name: Install OpenSSL package
-    action: "{{ansible_pkg_mgr}} pkg=openssl state=present"
+    action: "{{ ansible_pkg_mgr }} name=openssl state=present"
   - name: Generate session authentication key
   - name: Generate session authentication key
     command: /usr/bin/openssl rand -base64 24
     command: /usr/bin/openssl rand -base64 24
     register: session_auth_output
     register: session_auth_output
@@ -260,6 +260,8 @@
 
 
 - name: Parse named certificates
 - name: Parse named certificates
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   vars:
   vars:
     internal_hostnames: "{{ hostvars[groups.oo_first_master.0].openshift.common.internal_hostnames }}"
     internal_hostnames: "{{ hostvars[groups.oo_first_master.0].openshift.common.internal_hostnames }}"
     named_certificates: "{{ hostvars[groups.oo_first_master.0].openshift_master_named_certificates | default([]) }}"
     named_certificates: "{{ hostvars[groups.oo_first_master.0].openshift_master_named_certificates | default([]) }}"
@@ -341,7 +343,8 @@
   roles:
   roles:
   - role: openshift_master_cluster
   - role: openshift_master_cluster
     when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
     when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
-  - openshift_examples
+  - role: openshift_examples
+    when: openshift.common.install_examples | bool
   - role: openshift_cluster_metrics
   - role: openshift_cluster_metrics
     when: openshift.common.use_cluster_metrics | bool
     when: openshift.common.use_cluster_metrics | bool
   - role: openshift_manageiq
   - role: openshift_manageiq
@@ -374,7 +377,7 @@
 - name: Delete temporary directory on localhost
 - name: Delete temporary directory on localhost
   hosts: localhost
   hosts: localhost
   connection: local
   connection: local
-  sudo: false
+  become: no
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
   - file: name={{ g_master_mktemp.stdout }} state=absent
   - file: name={{ g_master_mktemp.stdout }} state=absent

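The openshift_examples entry changes from a bare role name to the long role: form because only the long form accepts a per-role when: condition. A minimal sketch of conditional role application (the hosts pattern is illustrative; the role names and conditions are the ones from the hunk above):

---
- hosts: masters
  roles:
  # A bare list entry cannot carry a condition; the role: form can.
  - role: openshift_examples
    when: openshift.common.install_examples | bool
  - role: openshift_cluster_metrics
    when: openshift.common.use_cluster_metrics | bool

The same file also normalizes the OpenSSL install to name=openssl, the parameter spelling both yum and dnf accept.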
+ 2 - 0
playbooks/common/openshift-master/service.yml

@@ -2,6 +2,8 @@
 - name: Populate g_service_masters host group if needed
   hosts: localhost
   gather_facts: no
+  connection: local
+  become: no
   tasks:
   - fail: msg="new_cluster_state is required to be injected in this playbook"
     when: new_cluster_state is not defined

+ 2 - 2
playbooks/common/openshift-node/config.yml

@@ -58,7 +58,7 @@
 - name: Create temp directory for syncing certs
 - name: Create temp directory for syncing certs
   hosts: localhost
   hosts: localhost
   connection: local
   connection: local
-  sudo: false
+  become: no
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
   - name: Create local temp directory for syncing certs
   - name: Create local temp directory for syncing certs
@@ -191,7 +191,7 @@
 - name: Delete temporary directory on localhost
 - name: Delete temporary directory on localhost
   hosts: localhost
   hosts: localhost
   connection: local
   connection: local
-  sudo: false
+  become: no
   gather_facts: no
   gather_facts: no
   tasks:
   tasks:
   - file: name={{ mktemp.stdout }} state=absent
   - file: name={{ mktemp.stdout }} state=absent

+ 2 - 0
playbooks/common/openshift-node/service.yml

@@ -1,6 +1,8 @@
 ---
 - name: Populate g_service_nodes host group if needed
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   tasks:
   - fail: msg="new_cluster_state is required to be injected in this playbook"

+ 6 - 4
playbooks/gce/openshift-cluster/config.yml

@@ -4,6 +4,8 @@
 
 
 - hosts: localhost
 - hosts: localhost
   gather_facts: no
   gather_facts: no
+  connection: local
+  become: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
   tasks:
   tasks:
@@ -15,10 +17,10 @@
 
 
 - include: ../../common/openshift-cluster/config.yml
 - include: ../../common/openshift-cluster/config.yml
   vars:
   vars:
-    g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
-    g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}"
-    g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
-    g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
+    g_etcd_hosts:   "{{ (groups['tag_host-type-etcd']|default([]))   | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
+    g_lb_hosts:     "{{ (groups['tag_host-type-lb']|default([]))     | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
+    g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
+    g_node_hosts:   "{{ (groups['tag_host-type-node']|default([]))   | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     g_nodeonmaster: true
     g_nodeonmaster: true

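Rather than one composite tag_env-host-type-<cluster>-openshift-<type> group per tier, hosts are now selected by intersecting the per-type tag group with the per-environment tag group. A self-contained sketch of that selection, with an illustrative cluster name (the tag group names follow the pattern used in the hunk above):

---
- hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars:
    cluster_id: mycluster
  tasks:
  # Hosts tagged both host-type-master and env-mycluster are the masters
  # for this cluster; missing tag groups fall back to empty lists.
  - debug:
      msg: "{{ (groups['tag_host-type-master'] | default([])) | intersect(groups['tag_env-' ~ cluster_id] | default([])) }}"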
+ 11 - 2
playbooks/gce/openshift-cluster/join_node.yml

@@ -1,7 +1,14 @@
 ---
 ---
 - name: Populate oo_hosts_to_update group
 - name: Populate oo_hosts_to_update group
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
+  vars:
+    g_etcd_hosts:   "{{ (groups['tag_host-type-etcd']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
+    g_lb_hosts:     "{{ (groups['tag_host-type-lb']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
+    g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
+    g_node_hosts:   "{{ (groups['tag_host-type-node']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
   tasks:
   tasks:
@@ -16,6 +23,8 @@
 
 
 - name: Populate oo_masters_to_config host group
 - name: Populate oo_masters_to_config host group
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
@@ -29,11 +38,11 @@
 
 
   - name: Evaluate oo_first_master
   - name: Evaluate oo_first_master
     add_host:
     add_host:
-      name: "{{ groups['tag_env-host-type-' ~ cluster_id ~ '-openshift-master'][0] }}"
+      name: "{{ g_master_hosts | first }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       groups: oo_first_master
       groups: oo_first_master
-    when: "'tag_env-host-type-{{ cluster_id }}-openshift-master' in groups"
+    when: g_master_hosts is defined and g_master_hosts|length > 0
 
 
 #- include: config.yml
 #- include: config.yml
 - include: ../../common/openshift-node/config.yml
 - include: ../../common/openshift-node/config.yml

+ 1 - 0
playbooks/gce/openshift-cluster/launch.yml

@@ -2,6 +2,7 @@
 - name: Launch instance(s)
 - name: Launch instance(s)
   hosts: localhost
   hosts: localhost
   connection: local
   connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml

+ 2 - 0
playbooks/gce/openshift-cluster/list.yml

@@ -1,6 +1,8 @@
 ---
 ---
 - name: Generate oo_list_hosts group
 - name: Generate oo_list_hosts group
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml

+ 4 - 4
playbooks/gce/openshift-cluster/service.yml

@@ -1,6 +1,8 @@
 ---
 ---
 - name: Call same systemctl command for openshift on all instance(s)
 - name: Call same systemctl command for openshift on all instance(s)
   hosts: localhost
   hosts: localhost
+  connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
@@ -8,21 +10,19 @@
   - fail: msg="cluster_id is required to be injected in this playbook"
   - fail: msg="cluster_id is required to be injected in this playbook"
     when: cluster_id is not defined
     when: cluster_id is not defined
 
 
-  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-node
   - add_host:
   - add_host:
       name: "{{ item }}"
       name: "{{ item }}"
       groups: g_service_nodes
       groups: g_service_nodes
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+    with_items: "{{ g_node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
 
 
-  - set_fact: scratch_group=tag_env-host-type-{{ cluster_id }}-openshift-master
   - add_host:
   - add_host:
       name: "{{ item }}"
       name: "{{ item }}"
       groups: g_service_masters
       groups: g_service_masters
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost']) | difference(groups.status_terminated)
+    with_items: "{{ g_master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}"
 
 
 - include: ../../common/openshift-node/service.yml
 - include: ../../common/openshift-node/service.yml
 - include: ../../common/openshift-master/service.yml
 - include: ../../common/openshift-master/service.yml

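The service playbook now builds g_service_nodes and g_service_masters from the g_*_hosts lists and uses difference() to drop localhost and instances the GCE inventory reports as terminated, replacing the old scratch_group set_fact step. A standalone sketch of that filtering (host names are illustrative, and status_terminated is defaulted so the snippet runs outside a GCE inventory):

---
- hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars:
    g_node_hosts: [node1, node2, localhost]
  tasks:
  # difference() strips the control host and anything listed under
  # status_terminated before the hosts join g_service_nodes.
  - add_host:
      name: "{{ item }}"
      groups: g_service_nodes
    with_items: "{{ g_node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated | default([])) }}"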
+ 0 - 1
playbooks/gce/openshift-cluster/tasks/launch_instances.yml

@@ -19,7 +19,6 @@
       - env-{{ cluster }}
       - env-{{ cluster }}
       - host-type-{{ type }}
       - host-type-{{ type }}
       - sub-host-type-{{ g_sub_host_type }}
       - sub-host-type-{{ g_sub_host_type }}
-      - env-host-type-{{ cluster }}-openshift-{{ type }}
   when: instances |length > 0
   when: instances |length > 0
   register: gce
   register: gce
 
 

+ 2 - 0
playbooks/gce/openshift-cluster/terminate.yml

@@ -2,6 +2,7 @@
 - name: Terminate instance(s)
 - name: Terminate instance(s)
   hosts: localhost
   hosts: localhost
   connection: local
   connection: local
+  become: no
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
@@ -27,6 +28,7 @@
 
 
 - name: Terminate instances(s)
 - name: Terminate instances(s)
   hosts: localhost
   hosts: localhost
+  become: no
   connection: local
   connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:

+ 8 - 3
playbooks/gce/openshift-cluster/update.yml

@@ -1,7 +1,14 @@
 ---
 ---
 - name: Populate oo_hosts_to_update group
 - name: Populate oo_hosts_to_update group
   hosts: localhost
   hosts: localhost
+  become: no
+  connection: local
   gather_facts: no
   gather_facts: no
+  vars:
+    g_etcd_hosts:   "{{ (groups['tag_host-type-etcd']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
+    g_lb_hosts:     "{{ (groups['tag_host-type-lb']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
+    g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
+    g_node_hosts:   "{{ (groups['tag_host-type-node']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
   tasks:
   tasks:
@@ -11,9 +18,7 @@
       groups: oo_hosts_to_update
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]))
-                | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]))
-                | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([]))
+    with_items: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}"
 
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 
 

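For updates, the three per-type lists are simply unioned into one flat oo_hosts_to_update group. A sketch with illustrative host lists; note that the trailing default([]) only guards the final result, so each g_*_hosts must itself be defined (here the vars: block supplies them):

---
- hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars:
    g_master_hosts: [master1]
    g_node_hosts: [node1, node2]
    g_etcd_hosts: []
  tasks:
  # One flat list of every host that needs repo and package updates.
  - add_host:
      name: "{{ item }}"
      groups: oo_hosts_to_update
    with_items: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}"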
+ 2 - 1
playbooks/gce/openshift-cluster/wip.yml

@@ -1,6 +1,7 @@
 ---
 ---
 - name: WIP
 - name: WIP
   hosts: localhost
   hosts: localhost
+  become: no
   connection: local
   connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
@@ -12,7 +13,7 @@
       groups: oo_masters_for_deploy
       groups: oo_masters_for_deploy
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user | default(ansible_ssh_user, true) }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+    with_items: "{{ g_master_hosts | default([]) }}"
 
 
 - name: Deploy OpenShift Services
 - name: Deploy OpenShift Services
   hosts: oo_masters_for_deploy
   hosts: oo_masters_for_deploy

+ 6 - 4
playbooks/libvirt/openshift-cluster/config.yml

@@ -5,6 +5,8 @@
 
 
 - hosts: localhost
 - hosts: localhost
   gather_facts: no
   gather_facts: no
+  become: no
+  connection: local
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
   tasks:
   tasks:
@@ -14,10 +16,10 @@
 
 
 - include: ../../common/openshift-cluster/config.yml
 - include: ../../common/openshift-cluster/config.yml
   vars:
   vars:
-    g_etcd_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-etcd' }}"
-    g_lb_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-lb' }}"
-    g_masters_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-master' }}"
-    g_nodes_group: "{{ 'tag_env-host-type-' ~ cluster_id ~ '-openshift-node' }}"
+    g_etcd_hosts:   "{{ (groups['tag_host-type-etcd']|default([]))   | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
+    g_lb_hosts:     "{{ (groups['tag_host-type-lb']|default([]))     | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
+    g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
+    g_node_hosts:   "{{ (groups['tag_host-type-node']|default([]))   | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     openshift_cluster_id: "{{ cluster_id }}"
     openshift_cluster_id: "{{ cluster_id }}"

+ 3 - 0
playbooks/libvirt/openshift-cluster/launch.yml

@@ -1,6 +1,8 @@
 ---
 ---
 - name: Launch instance(s)
 - name: Launch instance(s)
   hosts: localhost
   hosts: localhost
+  become: no
+  connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
@@ -11,6 +13,7 @@
     image_url: "{{ deployment_vars[deployment_type].image.url }}"
     image_url: "{{ deployment_vars[deployment_type].image.url }}"
     image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
     image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}"
     image_name: "{{ deployment_vars[deployment_type].image.name }}"
     image_name: "{{ deployment_vars[deployment_type].image.name }}"
+    image_compression: "{{ deployment_vars[deployment_type].image.compression }}"
   tasks:
   tasks:
   - fail: msg="Deployment type not supported for libvirt provider yet"
   - fail: msg="Deployment type not supported for libvirt provider yet"
     when: deployment_type == 'online'
     when: deployment_type == 'online'

+ 4 - 0
playbooks/libvirt/openshift-cluster/list.yml

@@ -1,6 +1,8 @@
 ---
 ---
 - name: Generate oo_list_hosts group
 - name: Generate oo_list_hosts group
   hosts: localhost
   hosts: localhost
+  become: no
+  connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
@@ -21,6 +23,8 @@
 
 
 - name: List Hosts
 - name: List Hosts
   hosts: localhost
   hosts: localhost
+  become: no
+  connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml

+ 4 - 2
playbooks/libvirt/openshift-cluster/service.yml

@@ -5,6 +5,8 @@
 
 
 - name: Call same systemctl command for openshift on all instance(s)
 - name: Call same systemctl command for openshift on all instance(s)
   hosts: localhost
   hosts: localhost
+  become: no
+  connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
@@ -18,7 +20,7 @@
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       groups: g_service_masters
       groups: g_service_masters
-    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([])
+    with_items: "{{ g_master_hosts | default([]) }}"
 
 
   - name: Evaluate g_service_nodes
   - name: Evaluate g_service_nodes
     add_host:
     add_host:
@@ -26,7 +28,7 @@
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       groups: g_service_nodes
       groups: g_service_nodes
-    with_items: groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([])
+    with_items: "{{ g_node_hosts | default([]) }}"
 
 
 - include: ../../common/openshift-node/service.yml
 - include: ../../common/openshift-node/service.yml
 - include: ../../common/openshift-master/service.yml
 - include: ../../common/openshift-master/service.yml

+ 9 - 2
playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml

@@ -13,8 +13,15 @@
   get_url:
   get_url:
     url: '{{ image_url }}'
     url: '{{ image_url }}'
     sha256sum: '{{ image_sha256 }}'
     sha256sum: '{{ image_sha256 }}'
-    dest: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+    dest: '{{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
   when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}'
   when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}'
+  register: downloaded_image
+
+- name: Uncompress Base Cloud image
+  command: 'unxz -kf {{ os_libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}'
+  args:
+    creates: '{{ os_libvirt_storage_pool_path }}/{{ image_name }}'
+  when: image_compression in ["xz"] and downloaded_image.changed
 
 
 - name: Create the cloud-init config drive path
 - name: Create the cloud-init config drive path
   file:
   file:
@@ -81,7 +88,7 @@
     ansible_ssh_host: '{{ item.1 }}'
     ansible_ssh_host: '{{ item.1 }}'
     ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
     ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
     ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
     ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}'
+    groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}'
   with_together:
   with_together:
     - instances
     - instances
     - ips
     - ips

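The libvirt image tasks now fetch the compressed .xz cloud image and decompress it only when the download actually changed, with creates: keeping the unxz step idempotent. A trimmed standalone sketch of that flow; the pool path is an assumption standing in for os_libvirt_storage_pool_path:

---
- hosts: localhost
  connection: local
  become: no
  gather_facts: no
  vars:
    pool_path: /var/lib/libvirt/images
    image_name: CentOS-7-x86_64-GenericCloud.qcow2
    image_compression: xz
    image_url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz
  tasks:
  - name: Download compressed base image
    get_url:
      url: "{{ image_url }}"
      dest: "{{ pool_path }}/{{ [image_name, image_compression] | join('.') }}"
    register: downloaded_image

  # unxz -kf keeps the .xz so get_url stays idempotent; creates: skips
  # the step once the uncompressed image is already in place.
  - name: Uncompress base image
    command: "unxz -kf {{ pool_path }}/{{ [image_name, image_compression] | join('.') }}"
    args:
      creates: "{{ pool_path }}/{{ image_name }}"
    when: image_compression in ['xz'] and downloaded_image.changed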
+ 0 - 1
playbooks/libvirt/openshift-cluster/templates/domain.xml

@@ -4,7 +4,6 @@
   <metadata xmlns:ansible="https://github.com/ansible/ansible">
   <metadata xmlns:ansible="https://github.com/ansible/ansible">
     <ansible:tags>
     <ansible:tags>
       <ansible:tag>env-{{ cluster }}</ansible:tag>
       <ansible:tag>env-{{ cluster }}</ansible:tag>
-      <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
       <ansible:tag>host-type-{{ type }}</ansible:tag>
       <ansible:tag>host-type-{{ type }}</ansible:tag>
       <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
       <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
     </ansible:tags>
     </ansible:tags>

+ 4 - 0
playbooks/libvirt/openshift-cluster/terminate.yml

@@ -3,6 +3,8 @@
 
 
 - name: Terminate instance(s)
 - name: Terminate instance(s)
   hosts: localhost
   hosts: localhost
+  become: no
+  connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
@@ -28,6 +30,8 @@
 
 
 - name: Terminate instance(s)
 - name: Terminate instance(s)
   hosts: localhost
   hosts: localhost
+  become: no
+  connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml

+ 9 - 3
playbooks/libvirt/openshift-cluster/update.yml

@@ -1,7 +1,15 @@
 ---
 ---
 - name: Populate oo_hosts_to_update group
 - name: Populate oo_hosts_to_update group
   hosts: localhost
   hosts: localhost
+  become: no
+  connection: local
   gather_facts: no
   gather_facts: no
+  vars:
+    g_etcd_hosts:   "{{ (groups['tag_host-type-etcd']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
+    g_lb_hosts:     "{{ (groups['tag_host-type-lb']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
+    g_master_hosts: "{{ (groups['tag_host-type-master']|default([])) | intersect(groups['tag_env-' ~ cluster_id]) }}"
+    g_node_hosts:   "{{ (groups['tag_host-type-node']|default([])) | intersect((groups['tag_env-' ~ cluster_id]|default([]))) }}"
+
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
   tasks:
   tasks:
@@ -11,9 +19,7 @@
       groups: oo_hosts_to_update
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: (groups["tag_env-host-type-{{ cluster_id }}-openshift-master"] | default([]))
-                | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-node"] | default([]))
-                | union(groups["tag_env-host-type-{{ cluster_id }}-openshift-etcd"] | default([]))
+    with_items: "{{ g_master_hosts  | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}"
 
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 
 

+ 4 - 2
playbooks/libvirt/openshift-cluster/vars.yml

@@ -8,11 +8,13 @@ deployment_vars:
   origin:
   origin:
     image:
     image:
       url:    "{{ lookup('oo_option', 'image_url') |
       url:    "{{ lookup('oo_option', 'image_url') |
-                  default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
+                  default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz', True) }}"
+      compression:   "{{ lookup('oo_option', 'image_compression') |
+                         default('xz', True) }}"
       name:   "{{ lookup('oo_option', 'image_name') |
       name:   "{{ lookup('oo_option', 'image_name') |
                   default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
                   default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
       sha256: "{{ lookup('oo_option', 'image_sha256') |
       sha256: "{{ lookup('oo_option', 'image_sha256') |
-                  default('e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab', True) }}"
+                  default('9461006300d65172f5668d8875f2aad7b54f7ba4e9c5435d65a84a5a2d66e39b', True) }}"
     ssh_user: openshift
     ssh_user: openshift
     sudo: yes
     sudo: yes
   online:
   online:

+ 6 - 4
playbooks/openstack/openshift-cluster/config.yml

@@ -1,5 +1,7 @@
 - hosts: localhost
 - hosts: localhost
   gather_facts: no
   gather_facts: no
+  become: no
+  connection: local
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
   tasks:
   tasks:
@@ -9,10 +11,10 @@
 
 
 - include: ../../common/openshift-cluster/config.yml
 - include: ../../common/openshift-cluster/config.yml
   vars:
   vars:
-    g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
-    g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
-    g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
-    g_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
+    g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+    g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([]))     | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+    g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+    g_node_hosts:   "{{ (groups['tag_host-type_node']|default([]))   | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
     openshift_cluster_id: "{{ cluster_id }}"
     openshift_cluster_id: "{{ cluster_id }}"

+ 0 - 6
playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml

@@ -107,12 +107,6 @@ resources:
       metadata:
       metadata:
         env: { get_param: cluster_id }
         env: { get_param: cluster_id }
         host-type: { get_param: type }
         host-type: { get_param: type }
-        env-host-type:
-          str_replace:
-            template: cluster_id-openshift-type
-            params:
-              cluster_id: { get_param: cluster_id }
-              type:       { get_param: type }
         sub-host-type:    { get_param: subtype }
         sub-host-type:    { get_param: subtype }
 
 
   port:
   port:

+ 5 - 4
playbooks/openstack/openshift-cluster/launch.yml

@@ -1,6 +1,7 @@
 ---
 ---
 - name: Launch instance(s)
 - name: Launch instance(s)
   hosts: localhost
   hosts: localhost
+  become: no
   connection: local
   connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
@@ -70,7 +71,7 @@
       ansible_ssh_host: '{{ item[2] }}'
       ansible_ssh_host: '{{ item[2] }}'
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: 'tag_env_{{ cluster_id }}, tag_host-type_etcd, tag_env-host-type_{{ cluster_id }}-openshift-etcd, tag_sub-host-type_default'
+      groups: 'tag_env_{{ cluster_id }}, tag_host-type_etcd, tag_sub-host-type_default'
     with_together:
     with_together:
       - parsed_outputs.etcd_names
       - parsed_outputs.etcd_names
       - parsed_outputs.etcd_ips
       - parsed_outputs.etcd_ips
@@ -82,7 +83,7 @@
       ansible_ssh_host: '{{ item[2] }}'
       ansible_ssh_host: '{{ item[2] }}'
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: 'tag_env_{{ cluster_id }}, tag_host-type_master, tag_env-host-type_{{ cluster_id }}-openshift-master, tag_sub-host-type_default'
+      groups: 'tag_env_{{ cluster_id }}, tag_host-type_master, tag_sub-host-type_default'
     with_together:
     with_together:
       - parsed_outputs.master_names
       - parsed_outputs.master_names
       - parsed_outputs.master_ips
       - parsed_outputs.master_ips
@@ -94,7 +95,7 @@
       ansible_ssh_host: '{{ item[2] }}'
       ansible_ssh_host: '{{ item[2] }}'
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_compute'
+      groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_sub-host-type_compute'
     with_together:
     with_together:
       - parsed_outputs.node_names
       - parsed_outputs.node_names
       - parsed_outputs.node_ips
       - parsed_outputs.node_ips
@@ -106,7 +107,7 @@
       ansible_ssh_host: '{{ item[2] }}'
       ansible_ssh_host: '{{ item[2] }}'
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-      groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_env-host-type_{{ cluster_id }}-openshift-node, tag_sub-host-type_infra'
+      groups: 'tag_env_{{ cluster_id }}, tag_host-type_node, tag_sub-host-type_infra'
     with_together:
     with_together:
       - parsed_outputs.infra_names
       - parsed_outputs.infra_names
       - parsed_outputs.infra_ips
       - parsed_outputs.infra_ips

+ 4 - 0
playbooks/openstack/openshift-cluster/list.yml

@@ -1,6 +1,8 @@
 ---
 ---
 - name: Generate oo_list_hosts group
 - name: Generate oo_list_hosts group
   hosts: localhost
   hosts: localhost
+  become: no
+  connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
@@ -22,6 +24,8 @@
 
 
 - name: List Hosts
 - name: List Hosts
   hosts: localhost
   hosts: localhost
+  become: no
+  connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml

+ 2 - 0
playbooks/openstack/openshift-cluster/terminate.yml

@@ -1,5 +1,6 @@
 - name: Terminate instance(s)
 - name: Terminate instance(s)
   hosts: localhost
   hosts: localhost
+  become: no
   connection: local
   connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:
@@ -25,6 +26,7 @@
             default('no', True) | lower in ['no', 'false']
             default('no', True) | lower in ['no', 'false']
 
 
 - hosts: localhost
 - hosts: localhost
+  become: no
   connection: local
   connection: local
   gather_facts: no
   gather_facts: no
   vars_files:
   vars_files:

+ 9 - 3
playbooks/openstack/openshift-cluster/update.yml

@@ -1,7 +1,15 @@
 ---
 ---
 - name: Populate oo_hosts_to_update group
 - name: Populate oo_hosts_to_update group
   hosts: localhost
   hosts: localhost
+  become: no
+  connection: local
   gather_facts: no
   gather_facts: no
+  vars:
+    g_etcd_hosts:   "{{ (groups['tag_host-type_etcd']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}"
+    g_lb_hosts:     "{{ (groups['tag_host-type_lb']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}"
+    g_master_hosts: "{{ (groups['tag_host-type_master']|default([])) | intersect(groups['tag_env_' ~ cluster_id]) }}"
+    g_node_hosts:   "{{ (groups['tag_host-type_node']|default([])) | intersect((groups['tag_env_' ~ cluster_id]|default([]))) }}"
+
   vars_files:
   vars_files:
   - vars.yml
   - vars.yml
   tasks:
   tasks:
@@ -11,9 +19,7 @@
       groups: oo_hosts_to_update
       groups: oo_hosts_to_update
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
       ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    with_items: (groups["tag_env-host-type_{{ cluster_id }}-openshift-master"] | default([]))
-                | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-node"] | default([]))
-                | union(groups["tag_env-host-type_{{ cluster_id }}-openshift-etcd"] | default([]))
+    with_items: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) | default([]) }}"
 
 
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 - include: ../../common/openshift-cluster/update_repos_and_packages.yml
 
 

+ 1 - 10
roles/ansible/tasks/main.yml

@@ -2,16 +2,7 @@
 # Install ansible client
 # Install ansible client
 
 
 - name: Install Ansible
 - name: Install Ansible
-  yum:
-    pkg: ansible
-    state: installed
-  when: ansible_pkg_mgr == "yum"
-
-- name: Install Ansible
-  dnf:
-    pkg: ansible
-    state: installed
-  when: ansible_pkg_mgr == "dnf"
+  action: "{{ ansible_pkg_mgr }} name=ansible state=present"
 
 
 - include: config.yml
 - include: config.yml
   vars:
   vars:

+ 1 - 1
roles/ansible_tower/tasks/main.yaml

@@ -1,6 +1,6 @@
 ---
 ---
 - name: install some useful packages
 - name: install some useful packages
-  yum: name={{ item }}
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
   with_items:
   with_items:
   - git
   - git
   - python-pip
   - python-pip

+ 1 - 1
roles/ansible_tower_cli/tasks/main.yml

@@ -1,6 +1,6 @@
 ---
 ---
 - name: Install python-ansible-tower-cli
 - name: Install python-ansible-tower-cli
-  yum: name=python-ansible-tower-cli
+  action: "{{ ansible_pkg_mgr }} name=python-ansible-tower-cli state=present"
 
 
 - template:
 - template:
     src: tower_cli.cfg.j2
     src: tower_cli.cfg.j2

+ 1 - 15
roles/cockpit/tasks/main.yml

@@ -1,25 +1,11 @@
 ---
 ---
 - name: Install cockpit-ws
 - name: Install cockpit-ws
-  yum:
-    name: "{{ item }}"
-    state: present
+  action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
   with_items:
   with_items:
     - cockpit-ws
     - cockpit-ws
     - cockpit-shell
     - cockpit-shell
     - cockpit-bridge
     - cockpit-bridge
     - "{{ cockpit_plugins }}"
     - "{{ cockpit_plugins }}"
-  when: ansible_pkg_mgr == "yum"
-
-- name: Install cockpit-ws
-  dnf:
-    name: "{{ item }}"
-    state: present
-  with_items:
-    - cockpit-ws
-    - cockpit-shell
-    - cockpit-bridge
-    - "{{ cockpit_plugins }}"
-  when: ansible_pkg_mgr == "dnf"
 
 
 - name: Enable cockpit-ws
 - name: Enable cockpit-ws
   service:
   service:

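The multi-package role installs collapse the duplicated yum/dnf task pairs into one looped action, so the package list is maintained once. Worth noting: with_items flattens one level, which is how the role mixes literal package names with the cockpit_plugins list variable. A minimal sketch with illustrative packages and plugin list:

---
- hosts: all
  vars:
    cockpit_plugins: [cockpit-kubernetes]
  tasks:
  # with_items flattens one level, so a list variable can sit alongside
  # literal package names in the same loop.
  - name: Install cockpit and optional plugins
    action: "{{ ansible_pkg_mgr }} name={{ item }} state=present"
    with_items:
    - cockpit-ws
    - cockpit-shell
    - "{{ cockpit_plugins }}"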
+ 1 - 9
roles/copr_cli/tasks/main.yml

@@ -1,10 +1,2 @@
 ---
 ---
-- yum:
-    name: copr-cli
-    state: present
-  when: ansible_pkg_mgr == "yum"
-
-- dnf:
-    name: copr-cli
-    state: present
-  when: ansible_pkg_mgr == "dnf"
+- action: "{{ ansible_pkg_mgr }} name=copr-cli state=present"

+ 2 - 7
roles/docker/tasks/main.yml

@@ -1,13 +1,8 @@
 ---
 ---
 # tasks file for docker
 # tasks file for docker
 - name: Install docker
 - name: Install docker
-  yum: pkg=docker
-  when: ansible_pkg_mgr == "yum"
-
-- name: Install docker
-  dnf: pkg=docker
-  when: ansible_pkg_mgr == "dnf"
-
+  action: "{{ ansible_pkg_mgr }} name=docker state=present"
+  
 - name: enable and start the docker service
 - name: enable and start the docker service
   service: name=docker enabled=yes state=started
   service: name=docker enabled=yes state=started
 
 

+ 1 - 6
roles/etcd/tasks/main.yml

@@ -8,12 +8,7 @@
   when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4"
   when: "'ipv4' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface] or 'address' not in hostvars[inventory_hostname]['ansible_' ~ etcd_interface].ipv4"
 
 
 - name: Install etcd
 - name: Install etcd
-  yum: pkg=etcd-2.* state=present
-  when: ansible_pkg_mgr == "yum"
-
-- name: Install etcd
-  dnf: pkg=etcd* state=present
-  when: ansible_pkg_mgr == "dnf"
+  action: "{{ ansible_pkg_mgr }} name=etcd-2.* state=present"
 
 
 - name: Validate permissions on the config dir
 - name: Validate permissions on the config dir
   file:
   file:

+ 1 - 7
roles/flannel/tasks/main.yml

@@ -1,13 +1,7 @@
 ---
 ---
 - name: Install flannel
 - name: Install flannel
   sudo: true
   sudo: true
-  yum: pkg=flannel state=present
-  when: ansible_pkg_mgr == "yum"
-
-- name: Install flannel
-  sudo: true
-  dnf: pkg=flannel state=present
-  when: ansible_pkg_mgr == "dnf"
+  action: "{{ ansible_pkg_mgr }} name=flannel state=present"
 
 
 - name: Set flannel etcd url
 - name: Set flannel etcd url
   sudo: true
   sudo: true

+ 1 - 10
roles/fluentd_master/tasks/main.yml

@@ -1,16 +1,7 @@
 ---
 ---
 # TODO: Update fluentd install and configuration when packaging is complete
 # TODO: Update fluentd install and configuration when packaging is complete
 - name: download and install td-agent
 - name: download and install td-agent
-  yum:
-    name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
-    state: present
-  when: ansible_pkg_mgr == "yum"
-
-- name: download and install td-agent
-  dnf:
-    name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
-    state: present
-  when: ansible_pkg_mgr == "dnf"
+  action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present"
 
 
 - name: Verify fluentd plugin installed
 - name: Verify fluentd plugin installed
   command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
   command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'

+ 1 - 10
roles/fluentd_node/tasks/main.yml

@@ -1,16 +1,7 @@
 ---
 ---
 # TODO: Update fluentd install and configuration when packaging is complete
 # TODO: Update fluentd install and configuration when packaging is complete
 - name: download and install td-agent
 - name: download and install td-agent
-  yum:
-    name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
-    state: present
-  when: ansible_pkg_mgr == "yum"
-
-- name: download and install td-agent
-  dnf:
-    name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
-    state: present
-  when: ansible_pkg_mgr == "dnf"
+  action: "{{ ansible_pkg_mgr }} name='http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm' state=present"
 
 
 - name: Verify fluentd plugin installed
 - name: Verify fluentd plugin installed
   command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
   command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'

+ 1 - 10
roles/haproxy/tasks/main.yml

@@ -1,15 +1,6 @@
 ---
 ---
 - name: Install haproxy
 - name: Install haproxy
-  yum:
-    pkg: haproxy
-    state: present
-  when: ansible_pkg_mgr == "yum"
-
-- name: Install haproxy
-  dnf:
-    pkg: haproxy
-    state: present
-  when: ansible_pkg_mgr == "dnf"
+  action: "{{ ansible_pkg_mgr }} name=haproxy state=present"
 
 
 - name: Configure haproxy
 - name: Configure haproxy
   template:
   template:

+ 1 - 6
roles/kube_nfs_volumes/tasks/main.yml

@@ -1,11 +1,6 @@
 ---
 ---
 - name: Install pyparted (RedHat/Fedora)
 - name: Install pyparted (RedHat/Fedora)
-  yum: name=pyparted,python-httplib2 state=present
-  when: ansible_pkg_mgr == "yum"
-
-- name: Install pyparted (RedHat/Fedora)
-  dnf: name=pyparted,python-httplib2 state=present
-  when: ansible_pkg_mgr == "dnf"
+  action: "{{ ansible_pkg_mgr }} name=pyparted,python-httplib2 state=present"
 
 
 - name: partition the drives
 - name: partition the drives
   partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }}
   partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }}

+ 2 - 7
roles/kube_nfs_volumes/tasks/nfs.yml

@@ -1,11 +1,6 @@
 ---
 ---
-- name: Install NFS server on Fedora/Red Hat
-  yum: name=nfs-utils state=present
-  when: ansible_pkg_mgr == "yum"
-
-- name: Install NFS server on Fedora/Red Hat
-  dnf: name=nfs-utils state=present
-  when: ansible_pkg_mgr == "dnf"
+- name: Install NFS server
+  action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present"
 
 
 - name: Start rpcbind on Fedora/Red Hat
 - name: Start rpcbind on Fedora/Red Hat
   service: name=rpcbind state=started enabled=yes
   service: name=rpcbind state=started enabled=yes

+ 120 - 6
roles/lib_zabbix/library/zbx_action.py

@@ -30,6 +30,17 @@
 # pylint: disable=import-error
 # pylint: disable=import-error
 from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection, ZabbixAPIError
 from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection, ZabbixAPIError
 
 
+CUSTOM_SCRIPT_ACTION = '0'
+IPMI_ACTION = '1'
+SSH_ACTION = '2'
+TELNET_ACTION = '3'
+GLOBAL_SCRIPT_ACTION = '4'
+
+EXECUTE_ON_ZABBIX_AGENT = '0'
+EXECUTE_ON_ZABBIX_SERVER = '1'
+
+OPERATION_REMOTE_COMMAND = '1'
+
 def exists(content, key='result'):
 def exists(content, key='result'):
     ''' Check if key exists in content or the size of content[key] > 0
     ''' Check if key exists in content or the size of content[key] > 0
     '''
     '''
@@ -70,6 +81,40 @@ def filter_differences(zabbix_filters, user_filters):
 
 
     return rval
     return rval
 
 
+def host_in_zabbix(zab_hosts, usr_host):
+    ''' Check whether a particular user host is already in the
+        Zabbix list of hosts '''
+
+    for usr_hst_key, usr_hst_val in usr_host.items():
+        for zab_host in zab_hosts:
+            if usr_hst_key in zab_host and \
+               zab_host[usr_hst_key] == str(usr_hst_val):
+                return True
+
+    return False
+
+def hostlist_in_zabbix(zab_hosts, usr_hosts):
+    ''' Check whether user-provided list of hosts are already in
+        the Zabbix action '''
+
+    if len(zab_hosts) != len(usr_hosts):
+        return False
+
+    for usr_host in usr_hosts:
+        if not host_in_zabbix(zab_hosts, usr_host):
+            return False
+
+    return True
+
+def opcommand_diff(zab_op_cmd, usr_op_cmd):
+    ''' Check whether user-provided opcommand matches what's already
+        stored in Zabbix '''
+
+    for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items():
+        if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val):
+            return True
+    return False
+
 # This logic is quite complex.  We are comparing two lists of dictionaries.
 # The outer for-loops allow us to descend down into both lists at the same time
 # and then walk over the key,val pairs of the incoming user dict's changes
@@ -116,6 +161,18 @@ def operation_differences(zabbix_ops, user_ops):
                 if usr_ids != zab_usr_ids:
                     rval[key] = val
 
 
+            elif key == 'opcommand':
+                if opcommand_diff(zab[key], val):
+                    rval[key] = val
+                    break
+
+            # opcommand_grp can be treated just like opcommand_hst
+            # as opcommand_grp[] is just a list of groups
+            elif key == 'opcommand_hst' or key == 'opcommand_grp':
+                if not hostlist_in_zabbix(zab[key], val):
+                    rval[key] = val
+                    break
+
             elif zab[key] != str(val):
                 rval[key] = val
     return rval
@@ -288,7 +345,7 @@ def get_condition_type(event_source, inc_condition):
 def get_operation_type(inc_operation):
     ''' determine the correct operation type'''
     o_types = {'send message': 0,
-               'remote command': 1,
+               'remote command': OPERATION_REMOTE_COMMAND,
                'add host': 2,
                'remove host': 3,
                'add to host group': 4,
@@ -301,7 +358,64 @@ def get_operation_type(inc_operation):
 
 
     return o_types[inc_operation]
 
 
-def get_action_operations(zapi, inc_operations):
+def get_opcommand_type(opcommand_type):
+    ''' determine the opcommand type '''
+    oc_types = {'custom script': CUSTOM_SCRIPT_ACTION,
+                'IPMI': IPMI_ACTION,
+                'SSH': SSH_ACTION,
+                'Telnet': TELNET_ACTION,
+                'global script': GLOBAL_SCRIPT_ACTION,
+               }
+
+    return oc_types[opcommand_type]
+
+def get_execute_on(execute_on):
+    ''' determine the execution target '''
+    e_types = {'zabbix agent': EXECUTE_ON_ZABBIX_AGENT,
+               'zabbix server': EXECUTE_ON_ZABBIX_SERVER,
+              }
+
+    return e_types[execute_on]
+
+def action_remote_command(ansible_module, zapi, operation):
+    ''' Process remote command type of actions '''
+
+    if 'type' not in operation['opcommand']:
+        ansible_module.exit_json(failed=True, changed=False, state='unknown',
+                                 results="No Operation Type provided")
+
+    operation['opcommand']['type'] = get_opcommand_type(operation['opcommand']['type'])
+
+    if operation['opcommand']['type'] == CUSTOM_SCRIPT_ACTION:
+
+        if 'execute_on' in operation['opcommand']:
+            operation['opcommand']['execute_on'] = get_execute_on(operation['opcommand']['execute_on'])
+
+        # custom script still requires the target hosts/groups to be set
+        operation['opcommand_hst'] = []
+        operation['opcommand_grp'] = []
+        for usr_host in operation['target_hosts']:
+            if usr_host['target_type'] == 'zabbix server':
+                # 0 = target host local/current host
+                operation['opcommand_hst'].append({'hostid': 0})
+            elif usr_host['target_type'] == 'group':
+                group_name = usr_host['target']
+                gid = get_host_group_id_by_name(zapi, group_name)
+                operation['opcommand_grp'].append({'groupid': gid})
+            elif usr_host['target_type'] == 'host':
+                host_name = usr_host['target']
+                hid = get_host_id_by_name(zapi, host_name)
+                operation['opcommand_hst'].append({'hostid': hid})
+
+        # 'target_hosts' is just to make it easier to build zbx_actions
+        # not part of ZabbixAPI
+        del operation['target_hosts']
+    else:
+        ansible_module.exit_json(failed=True, changed=False, state='unknown',
+                                 results="Unsupported remote command type")
+
+
+def get_action_operations(ansible_module, zapi, inc_operations):
     '''Convert the operations into syntax for api'''
     for operation in inc_operations:
         operation['operationtype'] = get_operation_type(operation['operationtype'])
@@ -315,9 +429,8 @@ def get_action_operations(zapi, inc_operations):
             else:
                 operation['opmessage']['default_msg'] = 0
 
 
-        # NOT supported for remote commands
-        elif operation['operationtype'] == 1:
-            continue
+        elif operation['operationtype'] == OPERATION_REMOTE_COMMAND:
+            action_remote_command(ansible_module, zapi, operation)
 
 
         # Handle Operation conditions:
         # Currently there is only 1 available which
@@ -464,7 +577,8 @@ def main():
     if state == 'present':
 
 
         conditions = get_action_conditions(zapi, module.params['event_source'], module.params['conditions_filter'])
-        operations = get_action_operations(zapi, module.params['operations'])
+        operations = get_action_operations(module, zapi,
+                                           module.params['operations'])
         params = {'name': module.params['name'],
                   'esc_period': module.params['escalation_time'],
                   'eventsource': get_event_source(module.params['event_source']),
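
Based on the fields the new remote-command path reads (operationtype, opcommand.type, opcommand.execute_on, and target_hosts), an action definition passed to this module would look roughly like the task below. This is a hypothetical sketch: the Zabbix connection parameters are omitted, and the script/command fields are assumptions rather than part of this diff.

- name: Ensure a remediation action exists
  zbx_action:
    name: 'Remediate failed heartbeat'
    event_source: 'trigger'
    escalation_time: 60
    operations:
    - operationtype: 'remote command'
      opcommand:
        type: 'custom script'
        execute_on: 'zabbix agent'
        command: '/usr/local/bin/remediate.sh'   # assumed field name; depends on the Zabbix API version
      target_hosts:
      - target_type: 'host'
        target: 'node1.example.com'
    state: present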

+ 1 - 1
roles/nickhammond.logrotate/tasks/main.yml

@@ -1,6 +1,6 @@
 ---
 - name: nickhammond.logrotate | Install logrotate
-  action: "{{ansible_pkg_mgr}} pkg=logrotate state=present"
+  action: "{{ ansible_pkg_mgr }} name=logrotate state=present"
 
 
 - name: nickhammond.logrotate | Setup logrotate.d scripts
   template:

+ 1 - 13
roles/openshift_ansible_inventory/tasks/main.yml

@@ -1,17 +1,5 @@
 ---
-- yum:
-    name: "{{ item }}"
-    state: present
-  when: ansible_pkg_mgr == "yum"
-  with_items:
-  - openshift-ansible-inventory
-  - openshift-ansible-inventory-aws
-  - openshift-ansible-inventory-gce
-
-- dnf:
-    name: "{{ item }}"
-    state: present
-  when: ansible_pkg_mgr == "dnf"
+- action: "{{ ansible_pkg_mgr }} name={{ item}} state=present"
   with_items:
   - openshift-ansible-inventory
   - openshift-ansible-inventory-aws

+ 7 - 0
roles/openshift_common/tasks/main.yml

@@ -14,6 +14,7 @@
       cluster_id: "{{ openshift_cluster_id | default('default') }}"
       debug_level: "{{ openshift_debug_level | default(2) }}"
       hostname: "{{ openshift_hostname | default(None) }}"
+      install_examples: "{{ openshift_install_examples | default(True) }}"
       ip: "{{ openshift_ip | default(None) }}"
       public_hostname: "{{ openshift_public_hostname | default(None) }}"
       public_ip: "{{ openshift_public_ip | default(None) }}"
@@ -24,6 +25,12 @@
       use_flannel: "{{ openshift_use_flannel | default(None) }}"
       use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
 
 
+- name: Install the base package for versioning
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') }} state=present"
+
+- name: Set version facts
+  openshift_facts:
+
   # For enterprise versions < 3.1 and origin versions < 1.1 we want to set the
   # hostname by default.
 - set_fact:

+ 8 - 1
roles/openshift_examples/README.md

@@ -11,6 +11,13 @@ ansible.
 Requirements
 ------------
 
 
+Facts
+-----
+
+| Name                       | Default Value | Description                            |
+-----------------------------|---------------|----------------------------------------|
+| openshift_install_examples | true          | Runs the role with the below variables |
+
 Role Variables
 --------------
 
 
@@ -32,7 +39,7 @@ Example Playbook
 TODO
 ----
 Currently we use `oc create -f` against various files and we accept non zero return code as a success
-if (and only iff) stderr also contains the string 'already exists'. This means that if one object in the file exists already
+if (and only if) stderr also contains the string 'already exists'. This means that if one object in the file exists already
 but others fail to create you won't be aware of the failure. This also means that we do not currently support
 updating existing objects.
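
In practice, the acceptance logic described here boils down to a task along these lines (an illustrative sketch; the path and register name are hypothetical, and only the 'already exists' check mirrors the behaviour described above):

- name: Import example content
  command: "{{ openshift.common.client_binary }} create -f /usr/share/openshift/examples/image-streams"
  register: oc_create_result
  failed_when: oc_create_result.rc != 0 and 'already exists' not in oc_create_result.stderr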
 
 

+ 1 - 6
roles/openshift_expand_partition/tasks/main.yml

@@ -1,11 +1,6 @@
 ---
 - name: Ensure growpart is installed
-  yum: pkg=cloud-utils-growpart state=present
-  when: ansible_pkg_mgr == "yum"
-
-- name: Ensure growpart is installed
-  dnf: pkg=cloud-utils-growpart state=present
-  when: ansible_pkg_mgr == "dnf"
+  action: "{{ ansible_pkg_mgr }} name=cloud-utils-growpart state=present"
 
 
 - name: Grow the partitions
   command: "growpart {{oep_drive}} {{oep_partition}}"

+ 1 - 0
roles/openshift_facts/library/openshift_facts.py

@@ -1057,6 +1057,7 @@ class OpenShiftFacts(object):
         common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc'
         common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm'
         common['dns_domain'] = 'cluster.local'
+        common['install_examples'] = True
         defaults['common'] = common
 
 
         if 'master' in roles:

+ 1 - 10
roles/openshift_facts/tasks/main.yml

@@ -7,16 +7,7 @@
     - ansible_version | version_compare('1.9.0.1', 'ne')
 
 
 - name: Ensure PyYaml is installed
-  yum: pkg={{ item }} state=installed
-  when: ansible_pkg_mgr == "yum"
-  with_items:
-    - PyYAML
-
-- name: Ensure PyYaml is installed
-  dnf: pkg={{ item }} state=installed
-  when: ansible_pkg_mgr == "dnf"
-  with_items:
-    - PyYAML
+  action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
 
 
 - name: Gather Cluster facts
   openshift_facts:

+ 4 - 21
roles/openshift_master/tasks/main.yml

@@ -78,14 +78,7 @@
       controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
 
 
 - name: Install Master package
-  yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version  }} state=present
-  when: ansible_pkg_mgr == "yum"
-  register: install_result
-
-- name: Install Master package
-  dnf: pkg={{ openshift.common.service_type }}-master{{ openshift_version  }} state=present
-  when: ansible_pkg_mgr == "dnf"
-  register: install_result
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version  }} state=present"
 
 
 - name: Re-gather package dependent master facts
   openshift_facts:
@@ -117,13 +110,8 @@
   - restart master controllers
 
 
 - name: Install httpd-tools if needed
-  yum: pkg=httpd-tools state=present
-  when: (ansible_pkg_mgr == "yum") and (item.kind == 'HTPasswdPasswordIdentityProvider')
-  with_items: openshift.master.identity_providers
-
-- name: Install httpd-tools if needed
-  dnf: pkg=httpd-tools state=present
-  when: (ansible_pkg_mgr == "dnf") and (item.kind == 'HTPasswdPasswordIdentityProvider')
+  action: "{{ ansible_pkg_mgr }} name=httpd-tools state=present"
+  when: (item.kind == 'HTPasswdPasswordIdentityProvider')
   with_items: openshift.master.identity_providers
 
 
 - name: Ensure htpasswd directory exists
@@ -147,13 +135,11 @@
   template:
     src: atomic-openshift-master-api.service.j2
     dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-api.service
-    force: no
   when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
 - name: Create the controllers service file
   template:
     src: atomic-openshift-master-controllers.service.j2
     dest: /usr/lib/systemd/system/{{ openshift.common.service_type }}-master-controllers.service
-    force: no
   when: openshift_master_ha | bool and openshift_master_cluster_method == "native"
 - name: Create the api env file
   template:
@@ -254,20 +240,17 @@
     master_api_service_status_changed = start_result | changed
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
 
 
-# TODO: fix the ugly workaround of setting ignore_errors
-#       the controllers service tries to start even if it is already started
 - name: Start and enable master controller
   service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
   register: start_result
-  ignore_errors: yes
 
 
 - set_fact:
     master_controllers_service_status_changed = start_result | changed
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
 
 
 - name: Install cluster packages
-  action: "{{ansible_pkg_mgr}} pkg=pcs state=present"
+  action: "{{ ansible_pkg_mgr }} name=pcs state=present"
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
   register: install_result
 
 

+ 1 - 1
roles/openshift_master/templates/atomic-openshift-master-controllers.service.j2

@@ -7,7 +7,7 @@ Before={{ openshift.common.service_type }}-node.service
 Requires=network.target
 
 
 [Service]
-Type=notify
+Type=simple
 EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
 Environment=GOTRACEBACK=crash
 ExecStart=/usr/bin/openshift start master controllers --config=${CONFIG_FILE} $OPTIONS

+ 1 - 8
roles/openshift_master_ca/tasks/main.yml

@@ -1,13 +1,6 @@
 ---
 - name: Install the base package for admin tooling
-  yum: pkg={{ openshift.common.service_type }}{{ openshift_version  }} state=present
-  when: ansible_pkg_mgr == "yum"
-  register: install_result
-
-- name: Install the base package for admin tooling
-  dnf: pkg={{ openshift.common.service_type }}{{ openshift_version  }} state=present
-  when: ansible_pkg_mgr == "dnf"
-  register: install_result
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version  }} state=present"
 
 
 - name: Reload generated facts
   openshift_facts:

+ 3 - 16
roles/openshift_node/tasks/main.yml

@@ -37,24 +37,11 @@
 # We have to add tuned-profiles in the same transaction otherwise we run into depsolving
 # problems because the rpms don't pin the version properly.
 - name: Install Node package
-  yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version  }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version  }} state=present
-  when: ansible_pkg_mgr == "yum"
-  register: node_install_result
-
-- name: Install Node package
-  dnf: pkg={{ openshift.common.service_type }}-node{{ openshift_version  }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version  }} state=present
-  when: ansible_pkg_mgr == "dnf"
-  register: node_install_result
-
-- name: Install sdn-ovs package
-  yum: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present
-  register: sdn_install_result
-  when: ansible_pkg_mgr == "yum" and openshift.common.use_openshift_sdn
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version  }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version  }} state=present"
 
 
 - name: Install sdn-ovs package
-  dnf: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present
-  register: sdn_install_result
-  when: ansible_pkg_mgr == "dnf" and openshift.common.use_openshift_sdn
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present"
+  when: openshift.common.use_openshift_sdn
 
 
 # TODO: add the validate parameter when there is a validation command to run
 - name: Create the Node config

+ 0 - 0
roles/openshift_node/tasks/storage_plugins/ceph.yml


Some files were not shown because too many files changed in this diff