
Merge pull request #1214 from openshift/master

Merge master into prod
Matt Woodson 9 years ago
parent
commit
3bed4d0c4b
62 changed files with 1127 additions and 250 deletions
  1. + 1 - 1  .tito/packages/openshift-ansible
  2. + 1 - 1  README_AWS.md
  3. + 1 - 0  bin/opssh
  4. + 5 - 0  bin/ossh
  5. + 6 - 6  bin/ossh_bash_completion
  6. + 3 - 3  bin/ossh_zsh_completion
  7. + 1 - 1  bin/zsh_functions/_ossh
  8. + 45 - 22  docs/best_practices_guide.adoc
  9. + 12 - 6  docs/style_guide.adoc
  10. + 17 - 1  filter_plugins/oo_filters.py
  11. + 27 - 1  filter_plugins/openshift_master.py
  12. + 2 - 2  inventory/aws/hosts/ec2.ini
  13. + 75 - 1  openshift-ansible.spec
  14. + 1 - 1  playbooks/adhoc/bootstrap-fedora.yml
  15. + 2 - 2  playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
  16. + 4 - 0  playbooks/adhoc/uninstall.yml
  17. + 17 - 0  playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md
  18. + 14 - 0  playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
  19. + 1 - 0  playbooks/byo/openshift-master/filter_plugins
  20. + 1 - 0  playbooks/byo/openshift-master/lookup_plugins
  21. + 4 - 0  playbooks/byo/openshift-master/restart.yml
  22. + 1 - 0  playbooks/byo/openshift-master/roles
  23. + 1 - 0  playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
  24. + 1 - 0  playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
  25. + 1 - 0  playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
  26. + 50 - 0  playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
  27. + 87 - 0  playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
  28. + 1 - 0  playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
  29. + 137 - 0  playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
  30. + 2 - 0  playbooks/common/openshift-master/config.yml
  31. + 143 - 0  playbooks/common/openshift-master/restart.yml
  32. + 39 - 0  playbooks/common/openshift-master/restart_hosts.yml
  33. + 25 - 0  playbooks/common/openshift-master/restart_hosts_pacemaker.yml
  34. + 27 - 0  playbooks/common/openshift-master/restart_services.yml
  35. + 10 - 0  playbooks/common/openshift-master/restart_services_pacemaker.yml
  36. + 52 - 9  playbooks/common/openshift-node/config.yml
  37. + 0 - 16  roles/etcd/tasks/main.yml
  38. + 1 - 0  roles/haproxy/handlers/main.yml
  39. + 2 - 3  roles/haproxy/tasks/main.yml
  40. + 91 - 56  roles/lib_zabbix/library/zbx_action.py
  41. + 1 - 16  roles/openshift_cli/tasks/main.yml
  42. + 2 - 1  roles/openshift_common/tasks/main.yml
  43. + 1 - 0  roles/openshift_facts/tasks/main.yml
  44. + 13 - 0  roles/openshift_master/handlers/main.yml
  45. + 15 - 16  roles/openshift_master/tasks/main.yml
  46. + 2 - 2  roles/openshift_master/templates/master.yaml.v1.j2
  47. + 2 - 8  roles/openshift_master_ca/tasks/main.yml
  48. + 16 - 28  roles/openshift_node/tasks/main.yml
  49. + 4 - 0  roles/openshift_node/tasks/storage_plugins/nfs.yml
  50. + 1 - 3  roles/openshift_node/templates/node.yaml.v1.j2
  51. + 0 - 4  roles/os_zabbix/tasks/main.yml
  52. + 54 - 0  roles/oso_monitoring_tools/README.md
  53. + 2 - 0  roles/oso_monitoring_tools/defaults/main.yml
  54. + 2 - 0  roles/oso_monitoring_tools/handlers/main.yml
  55. + 8 - 0  roles/oso_monitoring_tools/meta/main.yml
  56. + 17 - 0  roles/oso_monitoring_tools/tasks/main.yml
  57. + 12 - 0  roles/oso_monitoring_tools/vars/main.yml
  58. + 14 - 13  utils/src/ooinstall/cli_installer.py
  59. + 3 - 1  utils/src/ooinstall/oo_config.py
  60. + 13 - 2  utils/src/ooinstall/openshift_ansible.py
  61. + 21 - 21  utils/test/cli_installer_tests.py
  62. + 13 - 3  utils/test/fixture.py

+ 1 - 1
.tito/packages/openshift-ansible

@@ -1 +1 @@
-3.0.27-1 ./
+3.0.34-1 ./

+ 1 - 1
README_AWS.md

@@ -51,7 +51,7 @@ to setup a private key file to allow ansible to connect to the created hosts.
 To do so, add the the following entry to your $HOME/.ssh/config file and make it point to the private key file which allows you to login on AWS.
 ```
 Host *.compute-1.amazonaws.com
-  PrivateKey $HOME/.ssh/my_private_key.pem
+  IdentityFile $HOME/.ssh/my_private_key.pem
 ```
 
 Alternatively, you can configure your ssh-agent to hold the credentials to connect to your AWS instances.

+ 1 - 0
bin/opssh

@@ -16,6 +16,7 @@ Options:
   -c CLUSTER, --cluster CLUSTER
                         which cluster to use
   -e ENV, --env ENV     which environment to use
+  --v3                  When working with v3 environments.  v2 by default
   -t HOST_TYPE, --host-type HOST_TYPE
                         which host type to use
   --list-host-types     list all of the host types

+ 5 - 0
bin/ossh

@@ -72,6 +72,8 @@ class Ossh(object):
         parser.add_argument('-o', '--ssh_opts', action='store',
                             help='options to pass to SSH.\n \
                                   "-oForwardX11=yes,TCPKeepAlive=yes"')
+        parser.add_argument('-A', default=False, action="store_true",
+                            help='Forward authentication agent')
         parser.add_argument('host', nargs='?', default='')
 
         self.args = parser.parse_args()
@@ -177,6 +179,9 @@ class Ossh(object):
             if self.user:
                 ssh_args.append('-l%s' % self.user)
 
+            if self.args.A:
+                ssh_args.append('-A')
+
             if self.args.verbose:
                 ssh_args.append('-vvv')
 
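The new `-A` option follows the existing argparse pattern in this file: a boolean flag that, when set, appends the matching option to the ssh argument list. A minimal standalone sketch of that flow (the surrounding `Ossh` class is omitted):

```
import argparse

# Reduced model of the change above: a store_true flag that maps
# directly onto ssh's -A (forward authentication agent) argument.
parser = argparse.ArgumentParser()
parser.add_argument('-A', default=False, action='store_true',
                    help='Forward authentication agent')
args = parser.parse_args(['-A'])

ssh_args = ['/usr/bin/ssh']
if args.A:
    ssh_args.append('-A')
print(ssh_args)  # ['/usr/bin/ssh', '-A']
```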

+ 6 - 6
bin/ossh_bash_completion

@@ -1,12 +1,12 @@
 __ossh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+      /usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
 
     elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
 
     elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])'
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])'
 
     fi
 }
@@ -26,13 +26,13 @@ complete -F _ossh ossh oscp
 
 __opssh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in m.result["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+      /usr/bin/python -c 'from openshift_ansible.multi_inventory import MultiInventory; m=MultiInventory(); m.run(); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in m.result["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
 
     elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+      /usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
 
     elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
+      /usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("/dev/shm/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(set(["%s" % (host["oo_hosttype"]) for dns, host in z["_meta"]["hostvars"].items() if "oo_hosttype" in host]))'
 
     fi
 }
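The completion one-liners are dense; expanded into ordinary Python 3 (the originals are Python 2 `print` statements), the cache-reading branches amount to the sketch below. The cache layout is taken from the code above: a dict whose `_meta.hostvars` maps each DNS name to host metadata keyed by the new `oo_*` names.

```
import json
import os

# Readable (Python 3) equivalent of the cache-reading one-liners above.
def known_hosts(cache='~/.ansible/tmp/multi_inventory.cache'):
    with open(os.path.expanduser(cache)) as f:
        data = json.load(f)
    hosts = data['_meta']['hostvars'].values()
    return ['%s.%s' % (h['oo_name'], h['oo_environment'])
            for h in hosts
            if all(k in h for k in ('oo_name', 'oo_environment'))]

def known_host_types(cache='~/.ansible/tmp/multi_inventory.cache'):
    with open(os.path.expanduser(cache)) as f:
        data = json.load(f)
    hosts = data['_meta']['hostvars'].values()
    # set() mirrors the de-duplication added to the opssh branch above
    return sorted(set(h['oo_hosttype'] for h in hosts
                      if 'oo_hosttype' in h))
```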

+ 3 - 3
bin/ossh_zsh_completion

@@ -2,13 +2,13 @@
 
 _ossh_known_hosts(){
     if python -c 'import openshift_ansible' &>/dev/null; then
-      print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+      print $(/usr/bin/python -c 'from openshift_ansible import multi_inventory; m=multi_inventory.MultiInventory(); m.run(); z=m.result; print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
 
     elif [[ -f /dev/shm/.ansible/tmp/multi_inventory.cache ]]; then
-      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+      print $(/usr/bin/python -c 'import json; loc="/dev/shm/.ansible/tmp/multi_inventory.cache"; z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
 
     elif [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_env"))])')
+      print $(/usr/bin/python -c 'import json,os; loc="%s" % os.path.expanduser("~/.ansible/tmp/multi_inventory.cache"); z=json.loads(open(loc).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("oo_name", "oo_environment"))])')
 
     fi
 

+ 1 - 1
bin/zsh_functions/_ossh

@@ -2,7 +2,7 @@
 
 _ossh_known_hosts(){
   if [[ -f ~/.ansible/tmp/multi_inventory.cache ]]; then
-    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_env"]) for dns, host in z["_meta"]["hostvars"].items()])')
+    print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["oo_name"],host["oo_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
   fi
 }
 

+ 45 - 22
docs/best_practices_guide.adoc

@@ -13,9 +13,12 @@ This guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119].
 
 == Pull Requests
 
+
+
+[[All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<All-pull-requests-MUST-pass-the-build-bot-before-they-are-merged, Rule>>
 | All pull requests MUST pass the build bot *before* they are merged.
 |===
 
@@ -30,9 +33,10 @@ The tooling is flexible enough that exceptions can be made so that the tool the
 === Python Source Files
 
 '''
+[[Python-source-files-MUST-contain-the-following-vim-mode-line]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Python-source-files-MUST-contain-the-following-vim-mode-line, Rule>>
 | Python source files MUST contain the following vim mode line.
 |===
 
@@ -48,9 +52,10 @@ If mode lines for other editors are needed, please open a GitHub issue.
 === Method Signatures
 
 '''
+[[When-adding-a-new-paramemter-to-an-existing-method-a-default-value-SHOULD-be-used]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<When-adding-a-new-paramemter-to-an-existing-method-a-default-value-SHOULD-be-used, Rule>>
 | When adding a new paramemter to an existing method, a default value SHOULD be used
 |===
 The purpose of this rule is to make it so that method signatures are backwards compatible.
@@ -74,18 +79,20 @@ def add_person(first_name, last_name, age=None):
 http://www.pylint.org/[PyLint] is used in an attempt to keep the python code as clean and as managable as possible. The build bot runs each pull request through PyLint and any warnings or errors cause the build bot to fail the pull request.
 
 '''
+[[PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<PyLint-rules-MUST-NOT-be-disabled-on-a-whole-file, Rule>>
 | PyLint rules MUST NOT be disabled on a whole file.
 |===
 
 Instead, http://docs.pylint.org/faq.html#is-it-possible-to-locally-disable-a-particular-message[disable the PyLint check on the line where PyLint is complaining].
 
 '''
+[[PyLint-rules-MUST-NOT-be-disabled-unless-they-meet-one-of-the-following-exceptions]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<PyLint-rules-MUST-NOT-be-disabled-unless-they-meet-one-of-the-following-exceptions, Rule>>
 | PyLint rules MUST NOT be disabled unless they meet one of the following exceptions
 |===
 
@@ -95,9 +102,10 @@ Instead, http://docs.pylint.org/faq.html#is-it-possible-to-locally-disable-a-par
 1. When PyLint fails, but the code makes more sense the way it is formatted (stylistic exception). For this exception, the description of the PyLint disable MUST state why the code is more clear, AND the person reviewing the PR will decide if they agree or not. The reviewer may reject the PR if they disagree with the reason for the disable.
 
 '''
+[[All-PyLint-rule-disables-MUST-be-documented-in-the-code]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<All-PyLint-rule-disables-MUST-be-documented-in-the-code, Rule>>
 | All PyLint rule disables MUST be documented in the code.
 |===
 
@@ -124,9 +132,10 @@ metadata[line] = results.pop()
 === Yaml Files (Playbooks, Roles, Vars, etc)
 
 '''
+[[Ansible-files-SHOULD-NOT-use-JSON-use-pure-YAML-instead]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Ansible-files-SHOULD-NOT-use-JSON-use-pure-YAML-instead, Rule>>
 | Ansible files SHOULD NOT use JSON (use pure YAML instead).
 |===
 
@@ -144,9 +153,10 @@ Every effort should be made to keep our Ansible YAML files in pure YAML.
 
 === Modules
 '''
+[[Custom-Ansible-modules-SHOULD-be-embedded-in-a-role]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Custom-Ansible-modules-SHOULD-be-embedded-in-a-role, Rule>>
 | Custom Ansible modules SHOULD be embedded in a role.
 |===
 
@@ -177,9 +187,10 @@ The purpose of this rule is to make it easy to include custom modules in our pla
 
 
 '''
+[[Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-3-or-more-parameters-are-being-passed]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-3-or-more-parameters-are-being-passed, Rule>>
 | Parameters to Ansible modules SHOULD use the Yaml dictionary format when 3 or more parameters are being passed
 |===
 
@@ -204,9 +215,10 @@ When a module has several parameters that are being passed in, it's hard to see
 
 
 '''
+[[Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-the-line-length-exceeds-120-characters]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Parameters-to-Ansible-modules-SHOULD-use-the-Yaml-dictionary-format-when-the-line-length-exceeds-120-characters, Rule>>
 | Parameters to Ansible modules SHOULD use the Yaml dictionary format when the line length exceeds 120 characters
 |===
 
@@ -228,9 +240,10 @@ Lines that are long quickly become a wall of text that isn't easily parsable. It
 ----
 
 '''
+[[The-Ansible-command-module-SHOULD-be-used-instead-of-the-Ansible-shell-module]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<The-Ansible-command-module-SHOULD-be-used-instead-of-the-Ansible-shell-module, Rule>>
 | The Ansible `command` module SHOULD be used instead of the Ansible `shell` module.
 |===
 .Context
@@ -251,9 +264,10 @@ The Ansible `shell` module can run most commands that can be run from a bash CLI
 ----
 
 '''
+[[The-Ansible-quote-filter-MUST-be-used-with-any-variable-passed-into-the-shell-module]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<The-Ansible-quote-filter-MUST-be-used-with-any-variable-passed-into-the-shell-module, Rule>>
 | The Ansible `quote` filter MUST be used with any variable passed into the shell module.
 |===
 .Context
@@ -279,9 +293,10 @@ It is recommended not to use the `shell` module. However, if it absolutely must
 * http://docs.ansible.com/fail_module.html[Ansible Fail Module]
 
 '''
+[[Ansible-playbooks-MUST-begin-with-checks-for-any-variables-that-they-require]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Ansible-playbooks-MUST-begin-with-checks-for-any-variables-that-they-require, Rule>>
 | Ansible playbooks MUST begin with checks for any variables that they require.
 |===
 
@@ -299,9 +314,10 @@ If an Ansible playbook requires certain variables to be set, it's best to check
 ----
 
 '''
+[[Ansible-roles-tasks-main-yml-file-MUST-begin-with-checks-for-any-variables-that-they-require]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Ansible-roles-tasks-main-yml-file-MUST-begin-with-checks-for-any-variables-that-they-require, Rule>>
 | Ansible roles tasks/main.yml file MUST begin with checks for any variables that they require.
 |===
 
@@ -318,9 +334,10 @@ If an Ansible role requires certain variables to be set, it's best to check for
 
 === Tasks
 '''
+[[Ansible-tasks-SHOULD-NOT-be-used-in-ansible-playbooks-Instead-use-pre_tasks-and-post_tasks]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Ansible-tasks-SHOULD-NOT-be-used-in-ansible-playbooks-Instead-use-pre_tasks-and-post_tasks, Rule>>
 | Ansible tasks SHOULD NOT be used in ansible playbooks. Instead, use pre_tasks and post_tasks.
 |===
 An Ansible play is defined as a Yaml dictionary. Because of that, ansible doesn't know if the play's tasks list or roles list was specified first. Therefore Ansible always runs tasks after roles.
@@ -370,9 +387,10 @@ Therefore, we SHOULD use pre_tasks and post_tasks to make it more clear when the
 === Roles
 
 '''
+[[All-tasks-in-a-role-SHOULD-be-tagged-with-the-role-name]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<All-tasks-in-a-role-SHOULD-be-tagged-with-the-role-name, Rule>>
 | All tasks in a role SHOULD be tagged with the role name.
 |===
 
@@ -395,9 +413,10 @@ This is very useful when developing and debugging new tasks. It can also signifi
 
 
 '''
+[[The-Ansible-roles-directory-MUST-maintain-a-flat-structure]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<The-Ansible-roles-directory-MUST-maintain-a-flat-structure, Rule>>
 | The Ansible roles directory MUST maintain a flat structure.
 |===
 
@@ -410,9 +429,10 @@ This is very useful when developing and debugging new tasks. It can also signifi
 * Make it compatible with Ansible Galaxy
 
 '''
+[[Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Ansible-Roles-SHOULD-be-named-like-technology_component_subcomponent, Rule>>
 | Ansible Roles SHOULD be named like technology_component[_subcomponent].
 |===
 
@@ -430,9 +450,10 @@ Many times the `technology` portion of the pattern will line up with a package n
 * http://jinja.pocoo.org/docs/dev/templates/#builtin-filters[Jinja2 Builtin Filters]
 
 '''
+[[The-default-filter-SHOULD-replace-empty-strings-lists-etc]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<The-default-filter-SHOULD-replace-empty-strings-lists-etc, Rule>>
 | The `default` filter SHOULD replace empty strings, lists, etc.
 |===
 
@@ -469,15 +490,17 @@ This is almost always more desirable than an empty list, string, etc.
 
 === Yum and DNF
 '''
+[[Package-installation-MUST-use-ansible-action-module-to-abstract-away-dnf-yum]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Package-installation-MUST-use-ansible-action-module-to-abstract-away-dnf-yum, Rule>>
 | Package installation MUST use ansible action module to abstract away dnf/yum.
-| Package installation MUST use name= and state=present rather than pkg= and state=installed respectively.
 |===
+
+[[Package-installation-MUST-use-name-and-state-present-rather-than-pkg-and-state-installed-respectively]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Package-installation-MUST-use-name-and-state-present-rather-than-pkg-and-state-installed-respectively, Rule>>
 | Package installation MUST use name= and state=present rather than pkg= and state=installed respectively.
 |===
 

+ 12 - 6
docs/style_guide.adoc

@@ -19,9 +19,10 @@ This style guide complies with https://www.ietf.org/rfc/rfc2119.txt[RFC2119].
 * https://www.python.org/dev/peps/pep-0008/#maximum-line-length[Python Pep8 Line Length]
 
 '''
+[[All-lines-SHOULD-be-no-longer-than-80-characters]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<All-lines-SHOULD-be-no-longer-than-80-characters, Rule>>
 | All lines SHOULD be no longer than 80 characters.
 |===
 
@@ -31,9 +32,10 @@ Code readability is subjective, therefore pull-requests SHOULD still be merged,
 
 
 '''
+[[All-lines-MUST-be-no-longer-than-120-characters]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<All-lines-MUST-be-no-longer-than-120-characters, Rule>>
 | All lines MUST be no longer than 120 characters.
 |===
 
@@ -46,9 +48,10 @@ This is a hard limit and is enforced by the build bot. This check MUST NOT be di
 
 === Ansible Yaml file extension
 '''
+[[All-Ansible-Yaml-files-MUST-have-a-yml-extension-and-NOT-YML-yaml-etc]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<All-Ansible-Yaml-files-MUST-have-a-yml-extension-and-NOT-YML-yaml-etc, Rule>>
 | All Ansible Yaml files MUST have a .yml extension (and NOT .YML, .yaml etc).
 |===
 
@@ -59,9 +62,10 @@ Example: `tasks.yml`
 
 === Ansible CLI Variables
 '''
+[[Variables-meant-to-be-passed-in-from-the-ansible-CLI-MUST-have-a-prefix-of-cli]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Variables-meant-to-be-passed-in-from-the-ansible-CLI-MUST-have-a-prefix-of-cli, Rule>>
 | Variables meant to be passed in from the ansible CLI MUST have a prefix of cli_
 |===
 
@@ -76,9 +80,10 @@ ansible-playbook -e cli_foo=bar someplays.yml
 
 === Ansible Global Variables
 '''
+[[Global-variables-MUST-have-a-prefix-of-g]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Global-variables-MUST-have-a-prefix-of-g, Rule>>
 | Global variables MUST have a prefix of g_
 |===
 Ansible global variables are defined as any variables outside of ansible roles. Examples include playbook variables, variables passed in on the cli, etc.
@@ -94,9 +99,10 @@ g_environment: someval
 Ansible role variables are defined as variables contained in (or passed into) a role.
 
 '''
+[[Role-variables-MUST-have-a-prefix-of-atleast-3-characters-See.below.for.specific.naming.rules]]
 [cols="2v,v"]
 |===
-| **Rule**
+| <<Role-variables-MUST-have-a-prefix-of-atleast-3-characters-See.below.for.specific.naming.rules, Rule>>
 | Role variables MUST have a prefix of atleast 3 characters. See below for specific naming rules.
 |===
 

+ 17 - 1
filter_plugins/oo_filters.py

@@ -12,6 +12,8 @@ import os
 import pdb
 import re
 import json
+import yaml
+from ansible.utils.unicode import to_unicode
 
 class FilterModule(object):
     ''' Custom ansible filters '''
@@ -474,6 +476,19 @@ class FilterModule(object):
         secret = os.urandom(num_bytes)
         return secret.encode('base-64').strip()
 
+    @staticmethod
+    def to_padded_yaml(data, level=0, indent=2, **kw):
+        ''' returns a yaml snippet padded to match the indent level you specify '''
+        if data in [None, ""]:
+            return ""
+
+        try:
+            transformed = yaml.safe_dump(data, indent=indent, allow_unicode=True, default_flow_style=False, **kw)
+            padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
+            return to_unicode("\n{0}".format(padded))
+        except Exception as my_e:
+            raise errors.AnsibleFilterError('Failed to convert: %s', my_e)
+
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {
@@ -493,5 +508,6 @@ class FilterModule(object):
             "oo_parse_named_certificates": self.oo_parse_named_certificates,
             "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
             "oo_pretty_print_cluster": self.oo_pretty_print_cluster,
-            "oo_generate_secret": self.oo_generate_secret
+            "oo_generate_secret": self.oo_generate_secret,
+            "to_padded_yaml": self.to_padded_yaml,
         }
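The new `to_padded_yaml` filter dumps a structure to YAML and prefixes every line with `level * indent` spaces so the snippet can be spliced into an already-indented template. A standalone Python 3 rendition of its padding behaviour (dropping `to_unicode`, a Python 2 concern); in a playbook the filter would presumably be invoked from Jinja2, along the lines of `{{ somedict | to_padded_yaml(level=2) }}`:

```
import yaml

def to_padded_yaml(data, level=0, indent=2):
    # Standalone rendition of the filter above.
    if data in [None, ""]:
        return ""
    dumped = yaml.safe_dump(data, indent=indent, allow_unicode=True,
                            default_flow_style=False)
    return "\n" + "\n".join(" " * level * indent + line
                            for line in dumped.splitlines())

print(to_padded_yaml({'region': 'infra', 'zone': 'default'}, level=2))
#
#     region: infra
#     zone: default
```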

+ 27 - 1
filter_plugins/openshift_master.py

@@ -463,6 +463,32 @@ class FilterModule(object):
         IdentityProviderBase.validate_idp_list(idp_list)
         return yaml.safe_dump([idp.to_dict() for idp in idp_list], default_flow_style=False)
 
+    @staticmethod
+    def validate_pcs_cluster(data, masters=None):
+        ''' Validates output from "pcs status", ensuring that each master
+            provided is online.
+            Ex: data = ('...',
+                        'PCSD Status:',
+                        'master1.example.com: Online',
+                        'master2.example.com: Online',
+                        'master3.example.com: Online',
+                        '...')
+                masters = ['master1.example.com',
+                           'master2.example.com',
+                           'master3.example.com']
+               returns True
+        '''
+        if not issubclass(type(data), str):
+            raise errors.AnsibleFilterError("|failed expects data is a string")
+        if not issubclass(type(masters), list):
+            raise errors.AnsibleFilterError("|failed expects masters is a list")
+        valid = True
+        for master in masters:
+            if "{0}: Online".format(master) not in data:
+                valid = False
+        return valid
+
     def filters(self):
         ''' returns a mapping of filters to methods '''
-        return {"translate_idps": self.translate_idps}
+        return {"translate_idps": self.translate_idps,
+                "validate_pcs_cluster": self.validate_pcs_cluster}
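`validate_pcs_cluster` is consumed later in this merge by the rolling-restart playbook (`pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)`). Stripped of the Ansible error types, its core check is a substring test per master:

```
def validate_pcs_cluster(data, masters):
    # Core of the filter above: every master must be listed as Online.
    return all("{0}: Online".format(m) in data for m in masters)

pcs_status = """PCSD Status:
  master1.example.com: Online
  master2.example.com: Online"""

print(validate_pcs_cluster(pcs_status, ['master1.example.com']))  # True
print(validate_pcs_cluster(pcs_status, ['master3.example.com']))  # False
```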

+ 2 - 2
inventory/aws/hosts/ec2.ini

@@ -45,10 +45,10 @@ vpc_destination_variable = ip_address
 route53 = False
 
 # To exclude RDS instances from the inventory, uncomment and set to False.
-#rds = False
+rds = False
 
 # To exclude ElastiCache instances from the inventory, uncomment and set to False.
-#elasticache = False
+elasticache = False
 
 # Additionally, you can specify the list of zones to exclude looking up in
 # 'route53_excluded_zones' as a comma-separated list.

+ 75 - 1
openshift-ansible.spec

@@ -5,7 +5,7 @@
 }
 
 Name:           openshift-ansible
-Version:        3.0.27
+Version:        3.0.34
 Release:        1%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
@@ -259,6 +259,80 @@ Atomic OpenShift Utilities includes
 
 
 %changelog
+* Mon Jan 18 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.34-1
+- clean up too-many-branches / logic (jdiaz@redhat.com)
+- atomic-openshift-installer: add containerized to inventory
+  (smunilla@redhat.com)
+- Add 'unknown' to possible output for the is-active check.
+  (abutcher@redhat.com)
+- Fix cluster_method conditional in master restart playbook.
+  (abutcher@redhat.com)
+- Use IdentityFile instead of PrivateKey (donovan.muller@gmail.com)
+- atomic-openshift-installer: Remove containerized install for 3.0
+  (smunilla@redhat.com)
+- Host group should be OSEv3 not OSv3 (donovan.muller@gmail.com)
+- Remove pause after haproxy start (abutcher@redhat.com)
+- Ensure nfs-utils installed for non-atomic hosts. (abutcher@redhat.com)
+
+* Fri Jan 15 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.33-1
+- Configure nodes which are also masters prior to nodes in containerized
+  install. (abutcher@redhat.com)
+- Call attention to openshift_master_rolling_restart_mode variable in restart
+  prompt. (abutcher@redhat.com)
+- Added anchors for rules in style_guide.adoc in order to make it easier to
+  reference specific rules in PRs. (twiest@redhat.com)
+- Update ec2.ini (jdetiber@redhat.com)
+
+* Thu Jan 14 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.32-1
+- Uninstall remove containerized wrapper and symlinks (abutcher@redhat.com)
+
+* Thu Jan 14 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.31-1
+- Check api prior to starting node. (abutcher@redhat.com)
+- added anchors (twiest@redhat.com)
+
+* Wed Jan 13 2016 Joel Diaz <jdiaz@redhat.com> 3.0.30-1
+- Add -A and detail --v3 flags
+
+* Wed Jan 13 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.29-1
+- 3.1.1 upgrade playbook (bleanhar@redhat.com)
+- Updated help menu for v3 flag (kwoodson@redhat.com)
+- Add wait in between api and controllers start for native ha.
+  (abutcher@redhat.com)
+- atomic-openshift-installer: Error handling for unicode hostnames
+  (smunilla@redhat.com)
+- Update api verification. (abutcher@redhat.com)
+- Add a Verify API Server handler that waits for the API server to become
+  available (sdodson@redhat.com)
+- Add -A parameter to forward ssh agent (jdiaz@redhat.com)
+- Validate pacemaker cluster members. (abutcher@redhat.com)
+- Removed atomic host check (kwoodson@redhat.com)
+- Add is_containerized inputs to nosetests. (abutcher@redhat.com)
+- Add wait for API before starting controllers w/ native ha install.
+  (abutcher@redhat.com)
+- Fix for to_padded_yaml filter (jdetiber@redhat.com)
+- - sqashed to one commit (llange@redhat.com)
+- Switch to using hostnamectl as it works on atomic and rhel7
+  (sdodson@redhat.com)
+- Update rolling restart playbook for pacemaker support. Replace fail with a
+  warn and prompt if running ansible from a host that will be rebooted. Re-
+  organize playbooks. (abutcher@redhat.com)
+- Implement simple master rolling restarts. (dgoodwin@redhat.com)
+- re-enable containerize installs (sdodson@redhat.com)
+- Set portal net in master playbook (jdetiber@redhat.com)
+- Set the cli image to match osm_image in openshift_cli role
+  (sdodson@redhat.com)
+- atomic-openshift-installer: Populate new_nodes group (smunilla@redhat.com)
+- Always pull docker images (sdodson@redhat.com)
+
+* Mon Jan 11 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.28-1
+- added the rhe7-host-monitoring service file (mwoodson@redhat.com)
+- Fixing tab completion for latest metadata changes (kwoodson@redhat.com)
+- Removing some internal hostnames (bleanhar@redhat.com)
+- Fixing tab completion for latest metadata changes (kwoodson@redhat.com)
+- Make bin/cluster able to spawn OSE 3.1 clusters (lhuard@amadeus.com)
+- oso_host_monitoring role: removed the f22 and zagg client, replaced it with
+  oso-rhel7-host-monitoring container (mwoodson@redhat.com)
+
 * Fri Jan 08 2016 Kenny Woodson <kwoodson@redhat.com> 3.0.27-1
 - Update to metadata tooling. (kwoodson@redhat.com)
 - Fix VM drive cleanup during terminate on libvirt (lhuard@amadeus.com)

+ 1 - 1
playbooks/adhoc/bootstrap-fedora.yml

@@ -1,4 +1,4 @@
-- hosts: OSv3
+- hosts: OSEv3
   gather_facts: false
   tasks:
     - name: install python and deps for ansible modules

+ 2 - 2
playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml

@@ -20,7 +20,7 @@
 #   ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
 #
 #  Notes:
-#  * By default this will do a 55GB GP2 volume.  The can be overidden with the "-e 'cli_volume_size=100'" variable
+#  * By default this will do a 200GB GP2 volume.  The can be overidden with the "-e 'cli_volume_size=100'" variable
 #  * This does a GP2 by default.  Support for Provisioned IOPS has not been added
 #  * This will assign the new volume to /dev/xvdc. This is not variablized, yet.
 #  * This can be done with NO downtime on the host
@@ -36,7 +36,7 @@
 
   vars:
     cli_volume_type: gp2
-    cli_volume_size: 55
+    cli_volume_size: 200
 #    cli_volume_iops: "{{ 30 * cli_volume_size }}"
 
   pre_tasks:

+ 4 - 0
playbooks/adhoc/uninstall.yml

@@ -202,6 +202,10 @@
         - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
         - /usr/lib/systemd/system/origin-master-api.service
         - /usr/lib/systemd/system/origin-master-controllers.service
+        - /usr/local/bin/openshift
+        - /usr/local/bin/oadm
+        - /usr/local/bin/oc
+        - /usr/local/bin/kubectl
 
     # Since we are potentially removing the systemd unit files for separated
     # master-api and master-controllers services, so we need to reload the

+ 17 - 0
playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md

@@ -0,0 +1,17 @@
+# v3.1 minor upgrade playbook
+This upgrade will preserve all locally made configuration modifications to the
+Masters and Nodes.
+
+## Overview
+This playbook is available as a technical preview. It currently performs the
+following steps.
+
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml

+ 14 - 0
playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml

@@ -0,0 +1,14 @@
+---
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+  vars:
+    g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+    g_master_hosts: "{{ groups.masters | default([]) }}"
+    g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+    g_node_hosts: "{{ groups.nodes | default([]) }}"
+    g_lb_hosts: "{{ groups.lb | default([]) }}"
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/pre.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
+- include: ../../../openshift-master/restart.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/post.yml

+ 1 - 0
playbooks/byo/openshift-master/filter_plugins

@@ -0,0 +1 @@
+../../../filter_plugins

+ 1 - 0
playbooks/byo/openshift-master/lookup_plugins

@@ -0,0 +1 @@
+../../../lookup_plugins

+ 4 - 0
playbooks/byo/openshift-master/restart.yml

@@ -0,0 +1,4 @@
+---
+- include: ../../common/openshift-master/restart.yml
+  vars_files:
+  - ../../byo/openshift-cluster/cluster_hosts.yml

+ 1 - 0
playbooks/byo/openshift-master/roles

@@ -0,0 +1 @@
+../../../roles

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins

@@ -0,0 +1 @@
+../../../../../filter_plugins

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_minor/library

@@ -0,0 +1 @@
+../library

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins

@@ -0,0 +1 @@
+../../../../../lookup_plugins

+ 50 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml

@@ -0,0 +1,50 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+  hosts: oo_first_master
+  vars:
+    openshift_deployment_type: "{{ deployment_type }}"
+    registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + g_new_version  ) }}"
+    router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+    oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+  roles:
+  # Create the new templates shipped in 3.1.z, existing templates are left
+  # unmodified. This prevents the subsequent role definition for
+  # openshift_examples from failing when trying to replace templates that do
+  # not already exist. We could have potentially done a replace --force to
+  # create and update in one step.
+  - openshift_examples
+  # Update the existing templates
+  - role: openshift_examples
+    openshift_examples_import_command: replace
+  pre_tasks:
+  - name: Check for default router
+    command: >
+      {{ oc_cmd }} get -n default dc/router
+    register: _default_router
+    failed_when: false
+    changed_when: false
+
+  - name: Check for default registry
+    command: >
+      {{ oc_cmd }} get -n default dc/docker-registry
+    register: _default_registry
+    failed_when: false
+    changed_when: false
+
+  - name: Update router image to current version
+    when: _default_router.rc == 0
+    command: >
+      {{ oc_cmd }} patch dc/router -p
+      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+      --api-version=v1
+
+  - name: Update registry image to current version
+    when: _default_registry.rc == 0
+    command: >
+      {{ oc_cmd }} patch dc/docker-registry -p
+      '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+      --api-version=v1
+
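The `router_image` and `registry_image` vars above are built by substituting the `${component}` and `${version}` placeholders in `openshift.master.registry_url`. A sketch of that substitution; the registry URL here is an assumed example value, not taken from this diff:

```
# Model of the registry_image / router_image vars above. The URL is
# an assumed example; the real value comes from openshift facts.
registry_url = 'registry.example.com/openshift3/ose-${component}:${version}'
g_new_version = '3.1.1'

registry_image = (registry_url
                  .replace('${component}', 'docker-registry')
                  .replace('${version}', 'v' + g_new_version))
router_image = (registry_url
                .replace('${component}', 'haproxy-router')
                .replace('${version}', 'v' + g_new_version))

print(registry_image)  # registry.example.com/openshift3/ose-docker-registry:v3.1.1
print(router_image)    # registry.example.com/openshift3/ose-haproxy-router:v3.1.1
```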

+ 87 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml

@@ -0,0 +1,87 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_facts
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+  hosts: oo_first_master
+  vars:
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+    target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
+  gather_facts: no
+  tasks:
+  - fail:
+      msg: >
+        This upgrade is only supported for origin, openshift-enterprise, and online
+        deployment types
+    when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+  - fail:
+      msg: >
+        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+        valid version for a {{ target_version }} upgrade
+    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
+
+- name: Verify upgrade can proceed
+  hosts: oo_masters_to_config:oo_nodes_to_config
+  vars:
+    target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
+  tasks:
+  - name: Clean package cache
+    command: "{{ ansible_pkg_mgr }} clean all"
+
+  - set_fact:
+      g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+
+  - name: Determine available versions
+    script: ../files/versions.sh {{ g_new_service_name }} openshift
+    register: g_versions_result
+
+  - set_fact:
+      g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
+
+  - set_fact:
+      g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+
+  - fail:
+      msg: This playbook requires Origin 1.1 or later
+    when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+  - fail:
+      msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+    when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+  - fail:
+      msg: Upgrade packages not found
+    when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+  - set_fact:
+      pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+  hosts: localhost
+  connection: local
+  become: no
+  vars:
+    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+  tasks:
+  - set_fact:
+      pre_upgrade_completed: "{{ hostvars
+                                 | oo_select_keys(pre_upgrade_hosts)
+                                 | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+  - set_fact:
+      pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+    when: pre_upgrade_failed | length > 0
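Each "gate" play in this upgrade works the same way: collect the hosts whose hostvars carry the completion fact, diff that set against the expected group, and fail if anything remains. A simplified Python model of what `oo_select_keys` and `oo_collect` compute here (semantics inferred from usage, not the filters' exact code):

```
# Simplified model of the gating pattern above (assumed filter
# semantics): select each host's vars, collect inventory_hostname
# where the completion fact is set, then diff against the group.
hostvars = {
    'master1': {'inventory_hostname': 'master1', 'pre_upgrade_complete': True},
    'node1':   {'inventory_hostname': 'node1',   'pre_upgrade_complete': True},
    'node2':   {'inventory_hostname': 'node2'},  # fact never set
}
pre_upgrade_hosts = ['master1', 'node1', 'node2']

completed = [v['inventory_hostname'] for k, v in hostvars.items()
             if k in pre_upgrade_hosts and v.get('pre_upgrade_complete')]
failed = [h for h in pre_upgrade_hosts if h not in completed]
print(failed)  # ['node2'] -> the fail task would name this host
```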

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles

@@ -0,0 +1 @@
+../../../../../roles

+ 137 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml

@@ -0,0 +1,137 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master packages and configuration
+  hosts: oo_masters_to_config
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+  - name: Upgrade master packages
+    command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
+
+  - name: Ensure python-yaml present for config upgrade
+    action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+    when: not openshift.common.is_atomic | bool
+
+# Currently 3.1.1 does not have any new configuration settings
+#
+#  - name: Upgrade master configuration
+#    openshift_upgrade_config:
+#      from_version: '3.0'
+#      to_version: '3.1'
+#      role: master
+#      config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+  hosts: oo_masters_to_config
+  tasks:
+  - set_fact:
+      master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      master_update_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_masters_to_config)
+                                 | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+  - set_fact:
+      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+    when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+  hosts: oo_nodes_to_config
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  roles:
+  - openshift_facts
+  tasks:
+  - name: Upgrade node packages
+    command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
+
+  - name: Restart node service
+    service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+  - set_fact:
+      node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      node_update_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_nodes_to_config)
+                                 | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+  - set_fact:
+      node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+    when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles and Cluster Role Bindings
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings
+  hosts: oo_masters_to_config
+  vars:
+    origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+    ent_reconcile_bindings: true
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+  tasks:
+  - name: Reconcile Cluster Roles
+    command: >
+      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-roles --confirm
+    run_once: true
+
+  - name: Reconcile Cluster Role Bindings
+    command: >
+      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-role-bindings
+      --exclude-groups=system:authenticated
+      --exclude-groups=system:unauthenticated
+      --exclude-users=system:anonymous
+      --additive-only=true --confirm
+    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+    run_once: true
+
+  - set_fact:
+      reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      reconcile_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_masters_to_config)
+                                 | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+  - set_fact:
+      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+    when: reconcile_failed | length > 0
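The reconcile step gates on version: Origin only reconciles role bindings for versions newer than 1.0.6, while enterprise deployments always do. The `version_compare` filter of this Ansible era is modelled below with `LooseVersion` (an approximation, not the filter's exact code):

```
from distutils.version import LooseVersion

# Model of the origin_reconcile_bindings condition above.
deployment_type = 'origin'
g_new_version = '1.1.1'  # derived in pre.yml from the package version

origin_reconcile_bindings = (deployment_type == 'origin' and
                             LooseVersion(g_new_version) > LooseVersion('1.0.6'))
ent_reconcile_bindings = True
print(origin_reconcile_bindings or ent_reconcile_bindings)  # True
```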

+ 2 - 0
playbooks/common/openshift-master/config.yml

@@ -51,6 +51,7 @@
           console_url: "{{ openshift_master_console_url | default(None) }}"
           console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
           public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
+          portal_net: "{{ openshift_master_portal_net | default(None) }}"
   - name: Check status of external etcd certificatees
     stat:
       path: "{{ openshift.common.config_base }}/master/{{ item }}"
@@ -312,6 +313,7 @@
 
 - name: Configure master instances
   hosts: oo_masters_to_config
+  any_errors_fatal: true
   serial: 1
   vars:
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"

+ 143 - 0
playbooks/common/openshift-master/restart.yml

@@ -0,0 +1,143 @@
+---
+- include: ../openshift-cluster/evaluate_groups.yml
+
+- name: Validate configuration for rolling restart
+  hosts: oo_masters_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - fail:
+      msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'"
+    when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"]
+  - openshift_facts:
+      role: "{{ item.role }}"
+      local_facts: "{{ item.local_facts }}"
+    with_items:
+      - role: common
+        local_facts:
+          rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
+      - role: master
+        local_facts:
+          cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
+# Creating a temp file on localhost, we then check each system that will
+# be rebooted to see if that file exists, if so we know we're running
+# ansible on a machine that needs a reboot, and we need to error out.
+- name: Create temp file on localhost
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - local_action: command mktemp
+    register: mktemp
+    changed_when: false
+
+- name: Check if temp file exists on any masters
+  hosts: oo_masters_to_config
+  tasks:
+  - stat: path="{{ hostvars.localhost.mktemp.stdout }}"
+    register: exists
+    changed_when: false
+
+- name: Cleanup temp file on localhost
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
+    changed_when: false
+
+- name: Warn if restarting the system where ansible is running
+  hosts: oo_masters_to_config
+  tasks:
+  - pause:
+      prompt: >
+        Warning: Running playbook from a host that will be restarted!
+        Press CTRL+C and A to abort playbook execution. You may
+        continue by pressing ENTER but the playbook will stop
+        executing after this system has been restarted and services
+        must be verified manually. To only restart services, set
+        openshift_master_rolling_restart_mode=services in host
+        inventory and relaunch the playbook.
+    when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
+  - set_fact:
+      current_host: "{{ exists.stat.exists }}"
+    when: openshift.common.rolling_restart_mode == 'system'
+
+- name: Determine which masters are currently active
+  hosts: oo_masters_to_config
+  tasks:
+  - name: Check master service status
+    command: >
+      systemctl is-active {{ openshift.common.service_type }}-master
+    register: active_check_output
+    when: openshift.master.cluster_method | default(None) == 'pacemaker'
+    failed_when: active_check_output.stdout not in ['active', 'inactive', 'unknown']
+    changed_when: false
+  - set_fact:
+      is_active: "{{ active_check_output.stdout == 'active' }}"
+    when: openshift.master.cluster_method | default(None) == 'pacemaker'
+
+- name: Evaluate master groups
+  hosts: localhost
+  become: no
+  tasks:
+  - name: Evaluate oo_active_masters
+    add_host:
+      name: "{{ item }}"
+      groups: oo_active_masters
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ groups.oo_masters_to_config | default([]) }}"
+    when: (hostvars[item]['is_active'] | default(false)) | bool
+  - name: Evaluate oo_current_masters
+    add_host:
+      name: "{{ item }}"
+      groups: oo_current_masters
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ groups.oo_masters_to_config | default([]) }}"
+    when: (hostvars[item]['current_host'] | default(false)) | bool
+
+- name: Validate pacemaker cluster
+  hosts: oo_active_masters
+  tasks:
+  - name: Retrieve pcs status
+    command: pcs status
+    register: pcs_status_output
+    changed_when: false
+  - fail:
+      msg: >
+        Pacemaker cluster validation failed. One or more nodes are not online.
+    when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool
+
+- name: Restart masters
+  hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters
+  vars:
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+  serial: 1
+  tasks:
+  - include: restart_hosts.yml
+    when: openshift.common.rolling_restart_mode == 'system'
+  - include: restart_services.yml
+    when: openshift.common.rolling_restart_mode == 'services'
+
+- name: Restart active masters
+  hosts: oo_active_masters
+  serial: 1
+  tasks:
+  - include: restart_hosts_pacemaker.yml
+    when: openshift.common.rolling_restart_mode == 'system'
+  - include: restart_services_pacemaker.yml
+    when: openshift.common.rolling_restart_mode == 'services'
+
+- name: Restart current masters
+  hosts: oo_current_masters
+  serial: 1
+  tasks:
+  - include: restart_hosts.yml
+    when: openshift.common.rolling_restart_mode == 'system'
+  - include: restart_services.yml
+    when: openshift.common.rolling_restart_mode == 'services'

+ 39 - 0
playbooks/common/openshift-master/restart_hosts.yml

@@ -0,0 +1,39 @@
+- name: Restart master system
+  # https://github.com/ansible/ansible/issues/10616
+  shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+  async: 1
+  poll: 0
+  ignore_errors: true
+  become: yes
+# When cluster_method != pacemaker we can ensure the api_port is
+# available.
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+      host="{{ inventory_hostname }}"
+      state=started
+      delay=10
+      port="{{ openshift.master.api_port }}"
+  when: openshift.master.cluster_method != 'pacemaker'
+- name: Wait for master to start
+  become: no
+  local_action:
+    module: wait_for
+      host="{{ inventory_hostname }}"
+      state=started
+      delay=10
+      port=22
+  when: openshift.master.cluster_method == 'pacemaker'
+- name: Wait for master to become available
+  command: pcs status
+  register: pcs_status_output
+  until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool
+  retries: 15
+  delay: 2
+  changed_when: false
+  when: openshift.master.cluster_method == 'pacemaker'
+- fail:
+    msg: >
+      Pacemaker cluster validation failed, {{ inventory_hostname }} is not online.
+  when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool
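After firing the fire-and-forget shutdown (`async: 1`, `poll: 0`), the play polls from the control host until the rebooted master accepts connections again: on the API port for native HA, on port 22 for pacemaker. A socket-level sketch of what `wait_for` is doing here:

```
import socket
import time

# Rough model of the wait_for tasks above: pause, then poll the
# given port until the rebooted master accepts TCP connections.
def wait_for(host, port, delay=10, retries=30):
    time.sleep(delay)
    for _ in range(retries):
        try:
            socket.create_connection((host, port), timeout=5).close()
            return True
        except OSError:
            time.sleep(2)
    return False
```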

+ 25 - 0
playbooks/common/openshift-master/restart_hosts_pacemaker.yml

@@ -0,0 +1,25 @@
+- name: Fail over master resource
+  command: >
+    pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }}
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+      host="{{ openshift.master.cluster_hostname }}"
+      state=started
+      delay=10
+      port="{{ openshift.master.api_port }}"
+- name: Restart master system
+  # https://github.com/ansible/ansible/issues/10616
+  shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
+  async: 1
+  poll: 0
+  ignore_errors: true
+  become: yes
+- name: Wait for master to start
+  become: no
+  local_action:
+    module: wait_for
+      host="{{ inventory_hostname }}"
+      state=started
+      delay=10

+ 27 - 0
playbooks/common/openshift-master/restart_services.yml

@@ -0,0 +1,27 @@
+- name: Restart master
+  service:
+    name: "{{ openshift.common.service_type }}-master"
+    state: restarted
+  when: not openshift_master_ha | bool
+- name: Restart master API
+  service:
+    name: "{{ openshift.common.service_type }}-master-api"
+    state: restarted
+  when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+      host="{{ inventory_hostname }}"
+      state=started
+      delay=10
+      port="{{ openshift.master.api_port }}"
+  when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+- name: Restart master controllers
+  service:
+    name: "{{ openshift.common.service_type }}-master-controllers"
+    state: restarted
+  # Ignore errors since it is possible that type != simple for
+  # pre-3.1.1 installations.
+  ignore_errors: true
+  when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'

+ 10 - 0
playbooks/common/openshift-master/restart_services_pacemaker.yml

@@ -0,0 +1,10 @@
+- name: Restart master services
+  command: pcs resource restart master
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+      host="{{ openshift.master.cluster_hostname }}"
+      state=started
+      delay=10
+      port="{{ openshift.master.api_port }}"

+ 52 - 9
playbooks/common/openshift-node/config.yml

@@ -154,21 +154,15 @@
       validate_checksum: yes
     with_items: nodes_needing_certs
 
-- name: Configure node instances
+- name: Deploy node certificates
   hosts: oo_nodes_to_config
   vars:
     sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
-    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
-    # TODO: Prefix flannel role variables.
-    etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
-    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
-    openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
-  pre_tasks:
+  tasks:
   - name: Ensure certificate directory exists
     file:
       path: "{{ node_cert_dir }}"
       state: directory
-
   # TODO: notify restart node
   # possibly test service started time against certificate/config file
   # timestamps in node to trigger notify
@@ -177,8 +171,44 @@
       src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
       dest: "{{ node_cert_dir }}"
     when: certs_missing
+
+- name: Evaluate node groups
+  hosts: localhost
+  become: no
+  tasks:
+  - name: Evaluate oo_containerized_master_nodes
+    add_host:
+      name: "{{ item }}"
+      groups: oo_containerized_master_nodes
+      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
+      ansible_sudo: "{{ g_sudo | default(omit) }}"
+    with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
+    when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+
+- name: Configure node instances
+  hosts: oo_containerized_master_nodes
+  serial: 1
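+  # One host at a time: restarting docker on a node that is also a master
+  # restarts that master, so configuring these serially avoids taking all
+  # masters down at once.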
+  vars:
+    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+    openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
   roles:
   - openshift_node
+
+- name: Configure node instances
+  hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+  vars:
+    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
+    openshift_node_first_master_ip: "{{ hostvars[groups.oo_first_master.0].openshift.common.ip }}"
+  roles:
+  - openshift_node
+
+- name: Additional node config
+  hosts: oo_nodes_to_config
+  vars:
+    # TODO: Prefix flannel role variables.
+    etcd_urls: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls }}"
+    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+  roles:
   - role: flannel
     when: openshift.common.use_flannel | bool
   - role: nickhammond.logrotate
@@ -215,6 +245,19 @@
                          | oo_collect('openshift.common.hostname') }}"
     openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
   pre_tasks:
-
+  # Necessary because when a host is both a node and a master, the master is
+  # restarted after the node restarts docker, and it can take up to 60 seconds
+  # for systemd to start the master again.
+  - name: Wait for master API to become available before proceeding
+    # Using curl here since the uri module requires python-httplib2 and
+    # wait_for port doesn't provide health information.
+    command: >
+      curl -k --head --silent {{ openshift.master.api_url }}
+    register: api_available_output
+    until: api_available_output.stdout.find("200 OK") != -1
+    retries: 120
+    delay: 1
+    changed_when: false
+    when: openshift.common.is_containerized | bool
   roles:
   - openshift_manage_node
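
The curl-based API check above recurs throughout this commit because the uri module requires python-httplib2 on the target and wait_for only tests the port. Where that dependency is acceptable, a uri-based equivalent might look like this sketch (not part of this commit):

    - name: Wait for master API to become available
      uri:
        url: "{{ openshift.master.api_url }}"
        validate_certs: no
      register: api_available
      until: api_available.status == 200
      retries: 120
      delay: 1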

+ 0 - 16
roles/etcd/tasks/main.yml

@@ -11,24 +11,8 @@
   action: "{{ ansible_pkg_mgr }} name=etcd-2.* state=present"
   when: not openshift.common.is_containerized | bool
 
-- name: Get docker images
-  command: docker images
-  changed_when: false
-  when: openshift.common.is_containerized | bool
-  register: docker_images
-
 - name: Pull etcd container
   command: docker pull {{ openshift.etcd.etcd_image }}
-  when: openshift.common.is_containerized | bool and openshift.etcd.etcd_image not in docker_images.stdout
-  
-- name: Wait for etcd image
-  command: >
-      docker images
-  register: docker_images
-  until: openshift.etcd.etcd_image in docker_images.stdout
-  retries: 30
-  delay: 10
-  changed_when: false
   when: openshift.common.is_containerized | bool
 
 - name: Install etcd container service file

+ 1 - 0
roles/haproxy/handlers/main.yml

@@ -3,3 +3,4 @@
   service:
     name: haproxy
     state: restarted
+  when: not (haproxy_start_result_changed | default(false) | bool)

+ 2 - 3
roles/haproxy/tasks/main.yml

@@ -19,6 +19,5 @@
     enabled: yes
   register: start_result
 
-- name: Pause 30 seconds if haproxy was just started
-  pause: seconds=30
-  when: start_result | changed
+- set_fact:
+    haproxy_start_result_changed: "{{ start_result | changed }}"
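
Taken together, the haproxy changes replace an unconditional 30-second pause with a fact: the tasks record whether the service was just started, and the restart handler skips itself in that case. The same pattern for a hypothetical service looks like:

    # tasks/main.yml
    - name: Start and enable myservice
      service:
        name: myservice
        state: started
        enabled: yes
      register: start_result

    - set_fact:
        myservice_start_result_changed: "{{ start_result | changed }}"

    # handlers/main.yml
    - name: restart myservice
      service:
        name: myservice
        state: restarted
      when: not (myservice_start_result_changed | default(false) | bool)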

+ 91 - 56
roles/lib_zabbix/library/zbx_action.py

@@ -81,6 +81,61 @@ def filter_differences(zabbix_filters, user_filters):
 
     return rval
 
+def opconditions_diff(zab_val, user_val):
+    ''' Report whether there are differences between opconditions on
+        zabbix and opconditions supplied by user '''
+
+    if len(zab_val) != len(user_val):
+        return True
+
+    for z_cond, u_cond in zip(zab_val, user_val):
+        if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \
+                    ['conditiontype', 'operator', 'value']]):
+            return True
+
+    return False
+
+def opmessage_diff(zab_val, user_val):
+    ''' Report whether there are differences between opmessage on
+        zabbix and opmessage supplied by user '''
+
+    for op_msg_key, op_msg_val in user_val.items():
+        if zab_val[op_msg_key] != str(op_msg_val):
+            return True
+
+    return False
+
+def opmessage_grp_diff(zab_val, user_val):
+    ''' Report whether there are differences between opmessage_grp
+        on zabbix and opmessage_grp supplied by user '''
+
+    zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab_val])
+    usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in user_val])
+    if usr_grp_ids != zab_grp_ids:
+        return True
+
+    return False
+
+def opmessage_usr_diff(zab_val, user_val):
+    ''' Report whether there are differences between opmessage_usr
+        on zabbix and opmessage_usr supplied by user '''
+
+    zab_usr_ids = set([usr['userid'] for usr in zab_val])
+    usr_ids = set([usr['userid'] for usr in user_val])
+    if usr_ids != zab_usr_ids:
+        return True
+
+    return False
+
+def opcommand_diff(zab_op_cmd, usr_op_cmd):
+    ''' Check whether user-provided opcommand matches what's already
+        stored in Zabbix '''
+
+    for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items():
+        if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val):
+            return True
+    return False
+
 def host_in_zabbix(zab_hosts, usr_host):
     ''' Check whether a particular user host is already in the
         Zabbix list of hosts '''
@@ -106,23 +161,11 @@ def hostlist_in_zabbix(zab_hosts, usr_hosts):
 
     return True
 
-def opcommand_diff(zab_op_cmd, usr_op_cmd):
-    ''' Check whether user-provided opcommand matches what's already
-        stored in Zabbix '''
-
-    for usr_op_cmd_key, usr_op_cmd_val in usr_op_cmd.items():
-        if zab_op_cmd[usr_op_cmd_key] != str(usr_op_cmd_val):
-            return True
-    return False
-
-# This logic is quite complex.  We are comparing two lists of dictionaries.
-# The outer for-loops allow us to descend down into both lists at the same time
-# and then walk over the key,val pairs of the incoming user dict's changes
-# or updates.  The if-statements are looking at different sub-object types and
-# comparing them.  The other suggestion on how to write this is to write a recursive
-# compare function but for the time constraints and for complexity I decided to go
-# this route.
-# pylint: disable=too-many-branches
+# We are comparing two lists of dictionaries (the one stored on zabbix and the
+# one the user is providing). For each type of operation, determine whether there
+# is a difference between what is stored on zabbix and what the user is providing.
+# If there is a difference, we take the user-provided data for what needs to
+# be stored/updated into zabbix.
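+# For example, if zabbix stores esc_period as '60' and the user supplies
+# esc_period: 30, the final string compare below reports the difference and
+# rval['esc_period'] becomes 30.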
 def operation_differences(zabbix_ops, user_ops):
     '''Determine the differences from user and zabbix for operations'''
 
@@ -132,49 +175,41 @@ def operation_differences(zabbix_ops, user_ops):
 
     rval = {}
     for zab, user in zip(zabbix_ops, user_ops):
-        for key, val in user.items():
-            if key == 'opconditions':
-                if len(zab[key]) != len(val):
-                    rval[key] = val
-                    break
-                for z_cond, u_cond in zip(zab[key], user[key]):
-                    if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \
-                                ['conditiontype', 'operator', 'value']]):
-                        rval[key] = val
-                        break
-            elif key == 'opmessage':
-                # Verify each passed param matches
-                for op_msg_key, op_msg_val in val.items():
-                    if zab[key][op_msg_key] != str(op_msg_val):
-                        rval[key] = val
-                        break
-
-            elif key == 'opmessage_grp':
-                zab_grp_ids = set([ugrp['usrgrpid'] for ugrp in zab[key]])
-                usr_grp_ids = set([ugrp['usrgrpid'] for ugrp in val])
-                if usr_grp_ids != zab_grp_ids:
-                    rval[key] = val
-
-            elif key == 'opmessage_usr':
-                zab_usr_ids = set([usr['userid'] for usr in zab[key]])
-                usr_ids = set([usr['userid'] for usr in val])
-                if usr_ids != zab_usr_ids:
-                    rval[key] = val
-
-            elif key == 'opcommand':
-                if opcommand_diff(zab[key], val):
-                    rval[key] = val
-                    break
+        for oper in user.keys():
+            if oper == 'opconditions' and opconditions_diff(zab[oper], \
+                                                                user[oper]):
+                rval[oper] = user[oper]
+
+            elif oper == 'opmessage' and opmessage_diff(zab[oper], \
+                                                        user[oper]):
+                rval[oper] = user[oper]
+
+            elif oper == 'opmessage_grp' and opmessage_grp_diff(zab[oper], \
+                                                                user[oper]):
+                rval[oper] = user[oper]
+
+            elif oper == 'opmessage_usr' and opmessage_usr_diff(zab[oper], \
+                                                                user[oper]):
+                rval[oper] = user[oper]
+
+            elif oper == 'opcommand' and opcommand_diff(zab[oper], \
+                                                        user[oper]):
+                rval[oper] = user[oper]
 
             # opcommand_grp can be treated just like opcommand_hst
             # as opcommand_grp[] is just a list of groups
-            elif key == 'opcommand_hst' or key == 'opcommand_grp':
-                if not hostlist_in_zabbix(zab[key], val):
-                    rval[key] = val
-                    break
+            elif oper == 'opcommand_hst' or oper == 'opcommand_grp':
+                if not hostlist_in_zabbix(zab[oper], user[oper]):
+                    rval[oper] = user[oper]
+
+            # if it's any other type of operation than the ones tested above
+            # just do a direct compare
+            elif oper not in ['opconditions', 'opmessage', 'opmessage_grp',
+                              'opmessage_usr', 'opcommand', 'opcommand_hst',
+                              'opcommand_grp'] \
+                        and str(zab[oper]) != str(user[oper]):
+                rval[oper] = user[oper]
 
-            elif zab[key] != str(val):
-                rval[key] = val
     return rval
 
 def get_users(zapi, users):

+ 1 - 16
roles/openshift_cli/tasks/main.yml

@@ -3,32 +3,17 @@
     role: common
     local_facts:
       deployment_type: "{{ openshift_deployment_type }}"
+      cli_image: "{{ osm_image | default(None) }}"
       
 - name: Install clients
   yum: pkg={{ openshift.common.service_type }}-clients state=installed
   when: not openshift.common.is_containerized | bool
   
-- name: List Docker images
-  command: >
-    docker images
-  register: docker_images
-  
 - name: Pull CLI Image
   command: >
     docker pull {{ openshift.common.cli_image }}
-  when: openshift.common.is_containerized | bool and openshift.common.cli_image not in docker_images.stdout
-  
-- name: Wait for CLI image
-  command: >
-      docker images
-  register: docker_images
-  until: openshift.common.cli_image in docker_images.stdout
-  retries: 30
-  delay: 10
-  changed_when: false
   when: openshift.common.is_containerized | bool
 
-  
 - name: Create /usr/local/bin/openshift cli wrapper
   template:
     src: openshift.j2

+ 2 - 1
roles/openshift_common/tasks/main.yml

@@ -38,5 +38,6 @@
     set_hostname_default: "{{ not openshift.common.version_greater_than_3_1_or_1_1 }}"
 
 - name: Set hostname
-  hostname: name={{ openshift.common.hostname }}
+  command: >
+    hostnamectl set-hostname {{ openshift.common.hostname }}
   when: openshift_set_hostname | default(set_hostname_default) | bool

+ 1 - 0
roles/openshift_facts/tasks/main.yml

@@ -10,6 +10,7 @@
   shell: ls /run/ostree-booted
   ignore_errors: yes
   failed_when: false
+  changed_when: false
   register: ostree_output
 
 # Locally setup containerized facts for now

+ 13 - 0
roles/openshift_master/handlers/main.yml

@@ -2,11 +2,24 @@
 - name: restart master
   service: name={{ openshift.common.service_type }}-master state=restarted
   when: (not openshift_master_ha | bool) and (not (master_service_status_changed | default(false) | bool))
+  notify: Verify API Server
 
 - name: restart master api
   service: name={{ openshift.common.service_type }}-master-api state=restarted
   when: (openshift_master_ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  notify: Verify API Server
 
 - name: restart master controllers
   service: name={{ openshift.common.service_type }}-master-controllers state=restarted
   when: (openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+
+- name: Verify API Server
+  # Using curl here since the uri module requires python-httplib2 and
+  # wait_for port doesn't provide health information.
+  command: >
+    curl -k --head --silent {{ openshift.master.api_url }}
+  register: api_available_output
+  until: api_available_output.stdout.find("200 OK") != -1
+  retries: 120
+  delay: 1
+  changed_when: false

+ 15 - 16
roles/openshift_master/tasks/main.yml

@@ -86,25 +86,9 @@
   action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version  }} state=present"
   when: not openshift.common.is_containerized | bool
 
-- name: Get docker images
-  command: docker images
-  changed_when: false
-  when: openshift.common.is_containerized | bool
-  register: docker_images
-
 - name: Pull master image
   command: >
     docker pull {{ openshift.master.master_image }}
-  when: openshift.common.is_containerized | bool and openshift.master.master_image not in docker_images.stdout
-  
-- name: Wait for master image
-  command: >
-      docker images
-  register: docker_images
-  until: openshift.master.master_image in docker_images.stdout
-  retries: 30
-  delay: 10
-  changed_when: false
   when: openshift.common.is_containerized | bool
 
 - name: Install Master docker service file
@@ -285,6 +269,7 @@
   service: name={{ openshift.common.service_type }}-master enabled=yes state=started
   when: not openshift_master_ha | bool
   register: start_result
+  notify: Verify API Server
 
 - name: Stop and disable non HA master when running HA
   service: name={{ openshift.common.service_type }}-master enabled=no state=stopped
@@ -303,6 +288,20 @@
     master_api_service_status_changed: "{{ start_result | changed }}"
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'
 
+# A separate wait is required here for native HA since notifies will
+# be resolved after all tasks in the role.
+- name: Wait for API to become available
+  # Using curl here since the uri module requires python-httplib2 and
+  # wait_for port doesn't provide health information.
+  command: >
+    curl -k --head --silent {{ openshift.master.api_url }}
+  register: api_available_output
+  until: api_available_output.stdout.find("200 OK") != -1
+  retries: 120
+  delay: 1
+  changed_when: false
+  when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool
+
 - name: Start and enable master controller
   service: name={{ openshift.common.service_type }}-master-controllers enabled=yes state=started
   when: openshift_master_ha | bool and openshift.master.cluster_method == 'native'

+ 2 - 2
roles/openshift_master/templates/master.yaml.v1.j2

@@ -87,8 +87,8 @@ kubernetesMasterConfig:
   - v1beta3
   - v1
 {% endif %}
-  apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_json }}
-  controllerArguments: {{ openshift.master.controller_args | default(None) | to_json }}
+  apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
+  controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
   masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
   masterIP: {{ openshift.common.ip }}
   podEvictionTimeout: ""
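
Switching these arguments from to_json to the to_padded_yaml filter added in this commit turns inline JSON into block YAML at the requested indent. Assuming api_server_args were {'max-requests-inflight': ['500']}, the rendered config would change from

    apiServerArguments: {"max-requests-inflight": ["500"]}

to roughly

    apiServerArguments:
        max-requests-inflight:
        - '500'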

+ 2 - 8
roles/openshift_master_ca/tasks/main.yml

@@ -13,16 +13,10 @@
     path: "{{ openshift_master_config_dir }}"
     state: directory
 
-- name: Get docker images
-  command: docker images
-  changed_when: false
-  when: openshift.common.is_containerized | bool
-  register: docker_images
-
-- name: Pull required docker image
+- name: Pull master docker image
   command: >
     docker pull {{ openshift.common.cli_image }}
-  when: openshift.common.is_containerized | bool and openshift.common.cli_image not in docker_images.stdout
+  when: openshift.common.is_containerized | bool
 
 - name: Create the master certificates if they do not already exist
   command: >

+ 16 - 28
roles/openshift_node/tasks/main.yml

@@ -44,41 +44,14 @@
   action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present"
   when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool
 
-- name: Get docker images
-  command: docker images
-  changed_when: false
-  when: openshift.common.is_containerized | bool
-  register: docker_images
-
 - name: Pull node image
   command: >
     docker pull {{ openshift.node.node_image }}
-  when: openshift.common.is_containerized | bool and openshift.node.node_image not in docker_images.stdout
-  
-- name: Wait for node image
-  command: >
-      docker images
-  register: docker_images
-  until: openshift.node.node_image in docker_images.stdout
-  retries: 30
-  delay: 10
-  changed_when: false
   when: openshift.common.is_containerized | bool
-    
+
 - name: Pull OpenVSwitch image
   command: >
     docker pull {{ openshift.node.ovs_image }}
-  when: openshift.common.is_containerized | bool and openshift.node.ovs_image not in docker_images.stdout
-    and openshift.common.use_openshift_sdn | bool
-  
-- name: Wait for OpenVSwitch image
-  command: >
-      docker images
-  register: docker_images
-  until: openshift.node.ovs_image in docker_images.stdout
-  retries: 30
-  delay: 10
-  changed_when: false
   when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
 
 - name: Install Node docker service file
@@ -130,6 +103,21 @@
 - name: Additional storage plugin configuration
   include: storage_plugins/main.yml
 
+# Necessary because when a host is both a node and a master, the master is
+# restarted after the node restarts docker, and it can take up to 60 seconds
+# for systemd to start the master again.
+- name: Wait for master API to become available before proceeding
+  # Using curl here since the uri module requires python-httplib2 and
+  # wait_for port doesn't provide health information.
+  command: >
+    curl -k --head --silent {{ openshift_node_master_api_url }}
+  register: api_available_output
+  until: api_available_output.stdout.find("200 OK") != -1
+  retries: 120
+  delay: 1
+  changed_when: false
+  when: openshift.common.is_containerized | bool
+
 - name: Start and enable node
   service: name={{ openshift.common.service_type }}-node enabled=yes state=started
   register: start_result

+ 4 - 0
roles/openshift_node/tasks/storage_plugins/nfs.yml

@@ -1,4 +1,8 @@
 ---
+- name: Install NFS storage plugin dependencies
+  action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present"
+  when: not openshift.common.is_atomic | bool
+
 - name: Set seboolean to allow nfs storage plugin access from containers
   seboolean:
     name: virt_use_nfs

+ 1 - 3
roles/openshift_node/templates/node.yaml.v1.j2

@@ -11,9 +11,7 @@ imageConfig:
   format: {{ openshift.node.registry_url }}
   latest: false
 kind: NodeConfig
-{% if openshift.node.kubelet_args is defined and openshift.node.kubelet_args %}
-kubeletArguments: {{ openshift.node.kubelet_args | to_json }}
-{% endif %}
+kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
 masterKubeConfig: system:node:{{ openshift.common.hostname }}.kubeconfig
 {% if openshift.common.use_openshift_sdn %}
 networkPluginName: {{ openshift.common.sdn_network_plugin_name }}

+ 0 - 4
roles/os_zabbix/tasks/main.yml

@@ -1,8 +1,4 @@
 ---
-- fail:
-    msg: "Zabbix config is not yet supported on atomic hosts"
-  when: openshift.common.is_containerized | bool
-
 - name: Main List all templates
   zbx_template:
     zbx_server: "{{ ozb_server }}"

+ 54 - 0
roles/oso_monitoring_tools/README.md

@@ -0,0 +1,54 @@
+oso_monitoring_tools
+====================
+
+This role installs the OpenShift Monitoring Utilities.
+
+Requirements
+------------
+
+The openshift-tools monitoring RPMs installed by this role must be available
+from a yum repository configured on the target hosts.
+
+Role Variables
+--------------
+
+osomt_zagg_client_config
+
+from vars/main.yml:
+
+osomt_zagg_client_config:
+  host:
+    name: "{{ osomt_host_name }}"
+  zagg:
+    url: "{{ osomt_zagg_url }}"
+    user: "{{ osomt_zagg_user }}"
+    pass: "{{ osomt_zagg_password }}"
+    ssl_verify: "{{ osomt_zagg_ssl_verify }}"
+    verbose: "{{ osomt_zagg_verbose }}"
+    debug: "{{ osomt_zagg_debug }}"
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+- role: "oso_monitoring_tools"
+  osomt_host_name: hostname
+  osomt_zagg_url: http://path.to/zagg_web
+  osomt_zagg_user: admin
+  osomt_zagg_password: password
+  osomt_zagg_ssl_verify: True
+  osomt_zagg_verbose: False
+  osomt_zagg_debug: False
+
+License
+-------
+
+BSD
+
+Author Information
+------------------
+
+Openshift Operations

+ 2 - 0
roles/oso_monitoring_tools/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for oso_monitoring_tools

+ 2 - 0
roles/oso_monitoring_tools/handlers/main.yml

@@ -0,0 +1,2 @@
+---
+# handlers file for oso_monitoring_tools

+ 8 - 0
roles/oso_monitoring_tools/meta/main.yml

@@ -0,0 +1,8 @@
+---
+galaxy_info:
+  author: OpenShift Operations
+  description: Install OpenShift Monitoring tools
+  company: Red Hat, Inc
+  license: ASL 2.0
+  min_ansible_version: 1.2
+dependencies: []

+ 17 - 0
roles/oso_monitoring_tools/tasks/main.yml

@@ -0,0 +1,17 @@
+---
+# tasks file for oso_monitoring_tools
+- name: Install the Openshift Tools RPMS
+  yum:
+    name: "{{ item }}"
+    state: latest
+  with_items:
+    - openshift-tools-scripts-monitoring-zagg-client
+    - python-openshift-tools-monitoring-zagg
+
+- debug: var=osomt_zagg_client_config
+
+- name: Generate the /etc/openshift_tools/zagg_client.yaml config file
+  copy:
+    content: "{{ osomt_zagg_client_config | to_nice_yaml }}"
+    dest: /etc/openshift_tools/zagg_client.yaml
+    mode: "644"

+ 12 - 0
roles/oso_monitoring_tools/vars/main.yml

@@ -0,0 +1,12 @@
+---
+# vars file for oso_monitoring_tools
+osomt_zagg_client_config:
+  host:
+    name: "{{ osomt_host_name }}"
+  zagg:
+    url: "{{ osomt_zagg_url }}"
+    user: "{{ osomt_zagg_user }}"
+    pass: "{{ osomt_zagg_password }}"
+    ssl_verify: "{{ osomt_zagg_ssl_verify }}"
+    verbose: "{{ osomt_zagg_verbose }}"
+    debug: "{{ osomt_zagg_debug }}"

+ 14 - 13
utils/src/ooinstall/cli_installer.py

@@ -33,9 +33,7 @@ def is_valid_hostname(hostname):
 def validate_prompt_hostname(hostname):
     if '' == hostname or is_valid_hostname(hostname):
         return hostname
-    raise click.BadParameter('"{}" appears to be an invalid hostname. ' \
-                             'Please double-check this value i' \
-                             'and re-enter it.'.format(hostname))
+    raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
 
 def get_ansible_ssh_user():
     click.clear()
@@ -72,7 +70,7 @@ def delete_hosts(hosts):
                 click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
     return hosts, None
 
-def collect_hosts(oo_cfg, masters_set=False, print_summary=True):
+def collect_hosts(oo_cfg, existing_env=False, masters_set=False, print_summary=True):
     """
         Collect host information from user. This will later be filled in using
         ansible.
@@ -129,15 +127,18 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
                     masters_set = True
         host_props['node'] = True
 
-        #TODO: Reenable this option once container installs are out of tech preview
-        #rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
-        #                                type=click.Choice(['rpm', 'container']),
-        #                                default='rpm')
-        #if rpm_or_container == 'container':
-        #    host_props['containerized'] = True
-        #else:
-        #    host_props['containerized'] = False
         host_props['containerized'] = False
+        if oo_cfg.settings['variant_version'] != '3.0':
+            rpm_or_container = click.prompt('Will this host be RPM or Container based (rpm/container)?',
+                                            type=click.Choice(['rpm', 'container']),
+                                            default='rpm')
+            if rpm_or_container == 'container':
+                host_props['containerized'] = True
+
+        if existing_env:
+            host_props['new_host'] = True
+        else:
+            host_props['new_host'] = False
 
         host = Host(**host_props)
 
@@ -507,7 +508,7 @@ def collect_new_nodes(oo_cfg):
 Add new nodes here
     """
     click.echo(message)
-    return collect_hosts(oo_cfg, masters_set=True, print_summary=False)
+    return collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
 
 def get_installed_hosts(hosts, callback_facts):
     installed_hosts = []

+ 3 - 1
utils/src/ooinstall/oo_config.py

@@ -38,6 +38,7 @@ class Host(object):
         self.public_hostname = kwargs.get('public_hostname', None)
         self.connect_to = kwargs.get('connect_to', None)
         self.preconfigured = kwargs.get('preconfigured', None)
+        self.new_host = kwargs.get('new_host', None)
 
         # Should this host run as an OpenShift master:
         self.master = kwargs.get('master', False)
@@ -68,7 +69,8 @@ class Host(object):
         """ Used when exporting to yaml. """
         d = {}
         for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
-                     'master', 'node', 'master_lb', 'containerized', 'connect_to', 'preconfigured']:
+                     'master', 'node', 'master_lb', 'containerized',
+                     'connect_to', 'preconfigured', 'new_host']:
             # If the property is defined (not None or False), export it:
             if getattr(self, prop):
                 d[prop] = getattr(self, prop)

+ 13 - 2
utils/src/ooinstall/openshift_ansible.py

@@ -19,13 +19,15 @@ def generate_inventory(hosts):
     global CFG
     masters = [host for host in hosts if host.master]
     nodes = [host for host in hosts if host.node]
+    new_nodes = [host for host in hosts if host.node and host.new_host]
     proxy = determine_proxy_configuration(hosts)
     multiple_masters = len(masters) > 1
+    scaleup = len(new_nodes) > 0
 
     base_inventory_path = CFG.settings['ansible_inventory_path']
     base_inventory = open(base_inventory_path, 'w')
 
-    write_inventory_children(base_inventory, multiple_masters, proxy)
+    write_inventory_children(base_inventory, multiple_masters, proxy, scaleup)
 
     write_inventory_vars(base_inventory, multiple_masters, proxy)
 
@@ -71,6 +73,11 @@ def generate_inventory(hosts):
         base_inventory.write('\n[lb]\n')
         write_host(proxy, base_inventory)
 
+    if scaleup:
+        base_inventory.write('\n[new_nodes]\n')
+        for node in new_nodes:
+            write_host(node, base_inventory)
+
     base_inventory.close()
     return base_inventory_path
 
@@ -84,12 +91,14 @@ def determine_proxy_configuration(hosts):
 
     return None
 
-def write_inventory_children(base_inventory, multiple_masters, proxy):
+def write_inventory_children(base_inventory, multiple_masters, proxy, scaleup):
     global CFG
 
     base_inventory.write('\n[OSEv3:children]\n')
     base_inventory.write('masters\n')
     base_inventory.write('nodes\n')
+    if scaleup:
+        base_inventory.write('new_nodes\n')
     if multiple_masters:
         base_inventory.write('etcd\n')
     if not getattr(proxy, 'preconfigured', True):
@@ -119,6 +128,8 @@ def write_host(host, inventory, schedulable=None):
         facts += ' openshift_hostname={}'.format(host.hostname)
     if host.public_hostname:
         facts += ' openshift_public_hostname={}'.format(host.public_hostname)
+    if host.containerized:
+        facts += ' containerized={}'.format(host.containerized)
     # TODO: For now write_host handles both masters and nodes.
     # Technically only nodes will ever need this.
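
With the scaleup plumbing above, adding one node to a hypothetical existing cluster would generate an inventory containing the extra child group and section, roughly:

    [OSEv3:children]
    masters
    nodes
    new_nodes

    [masters]
    master1.example.com

    [nodes]
    master1.example.com
    node1.example.com

    [new_nodes]
    node1.example.com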
 

+ 21 - 21
utils/test/cli_installer_tests.py

@@ -681,9 +681,9 @@ class AttendedCliTests(OOCliFixture):
         run_playbook_mock.return_value = 0
 
         cli_input = build_input(hosts=[
-            ('10.0.0.1', True),
-            ('10.0.0.2', False),
-            ('10.0.0.3', False)],
+            ('10.0.0.1', True, False),
+            ('10.0.0.2', False, False),
+            ('10.0.0.3', False, False)],
                                       ssh_user='root',
                                       variant_num=1,
                                       confirm_facts='y')
@@ -722,10 +722,10 @@ class AttendedCliTests(OOCliFixture):
         run_playbook_mock.return_value = 0
 
         cli_input = build_input(hosts=[
-            ('10.0.0.1', True),
-            ('10.0.0.2', False),
+            ('10.0.0.1', True, False),
+            ('10.0.0.2', False, False),
             ],
-                                      add_nodes=[('10.0.0.3', False)],
+                                      add_nodes=[('10.0.0.3', False, False)],
                                       ssh_user='root',
                                       variant_num=1,
                                       confirm_facts='y')
@@ -773,9 +773,9 @@ class AttendedCliTests(OOCliFixture):
         mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
 
         cli_input = build_input(hosts=[
-            ('10.0.0.1', True),
+            ('10.0.0.1', True, False),
             ],
-                                      add_nodes=[('10.0.0.2', False)],
+                                      add_nodes=[('10.0.0.2', False, False)],
                                       ssh_user='root',
                                       variant_num=1,
                                       schedulable_masters_ok=True,
@@ -796,10 +796,10 @@ class AttendedCliTests(OOCliFixture):
         run_playbook_mock.return_value = 0
 
         cli_input = build_input(hosts=[
-            ('10.0.0.1', True),
-            ('10.0.0.2', True),
-            ('10.0.0.3', True),
-            ('10.0.0.4', False)],
+            ('10.0.0.1', True, False),
+            ('10.0.0.2', True, False),
+            ('10.0.0.3', True, False),
+            ('10.0.0.4', False, False)],
                                       ssh_user='root',
                                       variant_num=1,
                                       confirm_facts='y',
@@ -837,9 +837,9 @@ class AttendedCliTests(OOCliFixture):
         run_playbook_mock.return_value = 0
 
         cli_input = build_input(hosts=[
-            ('10.0.0.1', True),
-            ('10.0.0.2', True),
-            ('10.0.0.3', True)],
+            ('10.0.0.1', True, False),
+            ('10.0.0.2', True, False),
+            ('10.0.0.3', True, False)],
                                       ssh_user='root',
                                       variant_num=1,
                                       confirm_facts='y',
@@ -872,10 +872,10 @@ class AttendedCliTests(OOCliFixture):
         run_playbook_mock.return_value = 0
 
         cli_input = build_input(hosts=[
-                                      ('10.0.0.1', True),
-                                      ('10.0.0.2', True),
-                                      ('10.0.0.3', False),
-                                      ('10.0.0.4', True)],
+                                      ('10.0.0.1', True, False),
+                                      ('10.0.0.2', True, False),
+                                      ('10.0.0.3', False, False),
+                                      ('10.0.0.4', True, False)],
                                       ssh_user='root',
                                       variant_num=1,
                                       confirm_facts='y',
@@ -893,7 +893,7 @@ class AttendedCliTests(OOCliFixture):
         run_playbook_mock.return_value = 0
 
         cli_input = build_input(hosts=[
-            ('10.0.0.1', True)],
+            ('10.0.0.1', True, False)],
                                       ssh_user='root',
                                       variant_num=1,
                                       confirm_facts='y')
@@ -921,7 +921,7 @@ class AttendedCliTests(OOCliFixture):
         run_playbook_mock.return_value = 0
 
         cli_input = build_input(hosts=[
-            ('10.0.0.1', True)],
+            ('10.0.0.1', True, False)],
                                       ssh_user='root',
                                       variant_num=2,
                                       confirm_facts='y')

+ 13 - 3
utils/test/fixture.py

@@ -138,7 +138,7 @@ class OOCliFixture(OOInstallFixture):
         self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
 
 
-#pylint: disable=too-many-arguments,too-many-branches
+#pylint: disable=too-many-arguments,too-many-branches,too-many-statements
 def build_input(ssh_user=None, hosts=None, variant_num=None,
                 add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
                 master_lb=None):
@@ -163,13 +163,19 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
     num_masters = 0
     if hosts:
         i = 0
-        for (host, is_master) in hosts:
+        for (host, is_master, is_containerized) in hosts:
             inputs.append(host)
             if is_master:
                 inputs.append('y')
                 num_masters += 1
             else:
                 inputs.append('n')
+
+            if is_containerized:
+                inputs.append('container')
+            else:
+                inputs.append('rpm')
+
             #inputs.append('rpm')
             # We should not be prompted to add more hosts if we're currently at
             # 2 masters, this is an invalid HA configuration, so this question
@@ -196,8 +202,12 @@ def build_input(ssh_user=None, hosts=None, variant_num=None,
             inputs.append('y')
         inputs.append('1')  # Add more nodes
         i = 0
-        for (host, is_master) in add_nodes:
+        for (host, is_master, is_containerized) in add_nodes:
             inputs.append(host)
+            if is_containerized:
+                inputs.append('container')
+            else:
+                inputs.append('rpm')
             #inputs.append('rpm')
             if i < len(add_nodes) - 1:
                 inputs.append('y')  # Add more hosts