
Merge pull request #912 from twiest/master

sync master -> prod branch
Thomas Wiest, 9 years ago
Parent
Current commit: ad5b5e762b
36 files changed, 633 insertions(+), 133 deletions(-)
  1. README.md (+1, -1)
  2. README_AWS.md (+1, -1)
  3. README_vagrant.md (+2, -2)
  4. Vagrantfile (+0, -21)
  5. bin/cluster (+6, -6)
  6. filter_plugins/oo_filters.py (+59, -1)
  7. git/pylint.sh (+2, -0)
  8. openshift-ansible.spec (+16, -16)
  9. playbooks/adhoc/uninstall.yml (+15, -1)
  10. playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check (+9, -7)
  11. playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml (+124, -3)
  12. playbooks/common/openshift-master/config.yml (+0, -3)
  13. playbooks/libvirt/openshift-cluster/list.yml (+7, -1)
  14. playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml (+1, -1)
  15. playbooks/libvirt/openshift-cluster/templates/domain.xml (+1, -0)
  16. playbooks/openstack/openshift-cluster/list.yml (+7, -1)
  17. roles/copr_cli/README.md (+38, -0)
  18. roles/copr_cli/defaults/main.yml (+2, -0)
  19. roles/copr_cli/handlers/main.yml (+2, -0)
  20. roles/copr_cli/meta/main.yml (+14, -0)
  21. roles/copr_cli/tasks/main.yml (+4, -0)
  22. roles/copr_cli/vars/main.yml (+2, -0)
  23. roles/lib_zabbix/library/zbx_itservice.py (+2, -1)
  24. roles/openshift_facts/library/openshift_facts.py (+47, -21)
  25. roles/openshift_facts/tasks/main.yml (+1, -2)
  26. roles/os_zabbix/vars/template_aws.yml (+1, -1)
  27. roles/rhel_subscribe/tasks/main.yml (+9, -2)
  28. roles/tito/README.md (+38, -0)
  29. roles/tito/defaults/main.yml (+2, -0)
  30. roles/tito/handlers/main.yml (+2, -0)
  31. roles/tito/meta/main.yml (+14, -0)
  32. roles/tito/tasks/main.yml (+4, -0)
  33. roles/tito/vars/main.yml (+2, -0)
  34. utils/src/ooinstall/cli_installer.py (+14, -8)
  35. utils/src/ooinstall/openshift_ansible.py (+1, -1)
  36. utils/test/cli_installer_tests.py (+183, -32)

+ 1 - 1
README.md

@@ -6,7 +6,7 @@ This repo contains Ansible code for OpenShift and Atomic Enterprise.
 - Install base dependencies:
   - Fedora:
   ```
-    yum install -y ansible rubygem-thor rubygem-parseconfig util-linux
+    dnf install -y ansible rubygem-thor rubygem-parseconfig util-linux pyOpenSSL libffi-devel python-cryptography
   ```
    - OSX:
   ```

+ 1 - 1
README_AWS.md

@@ -105,7 +105,7 @@ Install Dependencies
 1. Ansible requires python-boto for aws operations:
 RHEL/CentOS/Fedora
 ```
-  yum install -y ansible python-boto
+  yum install -y ansible python-boto pyOpenSSL
 ```
 OSX:
 ```

+ 2 - 2
README_vagrant.md

@@ -3,7 +3,6 @@ Requirements
 - ansible (the latest 1.9 release is preferred, but any version greater than 1.9.1 should be sufficient).
 - vagrant (tested against version 1.7.2)
 - vagrant-hostmanager plugin (tested against version 1.5.0)
-- vagrant-registration plugin (only required for enterprise deployment type)
 - vagrant-libvirt (tested against version 0.0.26)
   - Only required if using libvirt instead of virtualbox
 
@@ -44,7 +43,8 @@ The following environment variables can be overriden:
 - ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, enterprise, online)
 - ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2)
 
-For ``enterprise`` deployment types these env variables should also be specified:
+Note that if ``OPENSHIFT_DEPLOYMENT_TYPE`` is ``enterprise`` you should also specify environment variables related to ``subscription-manager`` which are used by the ``rhel_subscribe`` role:
+
 - ``rhel_subscription_user``: rhsm user
 - ``rhel_subscription_pass``: rhsm password
 - (optional) ``rhel_subscription_pool``: poolID to attach a specific subscription besides what auto-attach detects

+ 0 - 21
Vagrantfile

@@ -16,27 +16,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   config.hostmanager.include_offline = true
   config.ssh.insert_key = false
 
-  if deployment_type === 'enterprise'
-    unless Vagrant.has_plugin?('vagrant-registration')
-      raise 'vagrant-registration-plugin is required for enterprise deployment'
-    end
-    username = ENV['rhel_subscription_user']
-    password = ENV['rhel_subscription_pass']
-    unless username and password
-      raise 'rhel_subscription_user and rhel_subscription_pass are required'
-    end
-    config.registration.username = username
-    config.registration.password = password
-    # FIXME this is temporary until vagrant/ansible registration modules
-    # are capable of handling specific subscription pools
-    if not ENV['rhel_subscription_pool'].nil?
-      config.vm.provision "shell" do |s|
-        s.inline = "subscription-manager attach --pool=$1 || true"
-        s.args = "#{ENV['rhel_subscription_pool']}"
-      end
-    end
-  end
-
   config.vm.provider "virtualbox" do |vbox, override|
     override.vm.box = "centos/7"
     vbox.memory = 1024

+ 6 - 6
bin/cluster

@@ -57,7 +57,7 @@ class Cluster(object):
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
-        playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider)
+        playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
         env['num_masters'] = args.masters
@@ -74,7 +74,7 @@ class Cluster(object):
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
-        playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
+        playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
         self.action(args, inventory, env, playbook)
@@ -86,7 +86,7 @@ class Cluster(object):
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
-        playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
+        playbook = "playbooks/{0}/openshift-cluster/list.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
         self.action(args, inventory, env, playbook)
@@ -98,7 +98,7 @@ class Cluster(object):
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
-        playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider)
+        playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
         self.action(args, inventory, env, playbook)
@@ -110,7 +110,7 @@ class Cluster(object):
         """
         env = {'cluster_id': args.cluster_id,
                'deployment_type': self.get_deployment_type(args)}
-        playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
+        playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
         self.action(args, inventory, env, playbook)
@@ -124,7 +124,7 @@ class Cluster(object):
                'deployment_type': self.get_deployment_type(args),
                'new_cluster_state': args.state}
 
-        playbook = "playbooks/{}/openshift-cluster/service.yml".format(args.provider)
+        playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider)
         inventory = self.setup_provider(args.provider)
 
         self.action(args, inventory, env, playbook)

+ 59 - 1
filter_plugins/oo_filters.py

@@ -401,6 +401,63 @@ class FilterModule(object):
                                                  "certificate names in host inventory"))
         return certificates
 
+    @staticmethod
+    def oo_pretty_print_cluster(data):
+        ''' Read a subset of hostvars and build a summary of the cluster
+            in the following layout:
+
+"c_id": {
+  "master": [
+    { "name": "c_id-master-12345",       "public IP": "172.16.0.1", "private IP": "192.168.0.1", "subtype": "default" }]
+  "node": [
+    { "name": "c_id-node-infra-23456",   "public IP": "172.16.0.2", "private IP": "192.168.0.2", "subtype": "infra" },
+    { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3", "subtype": "compute" },
+  ...
+  ]}
+        '''
+
+        def _get_tag_value(tags, key):
+            ''' Extract values of a map implemented as a set.
+                Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' }
+                    key = 'bar'
+                    returns 'value2'
+            '''
+            for tag in tags:
+                # Skip tag_env-host-type to avoid ambiguity with tag_env
+                if tag[:17] == 'tag_env-host-type':
+                    continue
+                if tag[:len(key)+4] == 'tag_' + key:
+                    return tag[len(key)+5:]
+            raise KeyError(key)
+
+        def _add_host(clusters,
+                      env,
+                      host_type,
+                      sub_host_type,
+                      host):
+            ''' Add a new host in the clusters data structure '''
+            if env not in clusters:
+                clusters[env] = {}
+            if host_type not in clusters[env]:
+                clusters[env][host_type] = {}
+            if sub_host_type not in clusters[env][host_type]:
+                clusters[env][host_type][sub_host_type] = []
+            clusters[env][host_type][sub_host_type].append(host)
+
+        clusters = {}
+        for host in data:
+            try:
+                _add_host(clusters=clusters,
+                          env=_get_tag_value(host['group_names'], 'env'),
+                          host_type=_get_tag_value(host['group_names'], 'host-type'),
+                          sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
+                          host={'name': host['inventory_hostname'],
+                                'public IP': host['ansible_ssh_host'],
+                                'private IP': host['ansible_default_ipv4']['address']})
+            except KeyError:
+                pass
+        return clusters
+
     def filters(self):
         ''' returns a mapping of filters to methods '''
         return {
@@ -418,5 +475,6 @@ class FilterModule(object):
             "oo_filter_list": self.oo_filter_list,
             "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
             "oo_parse_certificate_names": self.oo_parse_certificate_names,
-            "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters
+            "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
+            "oo_pretty_print_cluster": self.oo_pretty_print_cluster
         }
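
For context, a minimal sketch of how the new oo_pretty_print_cluster filter groups hosts by env, host type and sub-host type; the cluster id, group names and addresses below are made-up sample values, not data from this PR:

    # Hypothetical hostvars entry shaped like what the libvirt/openstack
    # list.yml playbooks now pass through the filter.
    sample = [{
        'inventory_hostname': 'c_id-master-12345',
        'ansible_ssh_host': '172.16.0.1',
        'ansible_default_ipv4': {'address': '192.168.0.1'},
        'group_names': ['tag_env-c_id',
                        'tag_host-type-master',
                        'tag_sub-host-type-default',
                        'tag_env-host-type-c_id-openshift-master'],
    }]

    print FilterModule.oo_pretty_print_cluster(sample)
    # {'c_id': {'master': {'default': [{'name': 'c_id-master-12345',
    #                                   'public IP': '172.16.0.1',
    #                                   'private IP': '192.168.0.1'}]}}}

Hosts whose group_names lack any of the env / host-type / sub-host-type tags are silently skipped by the KeyError handler.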

+ 2 - 0
git/pylint.sh

@@ -40,6 +40,8 @@ for PY_FILE in $PY_DIFF; do
   fi
 done
 
+export PYTHONPATH=${WORKSPACE}/utils/src/:${WORKSPACE}/utils/test/
+
 if [ "${FILES_TO_TEST}" != "" ]; then
   echo "Testing files: ${FILES_TO_TEST}"
   exec ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST}

+ 16 - 16
openshift-ansible.spec

@@ -13,7 +13,8 @@ URL:            https://github.com/openshift/openshift-ansible
 Source0:        https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
 BuildArch:      noarch
 
-Requires:      ansible
+Requires:      ansible >= 1.9.3
+Requires:      python2
 
 %description
 Openshift and Atomic Enterprise Ansible
@@ -96,8 +97,9 @@ popd
 # ----------------------------------------------------------------------------------
 %package bin
 Summary:       Openshift and Atomic Enterprise Ansible Scripts for working with metadata hosts
-Requires:      %{name}-inventory
-Requires:      python2
+Requires:      %{name} = %{version}
+Requires:      %{name}-inventory = %{version}
+Requires:      %{name}-playbooks = %{version}
 BuildRequires: python2-devel
 BuildArch:     noarch
 
@@ -117,7 +119,7 @@ Scripts to make it nicer when working with hosts that are defined only by metada
 # ----------------------------------------------------------------------------------
 %package docs
 Summary:       Openshift and Atomic Enterprise Ansible documents
-Requires:      %{name}
+Requires:      %{name} = %{version}
 BuildArch:     noarch
 
 %description docs
@@ -131,7 +133,7 @@ BuildArch:     noarch
 # ----------------------------------------------------------------------------------
 %package inventory
 Summary:       Openshift and Atomic Enterprise Ansible Inventories
-Requires:      python2
+Requires:      %{name} = %{version}
 BuildArch:     noarch
 
 %description inventory
@@ -144,7 +146,7 @@ Ansible Inventories used with the openshift-ansible scripts and playbooks.
 
 %package inventory-aws
 Summary:       Openshift and Atomic Enterprise Ansible Inventories for AWS
-Requires:      %{name}-inventory
+Requires:      %{name}-inventory = %{version}
 Requires:      python-boto
 BuildArch:     noarch
 
@@ -156,7 +158,7 @@ Ansible Inventories for AWS used with the openshift-ansible scripts and playbook
 
 %package inventory-gce
 Summary:       Openshift and Atomic Enterprise Ansible Inventories for GCE
-Requires:      %{name}-inventory
+Requires:      %{name}-inventory = %{version}
 Requires:      python-libcloud >= 0.13
 BuildArch:     noarch
 
@@ -172,10 +174,10 @@ Ansible Inventories for GCE used with the openshift-ansible scripts and playbook
 # ----------------------------------------------------------------------------------
 %package playbooks
 Summary:       Openshift and Atomic Enterprise Ansible Playbooks
-Requires:      %{name}
-Requires:      %{name}-roles
-Requires:      %{name}-lookup-plugins
-Requires:      %{name}-filter-plugins
+Requires:      %{name} = %{version}
+Requires:      %{name}-roles = %{version}
+Requires:      %{name}-lookup-plugins = %{version}
+Requires:      %{name}-filter-plugins = %{version}
 BuildArch:     noarch
 
 %description playbooks
@@ -191,8 +193,8 @@ BuildArch:     noarch
 %package roles
 Summary:       Openshift and Atomic Enterprise Ansible roles
 Requires:      %{name}
-Requires:      %{name}-lookup-plugins
-Requires:      %{name}-filter-plugins
+Requires:      %{name}-lookup-plugins = %{version}
+Requires:      %{name}-filter-plugins = %{version}
 BuildArch:     noarch
 
 %description roles
@@ -238,9 +240,7 @@ BuildArch:     noarch
 %package -n atomic-openshift-utils
 Summary:       Atomic OpenShift Utilities
 BuildRequires: python-setuptools
-Requires:      openshift-ansible-playbooks
-Requires:      openshift-ansible-roles
-Requires:      ansible
+Requires:      %{name}-playbooks >= %{version}
 Requires:      python-click
 Requires:      python-setuptools
 Requires:      PyYAML

+ 15 - 1
playbooks/adhoc/uninstall.yml

@@ -103,7 +103,7 @@
     - shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
       changed_when: False
 
-    - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node 
+    - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
       changed_when: False
       failed_when: False
       with_items:
@@ -152,10 +152,14 @@
         - /etc/sysconfig/atomic-enterprise-master
         - /etc/sysconfig/atomic-enterprise-node
         - /etc/sysconfig/atomic-openshift-master
+        - /etc/sysconfig/atomic-openshift-master-api
+        - /etc/sysconfig/atomic-openshift-master-controllers
         - /etc/sysconfig/atomic-openshift-node
         - /etc/sysconfig/openshift-master
         - /etc/sysconfig/openshift-node
         - /etc/sysconfig/origin-master
+        - /etc/sysconfig/origin-master-api
+        - /etc/sysconfig/origin-master-controllers
         - /etc/sysconfig/origin-node
         - /root/.kube
         - /run/openshift-sdn
@@ -165,6 +169,16 @@
         - /var/lib/openshift
         - /var/lib/origin
         - /var/lib/pacemaker
+        - /usr/lib/systemd/system/atomic-openshift-master-api.service
+        - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
+        - /usr/lib/systemd/system/origin-master-api.service
+        - /usr/lib/systemd/system/origin-master-controllers.service
+
+    # Since we are potentially removing the systemd unit files for the separated
+    # master-api and master-controllers services, we need to reload the systemd
+    # manager configuration.
+    - name: Reload systemd manager configuration
+      command: systemctl daemon-reload
 
     - name: restart docker
       service: name=docker state=restarted

+ 9 - 7
playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check

@@ -83,7 +83,7 @@ def get(obj, *paths):
 
 
 # pylint: disable=too-many-arguments
-def pretty_print_errors(namespace, kind, item_name, container_name, port_name, valid):
+def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid):
     """
     Prints out results in human friendly way.
 
@@ -93,15 +93,16 @@ def pretty_print_errors(namespace, kind, item_name, container_name, port_name, v
        - `item_name`: Name of the resource
        - `container_name`: Name of the container. May be "" when kind=Service.
        - `port_name`: Name of the port
+       - `invalid_label`: The label of the invalid port. Port.name/targetPort
        - `valid`: True if the port is valid
     """
     if not valid:
         if len(container_name) > 0:
-            print('%s/%s -n %s (Container="%s" Port="%s")' % (
-                kind, item_name, namespace, container_name, port_name))
+            print('%s/%s -n %s (Container="%s" %s="%s")' % (
+                kind, item_name, namespace, container_name, invalid_label, port_name))
         else:
-            print('%s/%s -n %s (Port="%s")' % (
-                kind, item_name, namespace, port_name))
+            print('%s/%s -n %s (%s="%s")' % (
+                kind, item_name, namespace, invalid_label, port_name))
 
 
 def print_validation_header():
@@ -160,7 +161,7 @@ def main():
                         print_validation_header()
                     pretty_print_errors(
                         namespace, kind, item_name,
-                        container_name, port_name, valid)
+                        container_name, "Port.name", port_name, valid)
 
     # Services follow a different flow
     for item in list_items('services'):
@@ -176,7 +177,8 @@ def main():
                 first_error = False
                 print_validation_header()
             pretty_print_errors(
-                namespace, "services", item_name, "", port_name, valid)
+                namespace, "services", item_name, "",
+                "targetPort", port_name, valid)
 
     # If we had at least 1 error then exit with 1
     if not first_error:

+ 124 - 3
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -10,7 +10,7 @@
   roles:
   - openshift_facts
 
-- name: Evaluate etcd_hosts_to_backup
+- name: Evaluate additional groups for upgrade
   hosts: localhost
   tasks:
   - name: Evaluate etcd_hosts_to_backup
@@ -52,7 +52,7 @@
 
 
 - name: Verify upgrade can proceed
-  hosts: masters:nodes
+  hosts: oo_masters_to_config:oo_nodes_to_config
   tasks:
   - name: Clean yum cache
     command: yum clean all
@@ -78,6 +78,29 @@
       msg: Atomic OpenShift 3.1 packages not found
     when: g_aos_versions.curr_version | version_compare('3.0.2.900','<') and (g_aos_versions.avail_version is none or g_aos_versions.avail_version | version_compare('3.0.2.900','<'))
 
+  - set_fact:
+      pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+  hosts: localhost
+  vars:
+    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+  tasks:
+  - set_fact:
+      pre_upgrade_completed: "{{ hostvars
+                                 | oo_select_keys(pre_upgrade_hosts)
+                                 | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+  - set_fact:
+      pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+    when: pre_upgrade_failed | length > 0
+
+
 
 ###############################################################################
 # Backup etcd
@@ -90,6 +113,7 @@
   roles:
   - openshift_facts
   tasks:
+  # Ensure we persist the etcd role for this host in openshift_facts
   - openshift_facts:
       role: etcd
       local_facts: {}
@@ -134,11 +158,32 @@
       etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
       --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
 
+  - set_fact:
+      etcd_backup_complete: True
+
   - name: Display location of etcd backup
     debug:
       msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
 
 
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+  hosts: localhost
+  tasks:
+  - set_fact:
+      etcd_backup_completed: "{{ hostvars
+                                 | oo_select_keys(groups.etcd_hosts_to_backup)
+                                 | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+  - set_fact:
+      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+    when: etcd_backup_failed | length > 0
+
+
+
 ###############################################################################
 # Upgrade Masters
 ###############################################################################
@@ -152,7 +197,7 @@
     changed_when: False
 
 - name: Update deployment type
-  hosts: OSEv3
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
   roles:
   - openshift_facts
   post_tasks:
@@ -161,6 +206,16 @@
       local_facts:
         deployment_type: "{{ deployment_type }}"
 
+- name: Update master facts
+  hosts: oo_masters_to_config
+  roles:
+  - openshift_facts
+  post_tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+        cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
 - name: Upgrade master packages and configuration
   hosts: oo_masters_to_config
   vars:
@@ -290,6 +345,30 @@
     changed_when: False
 
 
+- name: Set master update status to complete
+  hosts: oo_masters_to_config
+  tasks:
+  - set_fact:
+      master_update_complete: True
+
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+  hosts: localhost
+  tasks:
+  - set_fact:
+      master_update_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_masters_to_config)
+                                 | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+  - set_fact:
+      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+    when: master_update_failed | length > 0
+
+
 ###############################################################################
 # Upgrade Nodes
 ###############################################################################
@@ -309,6 +388,26 @@
   - name: Ensure node service enabled
     service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
 
+  - set_fact:
+      node_update_complete: True
+
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+  hosts: localhost
+  tasks:
+  - set_fact:
+      node_update_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_nodes_to_config)
+                                 | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+  - set_fact:
+      node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+    when: node_update_failed | length > 0
+
 
 ###############################################################################
 # Post upgrade - Reconcile Cluster Roles and Cluster Role Bindings
@@ -356,6 +455,28 @@
     when: openshift_master_ha | bool
     run_once: true
 
+  - set_fact:
+      reconcile_complete: True
+
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+  hosts: localhost
+  tasks:
+  - set_fact:
+      reconcile_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_masters_to_config)
+                                 | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+  - set_fact:
+      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+    when: reconcile_failed | length > 0
+
+
+
 
 ###############################################################################
 # Post upgrade - Upgrade default router, default registry and examples
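
All of the gate plays added above (pre-upgrade checks, etcd backup, master update, node update, reconcile) follow one pattern: each host records a *_complete fact, then a localhost play uses oo_select_keys/oo_collect to list the hosts that set it and fails on the difference with the full group. A rough Python equivalent of that check, using illustrative host names rather than anything from this PR:

    # Stand-in for the Jinja2 gate logic: collect hosts that set the flag,
    # then fail if any host in the group is missing from that list.
    group = ['master-0', 'master-1', 'node-0']
    hostvars = {
        'master-0': {'pre_upgrade_complete': True},
        'master-1': {'pre_upgrade_complete': True},
        'node-0': {},  # this host never reached the set_fact task
    }

    completed = [h for h in group
                 if hostvars.get(h, {}).get('pre_upgrade_complete') is True]
    failed = [h for h in group if h not in completed]
    if failed:
        raise SystemExit("Upgrade cannot continue. The following hosts did not "
                         "complete pre-upgrade checks: %s" % ','.join(failed))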

+ 0 - 3
playbooks/common/openshift-master/config.yml

@@ -51,9 +51,6 @@
           console_url: "{{ openshift_master_console_url | default(None) }}"
           console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
           public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
-      - role: etcd
-        local_facts: {}
-        when: openshift.master.embedded_etcd | bool
   - name: Check status of external etcd certificatees
     stat:
       path: "{{ openshift.common.config_base }}/master/{{ item }}"

+ 7 - 1
playbooks/libvirt/openshift-cluster/list.yml

@@ -18,6 +18,12 @@
 
 - name: List Hosts
   hosts: oo_list_hosts
+
+- name: List Hosts
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
   tasks:
   - debug:
-      msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
+      msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"

+ 1 - 1
playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml

@@ -81,7 +81,7 @@
     ansible_ssh_host: '{{ item.1 }}'
     ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
     ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
-    groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
+    groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}'
   with_together:
     - instances
     - ips

+ 1 - 0
playbooks/libvirt/openshift-cluster/templates/domain.xml

@@ -6,6 +6,7 @@
       <ansible:tag>env-{{ cluster }}</ansible:tag>
       <ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
       <ansible:tag>host-type-{{ type }}</ansible:tag>
+      <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
     </ansible:tags>
   </metadata>
   <currentMemory unit='GiB'>1</currentMemory>

+ 7 - 1
playbooks/openstack/openshift-cluster/list.yml

@@ -19,6 +19,12 @@
 
 - name: List Hosts
   hosts: oo_list_hosts
+
+- name: List Hosts
+  hosts: localhost
+  gather_facts: no
+  vars_files:
+  - vars.yml
   tasks:
   - debug:
-      msg: 'public:{{ansible_ssh_host}} private:{{ansible_default_ipv4.address}}'
+      msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"

+ 38 - 0
roles/copr_cli/README.md

@@ -0,0 +1,38 @@
+Role Name
+=========
+
+This role manages Copr CLI.
+
+https://apps.fedoraproject.org/packages/copr-cli/
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+    - hosts: servers
+      roles:
+      - role: copr_cli
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Thomas Wiest

+ 2 - 0
roles/copr_cli/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for copr_cli

+ 2 - 0
roles/copr_cli/handlers/main.yml

@@ -0,0 +1,2 @@
+---
+# handlers file for copr_cli

+ 14 - 0
roles/copr_cli/meta/main.yml

@@ -0,0 +1,14 @@
+---
+galaxy_info:
+  author: Thomas Wiest
+  description: Manages Copr CLI
+  company: Red Hat
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - packaging
+dependencies: []

+ 4 - 0
roles/copr_cli/tasks/main.yml

@@ -0,0 +1,4 @@
+---
+- yum:
+    name: copr-cli
+    state: present

+ 2 - 0
roles/copr_cli/vars/main.yml

@@ -0,0 +1,2 @@
+---
+# vars file for copr_cli

+ 2 - 1
roles/lib_zabbix/library/zbx_itservice.py

@@ -183,7 +183,7 @@ def main():
         if not exists(content):
             module.exit_json(changed=False, state="absent")
 
-        content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['itemid']])
+        content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['serviceid']])
         module.exit_json(changed=True, results=content['result'], state="absent")
 
     # Create and Update
@@ -210,6 +210,7 @@ def main():
             if content.has_key('error'):
                 module.exit_json(failed=True, changed=True, results=content['error'], state="present")
 
+            if dependencies:
                 content = add_dependencies(zapi, module.params['name'], dependencies)
 
                 if content.has_key('error'):

+ 47 - 21
roles/openshift_facts/library/openshift_facts.py

@@ -24,8 +24,23 @@ import StringIO
 import yaml
 from distutils.util import strtobool
 from distutils.version import LooseVersion
-from netaddr import IPNetwork
+import struct
+import socket
 
+def first_ip(network):
+    """ Return the first IPv4 address in network
+
+        Args:
+            network (str): network in CIDR format
+        Returns:
+            str: first IPv4 address
+    """
+    atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
+    itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
+
+    (address, netmask) = network.split('/')
+    netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
+    return itoa((atoi(address) & netmask_i) + 1)
 
 def hostname_valid(hostname):
     """ Test if specified hostname should be considered valid
@@ -525,7 +540,7 @@ def set_aggregate_facts(facts):
                          'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
             all_hostnames.update(svc_names)
             internal_hostnames.update(svc_names)
-            first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1])
+            first_svc_ip = first_ip(facts['master']['portal_net'])
             all_hostnames.add(first_svc_ip)
             internal_hostnames.add(first_svc_ip)
 
@@ -543,8 +558,10 @@ def set_etcd_facts_if_unset(facts):
 
     If anything goes wrong parsing these, the fact will not be set.
     """
-    if 'etcd' in facts:
-        if 'master' in facts and facts['master']['embedded_etcd']:
+    if 'master' in facts and facts['master']['embedded_etcd']:
+        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
+
+        if 'etcd_data_dir' not in etcd_facts:
             try:
                 # Parse master config to find actual etcd data dir:
                 master_cfg_path = os.path.join(facts['common']['config_base'],
@@ -553,28 +570,37 @@ def set_etcd_facts_if_unset(facts):
                 config = yaml.safe_load(master_cfg_f.read())
                 master_cfg_f.close()
 
-                facts['etcd']['etcd_data_dir'] = \
+                etcd_facts['etcd_data_dir'] = \
                     config['etcdConfig']['storageDirectory']
+
+                facts['etcd'] = etcd_facts
+
             # We don't want exceptions bubbling up here:
             # pylint: disable=broad-except
             except Exception:
                 pass
-        else:
-            # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
-            try:
-                # Add a fake section for parsing:
-                ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read()
-                ini_fp = StringIO.StringIO(ini_str)
-                config = ConfigParser.RawConfigParser()
-                config.readfp(ini_fp)
-                etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
-                if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
-                    etcd_data_dir = etcd_data_dir[1:-1]
-                facts['etcd']['etcd_data_dir'] = etcd_data_dir
-            # We don't want exceptions bubbling up here:
-            # pylint: disable=broad-except
-            except Exception:
-                pass
+    else:
+        etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
+
+        # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
+        try:
+            # Add a fake section for parsing:
+            ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read()
+            ini_fp = StringIO.StringIO(ini_str)
+            config = ConfigParser.RawConfigParser()
+            config.readfp(ini_fp)
+            etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
+            if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
+                etcd_data_dir = etcd_data_dir[1:-1]
+
+            etcd_facts['etcd_data_dir'] = etcd_data_dir
+            facts['etcd'] = etcd_facts
+
+        # We don't want exceptions bubbling up here:
+        # pylint: disable=broad-except
+        except Exception:
+            pass
+
     return facts
 
 def set_deployment_facts_if_unset(facts):
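
The first_ip() helper added at the top of this file drops the python-netaddr dependency: socket.inet_aton happens to accept the bare prefix length ('16') as a legacy single-integer address, so atoi(netmask) yields the prefix length, which is shifted into a 32-bit mask, ANDed with the network address, and incremented by one to give the first service IP. A quick sanity check of that sketch (the CIDRs are illustrative; 172.30.0.0/16 is the usual portal_net default):

    # first_ip() should match what str(IPNetwork(network)[1]) used to return.
    print first_ip('172.30.0.0/16')  # 172.30.0.1
    print first_ip('10.1.2.0/24')    # 10.1.2.1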

+ 1 - 2
roles/openshift_facts/tasks/main.yml

@@ -6,10 +6,9 @@
     - ansible_version | version_compare('1.9.0', 'ne')
     - ansible_version | version_compare('1.9.0.1', 'ne')
 
-- name: Ensure python-netaddr and PyYaml are installed
+- name: Ensure PyYaml is installed
   yum: pkg={{ item }} state=installed
   with_items:
-    - python-netaddr
     - PyYAML
 
 - name: Gather Cluster facts

+ 1 - 1
roles/os_zabbix/vars/template_aws.yml

@@ -4,7 +4,7 @@ g_template_aws:
   zdiscoveryrules:
   - name: disc.aws
     key: disc.aws
-    lifetime: 1
+    lifetime: 14
     description: "Dynamically register AWS bucket info"
 
   zitemprototypes:

+ 9 - 2
roles/rhel_subscribe/tasks/main.yml

@@ -6,19 +6,26 @@
 - set_fact:
     rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}"
     rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}"
+    rhel_subscription_server: "{{ lookup('oo_option', 'rhel_subscription_server') | default(rhsub_server) }}"
 
 - fail:
     msg: "This role is only supported for Red Hat hosts"
   when: ansible_distribution != 'RedHat'
 
 - fail:
-    msg: Either rsub_user or the rhel_subscription_user env variable are required for this role.
+    msg: Either rhsub_user or the rhel_subscription_user env variable are required for this role.
   when: rhel_subscription_user is not defined
 
 - fail:
-    msg: Either rsub_pass or the rhel_subscription_pass env variable are required for this role.
+    msg: Either rhsub_pass or the rhel_subscription_pass env variable are required for this role.
   when: rhel_subscription_pass is not defined
 
+- name: Satellite preparation
+  command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
+  args:
+    creates: /etc/rhsm/ca/katello-server-ca.pem
+  when: rhel_subscription_server is defined and rhel_subscription_server
+
 - name: RedHat subscriptions
   redhat_subscription:
     username: "{{ rhel_subscription_user }}"

+ 38 - 0
roles/tito/README.md

@@ -0,0 +1,38 @@
+Role Name
+=========
+
+This role manages Tito.
+
+https://github.com/dgoodwin/tito
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+    - hosts: servers
+      roles:
+      - role: tito
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Thomas Wiest

+ 2 - 0
roles/tito/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+# defaults file for tito

+ 2 - 0
roles/tito/handlers/main.yml

@@ -0,0 +1,2 @@
+---
+# handlers file for tito

+ 14 - 0
roles/tito/meta/main.yml

@@ -0,0 +1,14 @@
+---
+galaxy_info:
+  author: Thomas Wiest
+  description: Manages Tito
+  company: Red Hat
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.2
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - packaging
+dependencies: []

+ 4 - 0
roles/tito/tasks/main.yml

@@ -0,0 +1,4 @@
+---
+- yum:
+    name: tito
+    state: present

+ 2 - 0
roles/tito/vars/main.yml

@@ -0,0 +1,2 @@
+---
+# vars file for tito

+ 14 - 8
utils/src/ooinstall/cli_installer.py

@@ -71,7 +71,7 @@ def delete_hosts(hosts):
                 click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
     return hosts, None
 
-def collect_hosts():
+def collect_hosts(master_set=False):
     """
         Collect host information from user. This will later be filled in using
         ansible.
@@ -108,8 +108,10 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
                                       value_proc=validate_prompt_hostname)
 
         host_props['connect_to'] = hostname_or_ip
-
-        host_props['master'] = click.confirm('Will this host be an OpenShift Master?')
+        if not master_set:
+            is_master = click.confirm('Will this host be an OpenShift Master?')
+            host_props['master'] = is_master
+            master_set = True
         host_props['node'] = True
 
         #TODO: Reenable this option once container installs are out of tech preview
@@ -188,7 +190,7 @@ Notes:
     facts_confirmed = click.confirm("Do the above facts look correct?")
     if not facts_confirmed:
         message = """
-Edit %s with the desired values and rerun atomic-openshift-installer with --unattended .
+Edit %s with the desired values and run `atomic-openshift-installer --unattended install` to restart the install.
 """ % oo_cfg.config_path
         click.echo(message)
         # Make sure we actually write out the config file.
@@ -308,7 +310,7 @@ def collect_new_nodes():
 Add new nodes here
     """
     click.echo(message)
-    return collect_hosts()
+    return collect_hosts(True)
 
 def get_installed_hosts(hosts, callback_facts):
     installed_hosts = []
@@ -336,7 +338,9 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
             if not unattended:
                 click.echo('By default the installer only adds new nodes to an installed environment.')
                 response = click.prompt('Do you want to (1) only add additional nodes or ' \
-                                        '(2) perform a clean install?', type=int)
+                                        '(2) reinstall the existing hosts ' \
+                                        'potentially erasing any custom changes?',
+                                        type=int)
                 # TODO: this should be reworked with error handling.
                 # Click can certainly do this for us.
                 # This should be refactored as soon as we add a 3rd option.
@@ -429,8 +433,10 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
 # Main CLI entrypoint, not much we can do about too many arguments.
 def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
     """
-    The main click CLI module. Responsible for handling most common CLI options,
-    assigning any defaults and adding to the context for the sub-commands.
+    atomic-openshift-installer makes the process for installing OSE or AEP easier by interactively gathering the data needed to run on each host.
+    It can also be run in unattended mode if provided with a configuration file.
+
+    Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
     """
     ctx.obj = {}
     ctx.obj['unattended'] = unattended

+ 1 - 1
utils/src/ooinstall/openshift_ansible.py

@@ -82,7 +82,7 @@ def write_host(host, inventory, scheduleable=True):
     if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
         facts += ' ansible_connection=local'
         if os.geteuid() != 0:
-            no_pwd_sudo = subprocess.call(['sudo', '-v', '-n'])
+            no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift'])
             if no_pwd_sudo == 1:
                 print 'The atomic-openshift-installer requires sudo access without a password.'
                 sys.exit(1)

+ 183 - 32
utils/test/cli_installer_tests.py

@@ -46,20 +46,20 @@ SAMPLE_CONFIG = """
 variant: %s
 ansible_ssh_user: root
 hosts:
-  - connect_to: master-private.example.com
+  - connect_to: 10.0.0.1
     ip: 10.0.0.1
     hostname: master-private.example.com
     public_ip: 24.222.0.1
     public_hostname: master.example.com
     master: true
     node: true
-  - connect_to: node1-private.example.com
+  - connect_to: 10.0.0.2
     ip: 10.0.0.2
     hostname: node1-private.example.com
     public_ip: 24.222.0.2
     public_hostname: node1.example.com
     node: true
-  - connect_to: node2-private.example.com
+  - connect_to: 10.0.0.3
     ip: 10.0.0.3
     hostname: node2-private.example.com
     public_ip: 24.222.0.3
@@ -98,6 +98,76 @@ class OOCliFixture(OOInstallFixture):
         f.close()
         return config
 
+    def _verify_load_facts(self, load_facts_mock):
+        """ Check that we ran load facts with expected inputs. """
+        load_facts_args = load_facts_mock.call_args[0]
+        self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+            load_facts_args[0])
+        self.assertEquals(os.path.join(self.work_dir,
+            "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
+        env_vars = load_facts_args[2]
+        self.assertEquals(os.path.join(self.work_dir,
+            '.ansible/callback_facts.yaml'),
+            env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+        self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+
+    def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
+        """ Check that we ran playbook with expected inputs. """
+        hosts = run_playbook_mock.call_args[0][0]
+        hosts_to_run_on = run_playbook_mock.call_args[0][1]
+        self.assertEquals(exp_hosts_len, len(hosts))
+        self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+    def _verify_config_hosts(self, written_config, host_count):
+        print written_config['hosts']
+        self.assertEquals(host_count, len(written_config['hosts']))
+        for h in written_config['hosts']:
+            self.assertTrue(h['node'])
+            self.assertTrue('ip' in h)
+            self.assertTrue('hostname' in h)
+            self.assertTrue('public_ip' in h)
+            self.assertTrue('public_hostname' in h)
+
+    #pylint: disable=too-many-arguments
+    def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
+                                    run_playbook_mock, cli_input,
+                                    exp_hosts_len=None, exp_hosts_to_run_on_len=None,
+                                    force=None):
+        """
+        Tests cli_installer.py:get_hosts_to_run_on.  That method has quite a
+        few subtle branches in the logic.  The goal with this method is simply
+        to handle all the messy stuff here and allow the main test cases to be
+        easily read.  The basic idea is to modify mock_facts to return a
+        version indicating OpenShift is already installed on particular hosts.
+        """
+        load_facts_mock.return_value = (mock_facts, 0)
+        run_playbook_mock.return_value = 0
+
+        if cli_input:
+            self.cli_args.append("install")
+            result = self.runner.invoke(cli.cli,
+                                        self.cli_args,
+                                        input=cli_input)
+        else:
+            config_file = self.write_config(os.path.join(self.work_dir,
+                'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+            self.cli_args.extend(["-c", config_file, "install"])
+            if force:
+                self.cli_args.append("--force")
+            result = self.runner.invoke(cli.cli, self.cli_args)
+            written_config = self._read_yaml(config_file)
+            self._verify_config_hosts(written_config, exp_hosts_len)
+
+        self.assert_result(result, 0)
+        self._verify_load_facts(load_facts_mock)
+        self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
+
+        # Make sure we ran on the expected masters and nodes:
+        hosts = run_playbook_mock.call_args[0][0]
+        hosts_to_run_on = run_playbook_mock.call_args[0][1]
+        self.assertEquals(exp_hosts_len, len(hosts))
+        self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
 
 class UnattendedCliTests(OOCliFixture):
 
@@ -105,6 +175,92 @@ class UnattendedCliTests(OOCliFixture):
         OOCliFixture.setUp(self)
         self.cli_args.append("-u")
 
+    # unattended with config file and all installed hosts (without --force)
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_get_hosts_to_run_on1(self, load_facts_mock, run_playbook_mock):
+        mock_facts = copy.deepcopy(MOCK_FACTS)
+        mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+        mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+        mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
+
+        load_facts_mock.return_value = (mock_facts, 0)
+        run_playbook_mock.return_value = 0
+
+        config_file = self.write_config(os.path.join(self.work_dir,
+            'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+        self.cli_args.extend(["-c", config_file, "install"])
+        result = self.runner.invoke(cli.cli, self.cli_args)
+
+        if result.exception is None or result.exit_code != 1:
+            print "Exit code: %s" % result.exit_code
+            self.fail("Unexpected CLI return")
+
+    # unattended with config file and all installed hosts (with --force)
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_get_hosts_to_run_on2(self, load_facts_mock, run_playbook_mock):
+        mock_facts = copy.deepcopy(MOCK_FACTS)
+        mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+        mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+        mock_facts['10.0.0.3']['common']['version'] = "3.0.0"
+        self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+                                         cli_input=None,
+                                         exp_hosts_len=3,
+                                         exp_hosts_to_run_on_len=3,
+                                         force=True)
+
+    # unattended with config file and no installed hosts (without --force)
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_get_hosts_to_run_on3(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS, 0)
+        run_playbook_mock.return_value = 0
+        self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
+                                         cli_input=None,
+                                         exp_hosts_len=3,
+                                         exp_hosts_to_run_on_len=3,
+                                         force=False)
+
+    # unattended with config file and no installed hosts (with --force)
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_get_hosts_to_run_on4(self, load_facts_mock, run_playbook_mock):
+        load_facts_mock.return_value = (MOCK_FACTS, 0)
+        run_playbook_mock.return_value = 0
+        self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,
+                                         cli_input=None,
+                                         exp_hosts_len=3,
+                                         exp_hosts_to_run_on_len=3,
+                                         force=True)
+
+    # unattended with config file and some installed some uninstalled hosts (without --force)
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_get_hosts_to_run_on5(self, load_facts_mock, run_playbook_mock):
+        mock_facts = copy.deepcopy(MOCK_FACTS)
+        mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+        mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+        self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+                                         cli_input=None,
+                                         exp_hosts_len=3,
+                                         exp_hosts_to_run_on_len=2,
+                                         force=False)
+
+    # unattended with config file and some installed some uninstalled hosts (with --force)
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_get_hosts_to_run_on6(self, load_facts_mock, run_playbook_mock):
+        mock_facts = copy.deepcopy(MOCK_FACTS)
+        mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+        mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+        self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,
+                                         cli_input=None,
+                                         exp_hosts_len=3,
+                                         exp_hosts_to_run_on_len=3,
+                                         force=True)
+
     @patch('ooinstall.openshift_ansible.run_main_playbook')
     @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_cfg_full_run(self, load_facts_mock, run_playbook_mock):
@@ -363,35 +519,6 @@ class AttendedCliTests(OOCliFixture):
 
         return '\n'.join(inputs)
 
-    def _verify_load_facts(self, load_facts_mock):
-        """ Check that we ran load facts with expected inputs. """
-        load_facts_args = load_facts_mock.call_args[0]
-        self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
-            load_facts_args[0])
-        self.assertEquals(os.path.join(self.work_dir,
-            "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
-        env_vars = load_facts_args[2]
-        self.assertEquals(os.path.join(self.work_dir,
-            '.ansible/callback_facts.yaml'),
-            env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
-        self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
-
-    def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
-        """ Check that we ran playbook with expected inputs. """
-        hosts = run_playbook_mock.call_args[0][0]
-        hosts_to_run_on = run_playbook_mock.call_args[0][1]
-        self.assertEquals(exp_hosts_len, len(hosts))
-        self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-
-    def _verify_config_hosts(self, written_config, host_count):
-        self.assertEquals(host_count, len(written_config['hosts']))
-        for h in written_config['hosts']:
-            self.assertTrue(h['node'])
-            self.assertTrue('ip' in h)
-            self.assertTrue('hostname' in h)
-            self.assertTrue('public_ip' in h)
-            self.assertTrue('public_hostname' in h)
-
     @patch('ooinstall.openshift_ansible.run_main_playbook')
     @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_full_run(self, load_facts_mock, run_playbook_mock):
@@ -416,6 +543,7 @@ class AttendedCliTests(OOCliFixture):
         written_config = self._read_yaml(self.config_file)
         self._verify_config_hosts(written_config, 3)
 
+    # interactive with config file and some installed some uninstalled hosts
     @patch('ooinstall.openshift_ansible.run_main_playbook')
     @patch('ooinstall.openshift_ansible.load_system_facts')
     def test_add_nodes(self, load_facts_mock, run_playbook_mock):
@@ -472,6 +600,29 @@ class AttendedCliTests(OOCliFixture):
         written_config = self._read_yaml(config_file)
         self._verify_config_hosts(written_config, 3)
 
+    #interactive with config file and all installed hosts
+    @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.load_system_facts')
+    def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock):
+        mock_facts = copy.deepcopy(MOCK_FACTS)
+        mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
+        mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
+
+        cli_input = self._build_input(hosts=[
+            ('10.0.0.1', True),
+            ],
+                                      add_nodes=[('10.0.0.2', False)],
+                                      ssh_user='root',
+                                      variant_num=1,
+                                      confirm_facts='y')
+
+        self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
+                                         run_playbook_mock,
+                                         cli_input,
+                                         exp_hosts_len=2,
+                                         exp_hosts_to_run_on_len=2,
+                                         force=False)
+
 # TODO: test with config file, attended add node
 # TODO: test with config file, attended new node already in config file
 # TODO: test with config file, attended new node already in config file, plus manually added nodes