ソースを参照

Refactor upgrade codepaths step 1

This commit refactors some upgrade code paths.

Touched areas are:

1) Reduces usage of 'oo_all_hosts' in various places,
especially when running upgrade_control_plane.

2) Reuses common code across the various upgrade*
playbooks.

3) Moves docker upgrade checks into the container_runtime role.

4) Combines smaller playbooks and plays to reduce file sprawl.
Michael Gugino 7 年 前
コミット
e1df3b6ba7
29 ファイル変更 406 行追加 1357 行削除
  1. 0 21
      playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml
  2. 1 1
      playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml
  3. 0 11
      playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
  4. 3 1
      playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
  5. 0 25
      playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh
  6. 0 57
      playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
  7. 1 1
      playbooks/common/openshift-cluster/upgrades/init.yml
  8. 77 0
      playbooks/common/openshift-cluster/upgrades/pre/config.yml
  9. 0 6
      playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml
  10. 0 22
      playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml
  11. 93 0
      playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
  12. 0 37
      playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
  13. 0 22
      playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml
  14. 0 16
      playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
  15. 0 37
      playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
  16. 12 87
      playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
  17. 15 87
      playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
  18. 14 85
      playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
  19. 12 91
      playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
  20. 15 91
      playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
  21. 14 85
      playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
  22. 12 91
      playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
  23. 15 91
      playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
  24. 14 85
      playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml
  25. 12 101
      playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml
  26. 15 96
      playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
  27. 14 95
      playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml
  28. 67 0
      roles/container_runtime/tasks/docker_upgrade_check.yml
  29. 0 15
      roles/openshift_node/tasks/docker/upgrade.yml

+ 0 - 21
playbooks/common/openshift-cluster/upgrades/cleanup_unused_images.yml

@@ -1,22 +1 @@
 ---
-- name: Check Docker image count
-  shell: "docker images -aq | wc -l"
-  register: docker_image_count
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Remove unused Docker images for Docker 1.10+ migration
-  shell: "docker rmi `docker images -aq`"
-  # Will fail on images still in use:
-  failed_when: false
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
-  shell: "docker images -aq | wc -l"
-  register: docker_image_count
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml

@@ -1,6 +1,6 @@
 ---
 - name: Disable excluders
-  hosts: oo_masters_to_config
+  hosts: "{{ l_upgrade_excluder_hosts }}"
   gather_facts: no
   roles:
   - role: openshift_excluder

+ 0 - 11
playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml

@@ -1,11 +0,0 @@
----
-- name: Disable excluders
-  hosts: oo_nodes_to_upgrade:!oo_masters_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_excluder
-    r_openshift_excluder_action: disable
-    r_openshift_excluder_verify_upgrade: true
-    r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}"
-    r_openshift_excluder_package_state: latest
-    r_openshift_excluder_docker_package_state: latest

+ 3 - 1
playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -19,7 +19,9 @@
       msg: Cannot upgrade Docker on Atomic operating systems.
     when: openshift.common.is_atomic | bool
 
-  - include_tasks: upgrade_check.yml
+  - include_role:
+      name: container_runtime
+      tasks_from: docker_upgrade_check.yml
     when: docker_upgrade is not defined or docker_upgrade | bool
 
 

+ 0 - 25
playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh

@@ -1,25 +0,0 @@
-#!/bin/bash
-
-# Stop any running containers
-running_container_ids=`docker ps -q`
-if test -n "$running_container_ids"
-then
-    docker stop $running_container_ids
-fi
-
-# Delete all containers
-container_ids=`docker ps -a -q`
-if test -n "$container_ids"
-then
-    docker rm -f -v $container_ids
-fi
-
-# Delete all images (forcefully)
-image_ids=`docker images -aq`
-if test -n "$image_ids"
-then
-    # Some layers are deleted recursively and are no longer present
-    # when docker goes to remove them:
-    docker rmi -f `docker images -aq` || true
-fi
-

+ 0 - 57
playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml

@@ -1,58 +1 @@
 ---
-
-# This snippet determines if a Docker upgrade is required by checking the inventory
-# variables, the available packages, and sets l_docker_upgrade to True if so.
-
-- set_fact:
-    docker_upgrade: True
-  when: docker_upgrade is not defined
-
-- name: Check if Docker is installed
-  command: rpm -q docker
-  args:
-    warn: no
-  register: pkg_check
-  failed_when: pkg_check.rc > 1
-  changed_when: no
-
-- name: Get current version of Docker
-  command: "{{ repoquery_installed }} --qf '%{version}' docker"
-  register: curr_docker_version
-  retries: 4
-  until: curr_docker_version | succeeded
-  changed_when: false
-
-- name: Get latest available version of Docker
-  command: >
-    {{ repoquery_cmd }} --qf '%{version}' "docker"
-  register: avail_docker_version
-  retries: 4
-  until: avail_docker_version | succeeded
-  # Don't expect docker rpm to be available on hosts that don't already have it installed:
-  when: pkg_check.rc == 0
-  failed_when: false
-  changed_when: false
-
-- fail:
-    msg: This playbook requires access to Docker 1.12 or later
-  # Disable the 1.12 requirement if the user set a specific Docker version
-  when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
-
-# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
-- set_fact:
-    l_docker_upgrade: False
-
-# Make sure a docker_version is set if none was requested:
-- set_fact:
-    docker_version: "{{ avail_docker_version.stdout }}"
-  when: pkg_check.rc == 0 and docker_version is not defined
-
-- name: Flag for Docker upgrade if necessary
-  set_fact:
-    l_docker_upgrade: True
-  when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
-
-- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
-  set_fact:
-    docker_upgrade_nuke_images: True
-  when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/init.yml

@@ -8,7 +8,7 @@
 - import_playbook: ../../../init/facts.yml
 
 - name: Ensure firewall is not switched during upgrade
-  hosts: oo_all_hosts
+  hosts: "{{ l_upgrade_no_switch_firewall_hosts | default('oo_all_hosts') }}"
   vars:
     openshift_master_installed_version: "{{ hostvars[groups.oo_first_master.0].openshift.common.version }}"
   tasks:

+ 77 - 0
playbooks/common/openshift-cluster/upgrades/pre/config.yml

@@ -0,0 +1,77 @@
+---
+
+# Pre-upgrade
+- import_playbook: ../initialize_nodes_to_upgrade.yml
+
+- import_playbook: verify_cluster.yml
+
+- name: Update repos on upgrade hosts
+  hosts: "{{ l_upgrade_repo_hosts }}"
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: "{{ l_upgrade_no_proxy_hosts }}"
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
+                                                }}"
+    when:
+    - openshift_http_proxy is defined or openshift_https_proxy is defined
+    - openshift_generate_no_proxy_hosts | default(True) | bool
+
+- name: OpenShift Health Checks
+  hosts: "{{ l_upgrade_health_check_hosts }}"
+  any_errors_fatal: true
+  roles:
+  - openshift_health_checker
+  vars:
+  - r_openshift_health_checker_playbook_context: upgrade
+  post_tasks:
+  - name: Run health checks (upgrade)
+    action: openshift_health_check
+    args:
+      checks:
+      - disk_availability
+      - memory_availability
+      - docker_image_availability
+
+- import_playbook: ../disable_excluders.yml
+
+- import_playbook: ../../../../init/version.yml
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+# If we're only upgrading nodes, we need to ensure masters are already upgraded
+- name: Verify masters are already upgraded
+  hosts: oo_masters_to_config
+  tasks:
+  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+    when:
+    - l_upgrade_nodes_only | default(False) | bool
+    - openshift.common.version != openshift_version
+
+# If we're only upgrading nodes, skip this.
+- import_playbook: ../../../../openshift-master/private/validate_restart.yml
+  when: not (l_upgrade_nodes_only | default(False)) | bool
+
+- name: Verify upgrade targets
+  hosts: "{{ l_upgrade_verify_targets_hosts }}"
+  roles:
+  - role: openshift_facts
+  tasks:
+  - include_tasks: verify_upgrade_targets.yml
+
+- name: Verify docker upgrade targets
+  hosts: "{{ l_upgrade_docker_target_hosts }}"
+  tasks:
+  - include_role:
+      name: container_runtime
+      tasks_from: docker_upgrade_check.yml

+ 0 - 6
playbooks/common/openshift-cluster/upgrades/pre/gate_checks.yml

@@ -1,6 +0,0 @@
----
-- name: Flag pre-upgrade checks complete for hosts without errors
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - set_fact:
-      pre_upgrade_complete: True

+ 0 - 22
playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml

@@ -1,22 +0,0 @@
----
-# Only check if docker upgrade is required if docker_upgrade is not
-# already set to False.
-- include_tasks: ../../docker/upgrade_check.yml
-  when:
-  - docker_upgrade is not defined or (docker_upgrade | bool)
-  - not (openshift.common.is_atomic | bool)
-
-# Additional checks for Atomic hosts:
-
-- name: Determine available Docker
-  shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
-  register: g_atomic_docker_version_result
-  when: openshift.common.is_atomic | bool
-
-- set_fact:
-    l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
-  when: openshift.common.is_atomic | bool
-
-- fail:
-    msg: This playbook requires access to Docker 1.12 or later
-  when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')

+ 93 - 0
playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml

@@ -0,0 +1,93 @@
+---
+# Verify a few items before we proceed with upgrade process.
+
+- name: Verify upgrade can proceed on first master
+  hosts: oo_first_master
+  gather_facts: no
+  tasks:
+  - fail:
+      msg: >
+        This upgrade is only supported for origin and openshift-enterprise
+        deployment types
+    when: deployment_type not in ['origin','openshift-enterprise']
+
+  # Error out in situations where the user has older versions specified in their
+  # inventory in any of the openshift_release, openshift_image_tag, and
+  # openshift_pkg_version variables. These must be removed or updated to proceed
+  # with upgrade.
+  # TODO: Should we block if you're *over* the next major release version as well?
+  - fail:
+      msg: >
+        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+        valid version for a {{ openshift_upgrade_target }} upgrade
+    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+  - fail:
+      msg: >
+        openshift_image_tag is {{ openshift_image_tag }} which is not a
+        valid version for a {{ openshift_upgrade_target }} upgrade
+    when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
+
+  - set_fact:
+      openshift_release: "{{ openshift_release[1:] }}"
+    when: openshift_release is defined and openshift_release[0] == 'v'
+
+  - fail:
+      msg: >
+        openshift_release is {{ openshift_release }} which is not a
+        valid release for a {{ openshift_upgrade_target }} upgrade
+    when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
+
+- name: Verify master processes
+  hosts: oo_masters_to_config
+  roles:
+  - lib_utils
+  - openshift_facts
+  tasks:
+  - name: Read master storage backend setting
+    yedit:
+      state: list
+      src: /etc/origin/master/master-config.yaml
+      key: kubernetesMasterConfig.apiServerArguments.storage-backend
+    register: _storage_backend
+
+  - fail:
+      msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
+    when:
+    # assuming the master-config.yml is properly configured, i.e. the value is a list
+    - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
+
+  - debug:
+      msg: "Storage backend is set to etcd3"
+
+  - openshift_facts:
+      role: master
+      local_facts:
+        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
+  - when: openshift.common.is_containerized | bool
+    block:
+    - set_fact:
+        master_services:
+        - "{{ openshift_service_type }}-master"
+
+    # In case of the non-ha to ha upgrade.
+    - name: Check if the {{ openshift_service_type }}-master-api.service exists
+      command: >
+        systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
+      register: master_api_service_status
+
+    - set_fact:
+        master_services:
+        - "{{ openshift_service_type }}-master-api"
+        - "{{ openshift_service_type }}-master-controllers"
+      when:
+      - master_api_service_status.stdout_lines | length > 0
+      - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+    - name: Ensure Master is running
+      service:
+        name: "{{ item }}"
+        state: started
+        enabled: yes
+      with_items: "{{ master_services }}"

+ 0 - 37
playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml

@@ -1,37 +0,0 @@
----
-- name: Verify master processes
-  hosts: oo_masters_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  - openshift_facts:
-      role: master
-      local_facts:
-        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-
-  - when: openshift.common.is_containerized | bool
-    block:
-    - set_fact:
-        master_services:
-        - "{{ openshift_service_type }}-master"
-
-    # In case of the non-ha to ha upgrade.
-    - name: Check if the {{ openshift_service_type }}-master-api.service exists
-      command: >
-        systemctl list-units {{ openshift_service_type }}-master-api.service --no-legend
-      register: master_api_service_status
-
-    - set_fact:
-        master_services:
-        - "{{ openshift_service_type }}-master-api"
-        - "{{ openshift_service_type }}-master-controllers"
-      when:
-      - master_api_service_status.stdout_lines | length > 0
-      - (openshift_service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
-
-    - name: Ensure Master is running
-      service:
-        name: "{{ item }}"
-        state: started
-        enabled: yes
-      with_items: "{{ master_services }}"

+ 0 - 22
playbooks/common/openshift-cluster/upgrades/pre/verify_etcd3_backend.yml

@@ -1,22 +0,0 @@
----
-- name: Verify all masters has etcd3 storage backend set
-  hosts: oo_masters_to_config
-  gather_facts: no
-  roles:
-  - lib_utils
-  tasks:
-  - name: Read master storage backend setting
-    yedit:
-      state: list
-      src: /etc/origin/master/master-config.yaml
-      key: kubernetesMasterConfig.apiServerArguments.storage-backend
-    register: _storage_backend
-
-  - fail:
-      msg: "Storage backend in /etc/origin/master/master-config.yaml must be set to 'etcd3' before the upgrade can continue"
-    when:
-    # assuming the master-config.yml is properly configured, i.e. the value is a list
-    - _storage_backend.result | default([], true) | length == 0 or _storage_backend.result[0] != "etcd3"
-
-  - debug:
-      msg: "Storage backend is set to etcd3"

+ 0 - 16
playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml

@@ -1,16 +0,0 @@
----
-- name: OpenShift Health Checks
-  hosts: oo_all_hosts
-  any_errors_fatal: true
-  roles:
-  - openshift_health_checker
-  vars:
-  - r_openshift_health_checker_playbook_context: upgrade
-  post_tasks:
-  - name: Run health checks (upgrade)
-    action: openshift_health_check
-    args:
-      checks:
-      - disk_availability
-      - memory_availability
-      - docker_image_availability

+ 0 - 37
playbooks/common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml

@@ -1,37 +0,0 @@
----
-- name: Verify upgrade can proceed on first master
-  hosts: oo_first_master
-  gather_facts: no
-  tasks:
-  - fail:
-      msg: >
-        This upgrade is only supported for origin and openshift-enterprise
-        deployment types
-    when: deployment_type not in ['origin','openshift-enterprise']
-
-  # Error out in situations where the user has older versions specified in their
-  # inventory in any of the openshift_release, openshift_image_tag, and
-  # openshift_pkg_version variables. These must be removed or updated to proceed
-  # with upgrade.
-  # TODO: Should we block if you're *over* the next major release version as well?
-  - fail:
-      msg: >
-        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
-        valid version for a {{ openshift_upgrade_target }} upgrade
-    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
-
-  - fail:
-      msg: >
-        openshift_image_tag is {{ openshift_image_tag }} which is not a
-        valid version for a {{ openshift_upgrade_target }} upgrade
-    when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
-
-  - set_fact:
-      openshift_release: "{{ openshift_release[1:] }}"
-    when: openshift_release is defined and openshift_release[0] == 'v'
-
-  - fail:
-      msg: >
-        openshift_release is {{ openshift_release }} which is not a
-        valid release for a {{ openshift_upgrade_target }} upgrade
-    when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')

+ 12 - 87
playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml

@@ -15,99 +15,24 @@
       openshift_upgrade_target: '3.6'
       openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
 
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
-  tags:
-  - pre_upgrade
-  roles:
-  - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
 
 - import_playbook: validator.yml
-  tags:
-  - pre_upgrade
 
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
   tasks:
-  - include_tasks: ../cleanup_unused_images.yml
+  - set_fact:
+      pre_upgrade_complete: True
+
+# Pre-upgrade completed
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:

+ 15 - 87
playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml

@@ -12,106 +12,34 @@
 # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
 #
 - import_playbook: ../init.yml
-  tags:
-  - pre_upgrade
+  vars:
+    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tags:
-  - pre_upgrade
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
   tasks:
   - set_fact:
       openshift_upgrade_target: '3.6'
       openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
 
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos on control plane hosts
-  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
-  tags:
-  - pre_upgrade
-  roles:
-  - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_masters_to_config:oo_etcd_to_config
-  tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_masters_to_config"
 
 - import_playbook: validator.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
 
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_etcd_to_config
   tasks:
-  - include_tasks: ../cleanup_unused_images.yml
+  - set_fact:
+      pre_upgrade_complete: True
+
+# Pre-upgrade completed
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:

+ 14 - 85
playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml

@@ -17,93 +17,22 @@
       openshift_upgrade_target: '3.6'
       openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
 
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos on nodes
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
-  roles:
-  - openshift_repos
-  tags:
-  - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
-  hosts: oo_masters_to_config
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_nodes_to_config"
+    l_upgrade_no_proxy_hosts: "oo_all_hosts"
+    l_upgrade_health_check_hosts: "oo_nodes_to_config"
+    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+    l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+    l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
   tasks:
-  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
-    when: openshift.common.version != openshift_version
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+  - set_fact:
+      pre_upgrade_complete: True
 
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../cleanup_unused_images.yml
+# Pre-upgrade completed
 
 - import_playbook: ../upgrade_nodes.yml

+ 12 - 91
playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml

@@ -15,103 +15,24 @@
       openshift_upgrade_target: '3.7'
       openshift_upgrade_min: '3.6'
 
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
-  tags:
-  - pre_upgrade
-  roles:
-  - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
 
 - import_playbook: validator.yml
-  tags:
-  - pre_upgrade
 
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
   tasks:
-  - include_tasks: ../cleanup_unused_images.yml
+  - set_fact:
+      pre_upgrade_complete: True
+
+# Pre-upgrade completed
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:

+ 15 - 91
playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml

@@ -12,110 +12,34 @@
 # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
 #
 - import_playbook: ../init.yml
-  tags:
-  - pre_upgrade
+  vars:
+    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tags:
-  - pre_upgrade
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
   tasks:
   - set_fact:
       openshift_upgrade_target: '3.7'
       openshift_upgrade_min: '3.6'
 
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos on control plane hosts
-  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
-  tags:
-  - pre_upgrade
-  roles:
-  - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_masters_to_config:oo_etcd_to_config
-  tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_masters_to_config"
 
 - import_playbook: validator.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
 
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_etcd_to_config
   tasks:
-  - include_tasks: ../cleanup_unused_images.yml
+  - set_fact:
+      pre_upgrade_complete: True
+
+# Pre-upgrade completed
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:

+ 14 - 85
playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml

@@ -17,93 +17,22 @@
       openshift_upgrade_target: '3.7'
       openshift_upgrade_min: '3.6'
 
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos on nodes
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
-  roles:
-  - openshift_repos
-  tags:
-  - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
-  hosts: oo_masters_to_config
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_nodes_to_config"
+    l_upgrade_no_proxy_hosts: "oo_all_hosts"
+    l_upgrade_health_check_hosts: "oo_nodes_to_config"
+    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+    l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+    l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
   tasks:
-  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
-    when: openshift.common.version != openshift_version
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+  - set_fact:
+      pre_upgrade_complete: True
 
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../cleanup_unused_images.yml
+# Pre-upgrade completed
 
 - import_playbook: ../upgrade_nodes.yml

+ 12 - 91
playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml

@@ -15,103 +15,24 @@
       openshift_upgrade_target: '3.8'
       openshift_upgrade_min: '3.7'
 
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
-  tags:
-  - pre_upgrade
-  roles:
-  - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
 
 - import_playbook: validator.yml
-  tags:
-  - pre_upgrade
 
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
   tasks:
-  - include_tasks: ../cleanup_unused_images.yml
+  - set_fact:
+      pre_upgrade_complete: True
+
+# Pre-upgrade completed
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:

+ 15 - 91
playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml

@@ -12,110 +12,34 @@
 # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
 #
 - import_playbook: ../init.yml
-  tags:
-  - pre_upgrade
+  vars:
+    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tags:
-  - pre_upgrade
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
   tasks:
   - set_fact:
       openshift_upgrade_target: '3.8'
       openshift_upgrade_min: '3.7'
 
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos on control plane hosts
-  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
-  tags:
-  - pre_upgrade
-  roles:
-  - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_masters_to_config:oo_etcd_to_config
-  tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_masters_to_config"
 
 - import_playbook: validator.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
 
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_etcd_to_config
   tasks:
-  - include_tasks: ../cleanup_unused_images.yml
+  - set_fact:
+      pre_upgrade_complete: True
+
+# Pre-upgrade completed
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:

+ 14 - 85
playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_nodes.yml

@@ -17,93 +17,22 @@
       openshift_upgrade_target: '3.8'
       openshift_upgrade_min: '3.7'
 
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos on nodes
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
-  roles:
-  - openshift_repos
-  tags:
-  - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-- name: Verify masters are already upgraded
-  hosts: oo_masters_to_config
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_nodes_to_config"
+    l_upgrade_no_proxy_hosts: "oo_all_hosts"
+    l_upgrade_health_check_hosts: "oo_nodes_to_config"
+    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+    l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+    l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
   tasks:
-  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
-    when: openshift.common.version != openshift_version
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+  - set_fact:
+      pre_upgrade_complete: True
 
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../cleanup_unused_images.yml
+# Pre-upgrade completed
 
 - import_playbook: ../upgrade_nodes.yml

+ 12 - 101
playbooks/common/openshift-cluster/upgrades/v3_9/upgrade.yml

@@ -3,121 +3,32 @@
 # Full Control Plane + Nodes Upgrade
 #
 - import_playbook: ../init.yml
-  tags:
-  - pre_upgrade
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_all_hosts
-  tags:
-  - pre_upgrade
   tasks:
   - set_fact:
       openshift_upgrade_target: '3.9'
       openshift_upgrade_min: '3.7'
 
-# Pre-upgrade
-
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
-  tags:
-  - pre_upgrade
-  roles:
-  - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-    # We skip the docker role at this point in upgrade to prevent
-    # unintended package, container, or config upgrades which trigger
-    # docker restarts. At this early stage of upgrade we can assume
-    # docker is configured and running.
-    skip_docker_role: True
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
-  tasks:
-  - import_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config:oo_nodes_to_upgrade"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:oo_masters_to_config"
 
 - import_playbook: validator.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
 
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
+- name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
   tasks:
-  - include_tasks: ../cleanup_unused_images.yml
+  - set_fact:
+      pre_upgrade_complete: True
+
+# Pre-upgrade completed
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:

+ 15 - 96
playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml

@@ -12,116 +12,35 @@
 # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
 #
 - import_playbook: ../init.yml
-  tags:
-  - pre_upgrade
+  vars:
+    l_upgrade_no_switch_firewall_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
 
 - name: Configure the upgrade target for the common upgrade tasks
-  hosts: oo_all_hosts
-  tags:
-  - pre_upgrade
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
   tasks:
   - set_fact:
       openshift_upgrade_target: '3.9'
       openshift_upgrade_min: '3.7'
 
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_etcd3_backend.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos on control plane hosts
-  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
-  tags:
-  - pre_upgrade
-  roles:
-  - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_master_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-    # We skip the docker role at this point in upgrade to prevent
-    # unintended package, container, or config upgrades which trigger
-    # docker restarts. At this early stage of upgrade we can assume
-    # docker is configured and running.
-    skip_docker_role: True
-
-- import_playbook: ../../../../openshift-master/private/validate_restart.yml
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_no_proxy_hosts: "oo_masters_to_config"
+    l_upgrade_health_check_hosts: "oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config"
+    l_upgrade_verify_targets_hosts: "oo_masters_to_config"
+    l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
+    l_upgrade_excluder_hosts: "oo_masters_to_config"
 
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
+- import_playbook: validator.yml
 
-- name: Verify docker upgrade targets
+- name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_etcd_to_config
   tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: validator.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
+  - set_fact:
+      pre_upgrade_complete: True
 
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+# Pre-upgrade completed
 
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
-  hosts: oo_masters_to_config:oo_etcd_to_config
-  tasks:
-  - include_tasks: ../cleanup_unused_images.yml
 
 - import_playbook: ../upgrade_control_plane.yml
   vars:

+ 14 - 95
playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml

@@ -5,111 +5,30 @@
 # Upgrades nodes only, but requires the control plane to have already been upgraded.
 #
 - import_playbook: ../init.yml
-  tags:
-  - pre_upgrade
 
 - name: Configure the upgrade target for the common upgrade tasks
   hosts: oo_all_hosts
-  tags:
-  - pre_upgrade
   tasks:
   - set_fact:
       openshift_upgrade_target: '3.9'
       openshift_upgrade_min: '3.7'
 
-# Pre-upgrade
-- import_playbook: ../initialize_nodes_to_upgrade.yml
-  tags:
-  - pre_upgrade
-
-- name: Update repos on nodes
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
-  roles:
-  - openshift_repos
-  tags:
-  - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
-  hosts: oo_masters_to_config:oo_nodes_to_upgrade
-  tags:
-  - pre_upgrade
-  tasks:
-  - set_fact:
-      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
-                                                    | union(groups['oo_masters_to_config'])
-                                                    | union(groups['oo_etcd_to_config'] | default([])))
-                                                | oo_collect('openshift.common.hostname') | default([]) | join (',')
-                                                }}"
-    when:
-    - openshift_http_proxy is defined or openshift_https_proxy is defined
-    - openshift_generate_no_proxy_hosts | default(True) | bool
-
-- import_playbook: ../pre/verify_inventory_vars.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/verify_health_checks.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../disable_node_excluders.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../../../../init/version.yml
-  tags:
-  - pre_upgrade
+- import_playbook: ../pre/config.yml
   vars:
-    # Request specific openshift_release and let the openshift_version role handle converting this
-    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
-    # defined, and overriding the normal behavior of protecting the installed version
-    openshift_release: "{{ openshift_upgrade_target }}"
-    openshift_protect_installed_version: False
-
-    # We skip the docker role at this point in upgrade to prevent
-    # unintended package, container, or config upgrades which trigger
-    # docker restarts. At this early stage of upgrade we can assume
-    # docker is configured and running.
-    skip_docker_role: True
-
-- name: Verify masters are already upgraded
-  hosts: oo_masters_to_config
-  tags:
-  - pre_upgrade
+    l_upgrade_repo_hosts: "oo_nodes_to_config"
+    l_upgrade_no_proxy_hosts: "oo_all_hosts"
+    l_upgrade_health_check_hosts: "oo_nodes_to_config"
+    l_upgrade_verify_targets_hosts: "oo_nodes_to_config"
+    l_upgrade_docker_target_hosts: "oo_nodes_to_config"
+    l_upgrade_excluder_hosts: "oo_nodes_to_config:!oo_masters_to_config"
+    l_upgrade_nodes_only: True
+
+- name: Flag pre-upgrade checks complete for hosts without errors
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
   tasks:
-  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
-    when: openshift.common.version != openshift_version
-
-- import_playbook: ../pre/verify_control_plane_running.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify upgrade targets
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/verify_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- name: Verify docker upgrade targets
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../pre/tasks/verify_docker_upgrade_targets.yml
-  tags:
-  - pre_upgrade
-
-- import_playbook: ../pre/gate_checks.yml
-  tags:
-  - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+  - set_fact:
+      pre_upgrade_complete: True
 
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include_tasks: ../cleanup_unused_images.yml
+# Pre-upgrade completed
 
 - import_playbook: ../upgrade_nodes.yml

+ 67 - 0
roles/container_runtime/tasks/docker_upgrade_check.yml

@@ -0,0 +1,67 @@
+---
+
+# This snippet determines if a Docker upgrade is required by checking the inventory
+# variables, the available packages, and sets l_docker_upgrade to True if so.
+
+- set_fact:
+    docker_upgrade: True
+  when: docker_upgrade is not defined
+
+- name: Check if Docker is installed
+  command: rpm -q docker
+  args:
+    warn: no
+  register: pkg_check
+  failed_when: pkg_check.rc > 1
+  changed_when: no
+
+- name: Get current version of Docker
+  command: "{{ repoquery_installed }} --qf '%{version}' docker"
+  register: curr_docker_version
+  retries: 4
+  until: curr_docker_version | succeeded
+  changed_when: false
+
+- name: Get latest available version of Docker
+  command: >
+    {{ repoquery_cmd }} --qf '%{version}' "docker"
+  register: avail_docker_version
+  retries: 4
+  until: avail_docker_version | succeeded
+  # Don't expect docker rpm to be available on hosts that don't already have it installed:
+  when: pkg_check.rc == 0
+  failed_when: false
+  changed_when: false
+
+- fail:
+    msg: This playbook requires access to Docker 1.12 or later
+  # Disable the 1.12 requirement if the user set a specific Docker version
+  when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
+
+# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
+- set_fact:
+    l_docker_upgrade: False
+
+# Make sure a docker_version is set if none was requested:
+- set_fact:
+    docker_version: "{{ avail_docker_version.stdout }}"
+  when: pkg_check.rc == 0 and docker_version is not defined
+
+- name: Flag for Docker upgrade if necessary
+  set_fact:
+    l_docker_upgrade: True
+  when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
+
+# Additional checks for Atomic hosts:
+- name: Determine available Docker
+  shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+  register: g_atomic_docker_version_result
+  when: openshift.common.is_atomic | bool
+
+- set_fact:
+    l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+  when: openshift.common.is_atomic | bool
+
+- fail:
+    msg: This playbook requires access to Docker 1.12 or later
+  when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')

+ 0 - 15
roles/openshift_node/tasks/docker/upgrade.yml

@@ -2,7 +2,6 @@
 # input variables:
 # - openshift_service_type
 # - openshift.common.is_containerized
-# - docker_upgrade_nuke_images
 # - docker_version
 # - skip_docker_restart
 
@@ -12,20 +11,6 @@
 
 - debug: var=docker_image_count.stdout
 
-# TODO(jchaloup): put all docker_upgrade_nuke_images into a block with only one condition
-- name: Remove all containers and images
-  script: nuke_images.sh
-  register: nuke_images_result
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- name: Check Docker image count
-  shell: "docker images -aq | wc -l"
-  register: docker_image_count
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
-- debug: var=docker_image_count.stdout
-  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
-
 - service:
     name: docker
     state: stopped