Przeglądaj źródła

First pass at the upgrade process

Brenton Leanhardt 9 lat temu
rodzic
commit
50b9eefd2b
24 zmienionych plików z 614 dodań i 35 usunięć
  1. 17 0
      playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md
  2. 18 0
      playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
  3. 52 0
      playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh
  4. 22 0
      playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
  5. 24 0
      playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
  6. 0 9
      playbooks/common/openshift-cluster/upgrades/files/versions.sh
  7. 1 1
      playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
  8. 1 1
      playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
  9. 6 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
  10. 20 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
  11. 1 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins
  12. 1 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library
  13. 1 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins
  14. 57 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
  15. 220 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
  16. 1 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles
  17. 6 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
  18. 147 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
  19. 1 1
      roles/cockpit/tasks/main.yml
  20. 1 6
      roles/openshift_cli/tasks/main.yml
  21. 10 1
      roles/openshift_cli/templates/openshift.j2
  22. 0 5
      roles/openshift_master/tasks/main.yml
  23. 0 5
      roles/openshift_master_ca/tasks/main.yml
  24. 7 6
      roles/openshift_repos/tasks/main.yaml

+ 17 - 0
playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md

@@ -0,0 +1,17 @@
+# v3.1 to v3.2 upgrade playbook
+
+## Overview
+This playbook currently performs the
+following steps.
+
+**TODO: update for current steps**
+ * Upgrade and restart master services
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml

+ 18 - 0
playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml

@@ -0,0 +1,18 @@
+---
+- include: ../../../../common/openshift-cluster/evaluate_groups.yml
+  vars:
+    g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+    g_master_hosts: "{{ groups.masters | default([]) }}"
+    g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+    g_node_hosts: "{{ groups.nodes | default([]) }}"
+    g_lb_hosts: "{{ groups.lb | default([]) }}"
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+  vars:
+    openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+  vars:
+    openshift_deployment_type: "{{ deployment_type }}"
+- include: ../../../openshift-master/restart.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml

+ 52 - 0
playbooks/common/openshift-cluster/upgrades/files/ensure_system_units_have_version.sh

@@ -0,0 +1,52 @@
+#!/bin/bash
+set -e
+
+SERVICE_TYPE=$1
+DEPLOYMENT_TYPE=$2
+VERSION="v${3}"
+
+add_image_version_to_sysconfig () {
+    unit_name=$2
+    sysconfig_file=/etc/sysconfig/${unit_name}
+
+    if ! grep IMAGE_VERSION ${sysconfig_file}; then
+        sed -i "/CONFIG_FILE/a IMAGE_VERSION=${1}" ${sysconfig_file}
+    else
+        sed -i "s/\(IMAGE_VERSION=\).*/\1${1}/" ${sysconfig_file}
+    fi
+}
+
+add_image_version_to_unit () {
+    deployment_type=$1
+    unit_file=$2
+
+    if ! grep IMAGE_VERSION $unit_file; then
+        image_namespace="openshift/"
+        if [ $deployment_type == "atomic-enterprise" ]; then
+            image_namespace="aep3/"
+        elif [ $deployment_type == "openshift-enterprise" ]; then
+            image_namespace="openshift3/"
+        fi
+
+        sed -i "s|\(${image_namespace}[a-zA-Z0-9]\+\)|\1:\${IMAGE_VERSION}|" $unit_file
+    fi
+}
+
+for unit_file in $(ls /etc/systemd/system/${SERVICE_TYPE}*.service | head -n1); do
+    unit_name=$(basename -s .service ${unit_file})
+    add_image_version_to_sysconfig $VERSION $unit_name
+    add_image_version_to_unit $DEPLOYMENT_TYPE $unit_file
+done
+
+if [ -e /etc/sysconfig/openvswitch ]; then
+    add_image_version_to_sysconfig $VERSION openvswitch
+else
+    # TODO: add this to config.yml
+    echo IMAGE_VERSION=${VERSION} > /etc/sysconfig/openvswitch
+fi 
+if ! grep EnvironmentFile /etc/systemd/system/openvswitch.service > /dev/null; then
+    sed -i "/Service/a EnvironmentFile=/etc/sysconfig/openvswitch" /etc/systemd/system/openvswitch.service
+fi
+add_image_version_to_unit $DEPLOYMENT_TYPE /etc/systemd/system/openvswitch.service
+
+systemctl daemon-reload

+ 22 - 0
playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh

@@ -0,0 +1,22 @@
+#!/bin/bash
+
+# Here we don't really care if this is a master, api, controller or node image.
+# We just need to know the version of one of them.
+unit_file=$(ls /etc/systemd/system/${1}*.service | head -n1)
+installed_container_name=$(basename -s .service ${unit_file})
+installed=$(docker exec ${installed_container_name} openshift version | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+if [ ${1} == "origin" ]; then
+    image_name="openshift/origin"
+elif grep aep $unit_file > /dev/null; then
+    image_name="aep3/aep"
+elif grep ose $unit_file > /dev/null; then
+    image_name="openshift3/ose"
+fi
+
+docker pull ${image_name} 1>&2
+available=$(docker run --rm ${image_name} version | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"

+ 24 - 0
playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh

@@ -0,0 +1,24 @@
+#!/bin/bash
+
+while getopts ":c" opt; do
+  case $opt in
+    c)
+      echo "-c was triggered!" >&2
+      containerized="TRUE"
+      ;;
+    \?)
+      echo "Invalid option: -$OPTARG" >&2
+      ;;
+  esac
+done
+
+if [ "${containerized}" == "TRUE" ] ; then
+  docker exec atomic-openshift-master rpm -q atomic-openshift 
+else
+  installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ')
+  available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ')
+fi 
+
+echo "---"
+echo "curr_version: ${installed}"
+echo "avail_version: ${available}"

+ 0 - 9
playbooks/common/openshift-cluster/upgrades/files/versions.sh

@@ -1,9 +0,0 @@
-#!/bin/bash
-
-yum_installed=$(yum list installed -e 0 -q "$@" 2>&1 | tail -n +2 | awk '{ print $2 }' | sort -r | tr '\n' ' ')
-
-yum_available=$(yum list available -e 0 -q "$@" 2>&1 | tail -n +2 | grep -v 'el7ose' | awk '{ print $2 }' | sort -r | tr '\n' ' ')
-
-echo "---"
-echo "curr_version: ${yum_installed}"
-echo "avail_version: ${yum_available}"

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -66,7 +66,7 @@
       g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
 
   - name: Determine available versions
-    script: ../files/versions.sh {{ g_new_service_name }} openshift
+    script: ../files/rpm_versions.sh {{ g_new_service_name }} openshift
     register: g_versions_result
 
   - set_fact:

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml

@@ -41,7 +41,7 @@
       g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
 
   - name: Determine available versions
-    script: ../files/versions.sh {{ g_new_service_name }}
+    script: ../files/rpm_versions.sh {{ g_new_service_name }}
     register: g_versions_result
 
   - set_fact:

+ 6 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml

@@ -0,0 +1,6 @@
+- name: Update system_units
+  script: ../files/ensure_system_units_have_version.sh {{ openshift.common.service_type }} {{ openshift.common.deployment_type }} {{ g_new_version }}
+
+- name: Ensure python-yaml present for config upgrade
+  action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+  when: not openshift.common.is_atomic | bool

+ 20 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml

@@ -0,0 +1,20 @@
+---
+- name: Upgrade Docker
+  hosts: oo_masters_to_config
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+  - name: Check if Docker is installed
+    command: rpm -q docker
+    register: pkg_check
+    failed_when: pkg_check.rc > 1
+    changed_when: no
+
+  - name: Upgrade Docker
+    command: "{{ ansible_pkg_mgr}} update -y docker"
+    when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
+    register: docker_upgrade
+
+  - name: Restart Docker
+    service: name=docker state=restarted
+    when: docker_upgrade | changed

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/filter_plugins

@@ -0,0 +1 @@
+../../../../../filter_plugins

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/library

@@ -0,0 +1 @@
+../library

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/lookup_plugins

@@ -0,0 +1 @@
+../../../../../lookup_plugins

+ 57 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml

@@ -0,0 +1,57 @@
+---
+###############################################################################
+# Post upgrade - Upgrade default router, default registry and examples
+###############################################################################
+- name: Upgrade default router and default registry
+  hosts: oo_first_master
+  vars:
+    openshift_deployment_type: "{{ deployment_type }}"
+    registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + g_new_version  ) }}"
+    router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+    oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+  roles:
+  # Create the new templates shipped in 3.2, existing templates are left
+  # unmodified. This prevents the subsequent role definition for
+  # openshift_examples from failing when trying to replace templates that do
+  # not already exist. We could have potentially done a replace --force to
+  # create and update in one step.
+  - openshift_examples
+  # Update the existing templates
+  - role: openshift_examples
+    openshift_examples_import_command: replace
+  pre_tasks:
+  - name: Collect all routers
+    command: >
+      {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
+    register: all_routers
+    failed_when: false
+    changed_when: false
+
+  - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
+    when: all_routers.rc == 0
+
+  - set_fact: haproxy_routers=[]
+    when: all_routers.rc != 0
+
+  - name: Update router image to current version
+    when: all_routers.rc == 0
+    command: >
+      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
+      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
+      --api-version=v1
+    with_items: haproxy_routers
+
+  - name: Check for default registry
+    command: >
+      {{ oc_cmd }} get -n default dc/docker-registry
+    register: _default_registry
+    failed_when: false
+    changed_when: false
+
+  - name: Update registry image to current version
+    when: _default_registry.rc == 0
+    command: >
+      {{ oc_cmd }} patch dc/docker-registry -p
+      '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+      --api-version=v1
+

+ 220 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml

@@ -0,0 +1,220 @@
+---
+###############################################################################
+# Evaluate host groups and gather facts
+###############################################################################
+- name: Load openshift_facts
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_facts
+
+- name: Load openshift_facts
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_facts
+
+- name: Evaluate additional groups for upgrade
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - name: Evaluate etcd_hosts_to_backup
+    add_host:
+      name: "{{ item }}"
+      groups: etcd_hosts_to_backup
+    with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
+
+###############################################################################
+# Pre-upgrade checks
+###############################################################################
+- name: Verify upgrade can proceed
+  hosts: oo_first_master
+  vars:
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+    target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+  gather_facts: no
+  tasks:
+  - fail:
+      msg: >
+        This upgrade is only supported for origin, openshift-enterprise, and online
+        deployment types
+    when: deployment_type not in ['origin','openshift-enterprise', 'online']
+
+  - fail:
+      msg: >
+        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
+        valid version for a {{ target_version }} upgrade
+    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
+
+- name: Verify upgrade can proceed
+  hosts: oo_masters_to_config:oo_nodes_to_config
+  vars:
+    target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+  roles:
+  - openshift_cli
+  tasks:
+  - name: Clean package cache
+    command: "{{ ansible_pkg_mgr }} clean all"
+
+  - set_fact:
+      g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
+    when: not openshift.common.is_containerized | bool
+
+  - name: Determine available versions
+    script: ../files/rpm_versions.sh {{ g_new_service_name }}
+    register: g_versions_result
+    when: not openshift.common.is_containerized | bool
+
+  - name: Determine available versions
+    script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }}
+    register: g_versions_result
+    when: openshift.common.is_containerized | bool
+
+  - set_fact:
+      g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
+
+  - set_fact:
+      g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
+    when: openshift_pkg_version is not defined
+
+  - set_fact:
+      g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
+    when: openshift_pkg_version is defined
+
+  - name: Update systemd units
+    script: ../files/ensure_system_units_have_version.sh {{ openshift.common.service_type }} {{ openshift.common.deployment_type }} {{ g_aos_versions.curr_version }}
+    when: openshift.common.is_containerized | bool
+
+  # TODO: Remove this, used for testing
+  #- pause:
+
+  - fail:
+      msg: This playbook requires Origin 1.1 or later
+    when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+
+  - fail:
+      msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
+    when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+
+  # TODO: this may only make sense for RPM installs.  We probably need another check for containerized installs.
+  - fail:
+      msg: Upgrade packages not found
+    when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+
+  - name: Determine available Docker
+    script: ../files/rpm_versions.sh docker
+    register: g_docker_version_result
+    when: not openshift.common.is_atomic | bool
+
+  - set_fact:
+      g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
+
+  - fail:
+      msg: This playbook requires access to Docker 1.9 or later
+    when: not openshift.common.is_atomic | bool
+          and (g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<'))
+
+  # TODO: add check to upgrade ostree to get latest Docker
+
+  - set_fact:
+      pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+  hosts: localhost
+  connection: local
+  become: no
+  vars:
+    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+  tasks:
+  - set_fact:
+      pre_upgrade_completed: "{{ hostvars
+                                 | oo_select_keys(pre_upgrade_hosts)
+                                 | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+  - set_fact:
+      pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+    when: pre_upgrade_failed | length > 0
+
+###############################################################################
+# Backup etcd
+###############################################################################
+- name: Backup etcd
+  hosts: etcd_hosts_to_backup
+  vars:
+    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
+    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+  roles:
+  - openshift_facts
+  tasks:
+  # Ensure we persist the etcd role for this host in openshift_facts
+  - openshift_facts:
+      role: etcd
+      local_facts: {}
+    when: "'etcd' not in openshift"
+
+  - stat: path=/var/lib/openshift
+    register: var_lib_openshift
+
+  - stat: path=/var/lib/origin
+    register: var_lib_origin
+
+  - name: Create origin symlink if necessary
+    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
+    when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
+
+  # TODO: replace shell module with command and update later checks
+  # We assume to be using the data dir for all backups.
+  - name: Check available disk space for etcd backup
+    shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
+    register: avail_disk
+
+  # TODO: replace shell module with command and update later checks
+  - name: Check current embedded etcd disk usage
+    shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
+    register: etcd_disk_usage
+    when: embedded_etcd | bool
+
+  - name: Abort if insufficient disk space for etcd backup
+    fail:
+      msg: >
+        {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
+        {{ avail_disk.stdout }} Kb available.
+    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
+
+  - name: Install etcd (for etcdctl)
+    action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
+
+  - name: Generate etcd backup
+    command: >
+      etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+
+  - set_fact:
+      etcd_backup_complete: True
+
+  - name: Display location of etcd backup
+    debug:
+      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+
+
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      etcd_backup_completed: "{{ hostvars
+                                 | oo_select_keys(groups.etcd_hosts_to_backup)
+                                 | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+  - set_fact:
+      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+    when: etcd_backup_failed | length > 0

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/roles

@@ -0,0 +1 @@
+../../../../../roles

+ 6 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml

@@ -0,0 +1,6 @@
+- name: Upgrade packages
+  command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-{{ component }}{{ openshift_version }}"
+
+- name: Ensure python-yaml present for config upgrade
+  action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
+  when: not openshift.common.is_atomic | bool

+ 147 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml

@@ -0,0 +1,147 @@
+---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+- include: docker_upgrade.yml
+  when: not openshift.common.is_atomic | bool
+
+###############################################################################
+# Upgrade Masters
+###############################################################################
+- name: Upgrade master container
+  hosts: oo_masters_to_config
+  roles:
+  - openshift_cli
+  tasks:
+  - include: rpm_upgrade.yml component=master
+    when: not openshift.common.is_containerized | bool
+
+  - include: containerized_upgrade.yml
+    when: openshift.common.is_containerized | bool
+
+#  - name: Upgrade master configuration
+#    openshift_upgrade_config:
+#      from_version: '3.1'
+#       to_version: '3.2'
+#      role: master
+#      config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
+
+- name: Set master update status to complete
+  hosts: oo_masters_to_config
+  tasks:
+  - set_fact:
+      master_update_complete: True
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      master_update_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_masters_to_config)
+                                 | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+  - set_fact:
+      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+    when: master_update_failed | length > 0
+
+###############################################################################
+# Upgrade Nodes
+###############################################################################
+- name: Upgrade nodes
+  hosts: oo_nodes_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - include: rpm_upgrade.yml
+    vars:
+       component: "node"
+       openshift_version: "{{ openshift_pkg_version | default('') }}"
+    when: not openshift.common.is_containerized | bool
+
+  - include: containerized_upgrade.yml
+    when: openshift.common.is_containerized | bool
+
+  - name: Restart node service
+    service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+  - set_fact:
+      node_update_complete: True
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      node_update_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_nodes_to_config)
+                                 | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+  - set_fact:
+      node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+    when: node_update_failed | length > 0
+
+###############################################################################
+# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
+###############################################################################
+- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
+  hosts: oo_masters_to_config
+  vars:
+    origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+    ent_reconcile_bindings: true
+    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+  tasks:
+  - name: Reconcile Cluster Roles
+    command: >
+      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-roles --confirm
+    run_once: true
+
+  - name: Reconcile Cluster Role Bindings
+    command: >
+      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-role-bindings
+      --exclude-groups=system:authenticated
+      --exclude-groups=system:authenticated:oauth
+      --exclude-groups=system:unauthenticated
+      --exclude-users=system:anonymous
+      --additive-only=true --confirm
+    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+    run_once: true
+
+  - name: Reconcile Security Context Constraints
+    command: >
+      {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm
+    run_once: true
+
+  - set_fact:
+      reconcile_complete: True
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+  hosts: localhost
+  connection: local
+  become: no
+  tasks:
+  - set_fact:
+      reconcile_completed: "{{ hostvars
+                                 | oo_select_keys(groups.oo_masters_to_config)
+                                 | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+  - set_fact:
+      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+  - fail:
+      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+    when: reconcile_failed | length > 0

+ 1 - 1
roles/cockpit/tasks/main.yml

@@ -6,7 +6,7 @@
     - cockpit-shell
     - cockpit-bridge
     - "{{ cockpit_plugins }}"
-  when: not openshift.common.is_atomic | bool
+  when: not openshift.common.is_containerized | bool
 
 - name: Enable cockpit-ws
   service:

+ 1 - 6
roles/openshift_cli/tasks/main.yml

@@ -9,11 +9,6 @@
   action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-clients state=present"
   when: not openshift.common.is_containerized | bool
   
-- name: Pull CLI Image
-  command: >
-    docker pull {{ openshift.common.cli_image }}
-  when: openshift.common.is_containerized | bool
-
 - name: Create /usr/local/bin/openshift cli wrapper
   template:
     src: openshift.j2
@@ -30,4 +25,4 @@
     - /usr/local/bin/oadm
     - /usr/local/bin/oc
     - /usr/local/bin/kubectl
-  when: openshift.common.is_containerized | bool
+  when: openshift.common.is_containerized | bool

+ 10 - 1
roles/openshift_cli/templates/openshift.j2

@@ -20,4 +20,13 @@ See https://docs.openshift.org/latest/cli_reference/get_started_cli.html
 =================================================================================
 """
 
-docker run -i --privileged --net=host --user=${user}:${group} -v ~/.kube:/root/.kube -v /tmp:/tmp -v {{ openshift.common.config_base}}:{{ openshift.common.config_base }} -e KUBECONFIG=/root/.kube/config --entrypoint ${cmd} --rm {{ openshift.common.cli_image }} "${@}"
+image=""
+if ! docker inspect -f {% raw %}'{{ .State.Status }}'{% endraw %} {{ openshift.common.service_type }}-master | grep running > /dev/null; then
+>&2 echo """
+
+Warning: {{ openshift.common.service_type }}-master service is not running.  Using the latest image.
+"""
+    image=:`docker inspect -f {% raw %}'{{ .Image }}'{% endraw %} {{ openshift.common.service_type }}-master`
+fi
+
+docker run -i --privileged --net=host --user=${user}:${group} -v ~/.kube:/root/.kube -v /tmp:/tmp -v {{ openshift.common.config_base}}:{{ openshift.common.config_base }} -e KUBECONFIG=/root/.kube/config --entrypoint ${cmd} --rm {{ openshift.common.cli_image }}${image} "${@}"

+ 0 - 5
roles/openshift_master/tasks/main.yml

@@ -92,11 +92,6 @@
   action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version  }} state=present"
   when: not openshift.common.is_containerized | bool
 
-- name: Pull master image
-  command: >
-    docker pull {{ openshift.master.master_image }}
-  when: openshift.common.is_containerized | bool
-
 - name: Install Master docker service file
   template:
     dest: "/etc/systemd/system/{{ openshift.common.service_type }}-master.service"

+ 0 - 5
roles/openshift_master_ca/tasks/main.yml

@@ -13,11 +13,6 @@
     path: "{{ openshift_master_config_dir }}"
     state: directory
 
-- name: Pull master docker image
-  command: >
-    docker pull {{ openshift.common.cli_image }}
-  when: openshift.common.is_containerized | bool
-
 - name: Create the master certificates if they do not already exist
   command: >
     {{ openshift.common.admin_binary }} create-master-certs

+ 7 - 6
roles/openshift_repos/tasks/main.yaml

@@ -9,6 +9,7 @@
 
 - assert:
     that: openshift_deployment_type in known_openshift_deployment_types
+  when: not openshift.common.is_containerized | bool
 
 - name: Ensure libselinux-python is installed
   action: "{{ ansible_pkg_mgr }} name=libselinux-python state=present"
@@ -34,9 +35,9 @@
     state: absent
   with_fileglob:
   - '*/repos/*'
-  when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) and
-        (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
-        and not openshift.common.is_containerized | bool
+  when: not openshift.common.is_containerized | bool 
+        and not (item | search("/files/" ~ openshift_deployment_type ~ "/repos"))
+        and (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
   notify: refresh cache
 
 - name: Remove any yum repo files for other deployment types Fedora
@@ -45,9 +46,9 @@
     state: absent
   with_fileglob:
   - '*/repos/*'
-  when: not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos")) and
-        (ansible_distribution == "Fedora")
-        and not openshift.common.is_containerized | bool
+  when: not openshift.common.is_containerized | bool
+        and not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos"))
+        and (ansible_distribution == "Fedora")
   notify: refresh cache
 
 - name: Configure gpg keys if needed