Переглянути джерело

Merge pull request #1945 from dgoodwin/upgrade33

openshift_release / version / upgrade improvements
Scott Dodson 8 роки тому
батько
коміт
65ffae3e6e
83 змінених файлів з 718 додано та 1657 видалено
  1. 4 5
      filter_plugins/oo_filters.py
  2. 24 0
      inventory/byo/hosts.aep.example
  3. 24 0
      inventory/byo/hosts.origin.example
  4. 24 0
      inventory/byo/hosts.ose.example
  5. 0 16
      playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
  6. 12 71
      playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
  7. 0 21
      playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md
  8. 0 28
      playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
  9. 0 16
      playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md
  10. 0 28
      playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
  11. 0 17
      playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md
  12. 0 32
      playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
  13. 0 0
      playbooks/byo/openshift-cluster/upgrades/v3_2/README.md
  14. 0 4
      playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
  15. 2 0
      playbooks/common/openshift-cluster/config.yml
  16. 2 0
      playbooks/common/openshift-cluster/initialize_facts.yml
  17. 16 0
      playbooks/common/openshift-cluster/initialize_openshift_version.yml
  18. 44 0
      playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
  19. 51 0
      playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
  20. 0 0
      playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh
  21. 0 22
      playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh
  22. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins
  23. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_0_minor/library
  24. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins
  25. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles
  26. 0 114
      playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
  27. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins
  28. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library
  29. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins
  30. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles
  31. 0 646
      playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
  32. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins
  33. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_1_minor/library
  34. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins
  35. 0 58
      playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml
  36. 0 88
      playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml
  37. 0 1
      playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles
  38. 0 140
      playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
  39. 1 1
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml
  40. 0 14
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml
  41. 0 24
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml
  42. 1 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.dep.service
  43. 1 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.service
  44. 1 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.docker.service
  45. 1 0
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.sysconfig.j2
  46. 2 2
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml
  47. 65 73
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
  48. 2 1
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml
  49. 44 69
      playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
  50. 2 4
      playbooks/common/openshift-master/config.yml
  51. 1 1
      roles/docker/defaults/main.yml
  52. 39 1
      roles/docker/tasks/main.yml
  53. 53 0
      roles/openshift_ca/tasks/main.yml
  54. 0 1
      roles/openshift_cli/defaults/main.yml
  55. 4 1
      roles/openshift_cli/tasks/main.yml
  56. 1 1
      roles/openshift_cli/templates/openshift.j2
  57. 0 1
      roles/openshift_common/defaults/main.yml
  58. 1 0
      roles/openshift_common/meta/main.yml
  59. 2 5
      roles/openshift_common/tasks/main.yml
  60. 0 1
      roles/openshift_docker/defaults/main.yml
  61. 1 1
      roles/openshift_docker/meta/main.yml
  62. 0 40
      roles/openshift_docker/tasks/main.yml
  63. 0 1
      roles/openshift_docker_facts/defaults/main.yml
  64. 2 20
      roles/openshift_docker_facts/tasks/main.yml
  65. 54 30
      roles/openshift_facts/library/openshift_facts.py
  66. 5 0
      roles/openshift_facts/tasks/main.yml
  67. 1 1
      roles/openshift_master/defaults/main.yml
  68. 2 2
      roles/openshift_master/tasks/main.yml
  69. 1 1
      roles/openshift_master/templates/atomic-openshift-master.j2
  70. 1 1
      roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
  71. 1 1
      roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
  72. 2 1
      roles/openshift_master_ca/tasks/main.yml
  73. 0 1
      roles/openshift_master_ca/vars/main.yml
  74. 14 1
      roles/openshift_node/defaults/main.yml
  75. 4 4
      roles/openshift_node/tasks/main.yml
  76. 1 1
      roles/openshift_node/tasks/systemd_units.yml
  77. 1 1
      roles/openshift_node/templates/openvswitch.sysconfig.j2
  78. 2 0
      roles/openshift_version/defaults/main.yml
  79. 18 0
      roles/openshift_version/meta/main.yml
  80. 76 0
      roles/openshift_version/tasks/main.yml
  81. 39 0
      roles/openshift_version/tasks/set_version_containerized.yml
  82. 18 0
      roles/openshift_version/tasks/set_version_rpm.yml
  83. 51 32
      utils/src/ooinstall/cli_installer.py

+ 4 - 5
filter_plugins/oo_filters.py

@@ -803,14 +803,13 @@ class FilterModule(object):
         """
         if not isinstance(version, basestring):
             raise errors.AnsibleFilterError("|failed expects a string or unicode")
-        # TODO: Do we need to make this actually convert v1.2.0-rc1 into 1.2.0-0.rc1
-        # We'd need to be really strict about how we build the RPM Version+Release
         if version.startswith("v"):
-            version = version.replace("v", "")
+            version = version[1:]
+            # Strip release from requested version, we no longer support this.
             version = version.split('-')[0]
 
-            if include_dash:
-                version = "-" + version
+        if include_dash and version and not version.startswith("-"):
+            version = "-" + version
 
         return version
 

+ 24 - 0
inventory/byo/hosts.aep.example

@@ -24,6 +24,23 @@ debug_level=2
 # deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise
 deployment_type=atomic-enterprise
 
+# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
+# rely on the version running on the first master. Works best for containerized installs where we can usually
+# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
+# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
+# release.
+openshift_release=v3.2
+
+# Specify an exact container image tag to install or configure.
+# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_image_tag=v3.2.0.46
+
+# Specify an exact rpm version to install or configure.
+# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_pkg_version=-3.2.0.46
+
 # Install the openshift examples
 #openshift_install_examples=true
 
@@ -75,6 +92,13 @@ deployment_type=atomic-enterprise
 # Default value: "--log-driver=json-file --log-opt max-size=50m"
 #openshift_docker_options="-l warn --ipv6=false"
 
+# Specify exact version of Docker to configure or upgrade to.
+# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
+# docker_version="1.10.3"
+
+# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
+# docker_upgrade=False
+
 # Alternate image format string. If you're not modifying the format string and
 # only need to inject your own registry you may want to consider
 # openshift_docker_additional_registries instead

+ 24 - 0
inventory/byo/hosts.origin.example

@@ -25,6 +25,23 @@ debug_level=2
 # deployment type valid values are origin, online, atomic-enterprise and openshift-enterprise
 deployment_type=origin
 
+# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
+# rely on the version running on the first master. Works best for containerized installs where we can usually
+# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
+# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
+# release.
+openshift_release=v1.2
+
+# Specify an exact container image tag to install or configure.
+# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_image_tag=v1.2.0
+
+# Specify an exact rpm version to install or configure.
+# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_pkg_version=-1.2.0
+
 # Install the openshift examples
 #openshift_install_examples=true
 
@@ -76,6 +93,13 @@ deployment_type=origin
 # Default value: "--log-driver=json-file --log-opt max-size=50m"
 #openshift_docker_options="-l warn --ipv6=false"
 
+# Specify exact version of Docker to configure or upgrade to.
+# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
+# docker_version="1.10.3"
+
+# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
+# docker_upgrade=False
+
 # Alternate image format string. If you're not modifying the format string and
 # only need to inject your own registry you may want to consider
 # openshift_docker_additional_registries instead

+ 24 - 0
inventory/byo/hosts.ose.example

@@ -24,6 +24,23 @@ debug_level=2
 # deployment type valid values are origin, online, atomic-enterprise, and openshift-enterprise
 deployment_type=openshift-enterprise
 
+# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
+# rely on the version running on the first master. Works best for containerized installs where we can usually
+# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
+# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
+# release.
+openshift_release=v3.2
+
+# Specify an exact container image tag to install or configure.
+# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_image_tag=v3.2.0.46
+
+# Specify an exact rpm version to install or configure.
+# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
+# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
+#openshift_pkg_version=-3.2.0.46
+
 # Install the openshift examples
 #openshift_install_examples=true
 
@@ -75,6 +92,13 @@ deployment_type=openshift-enterprise
 # Default value: "--log-driver=json-file --log-opt max-size=50m"
 #openshift_docker_options="-l warn --ipv6=false"
 
+# Specify exact version of Docker to configure or upgrade to.
+# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
+# docker_version="1.10.3"
+
+# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
+# docker_upgrade=False
+
 # Alternate image format string. If you're not modifying the format string and
 # only need to inject your own registry you may want to consider
 # openshift_docker_additional_registries instead

+ 0 - 16
playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -1,16 +0,0 @@
----
-# Usage:
-#  ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=<deployment_type> -e cluster_id=<cluster_id>
-- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
-  vars_files:
-  - "{{lookup('file', '../../../../aws/openshift-cluster/vars.yml')}}"
-  - "{{lookup('file', '../../../../aws/openshift-cluster/cluster_hosts.yml')}}"
-  vars:
-    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
-    g_sudo: "{{ deployment_vars[deployment_type].become }}"
-    g_nodeonmaster: true
-    openshift_cluster_id: "{{ cluster_id }}"
-    openshift_debug_level: "{{ debug_level }}"
-    openshift_deployment_type: "{{ deployment_type }}"
-    openshift_hostname: "{{ ec2_private_ip_address }}"
-    openshift_public_hostname: "{{ ec2_ip_address }}"

+ 12 - 71
playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -1,106 +1,47 @@
 
-- name: Check for appropriate Docker versions for 1.9.x to 1.10.x upgrade
+- name: Check for appropriate Docker versions
   hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
   roles:
   - openshift_facts
   tasks:
+  - set_fact:
+      repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+
   - fail:
       msg: Cannot upgrade Docker on Atomic operating systems.
     when: openshift.common.is_atomic | bool
 
-  - name: Determine available Docker version
-    script: ../../../../common/openshift-cluster/upgrades/files/rpm_versions.sh docker
-    register: g_docker_version_result
-
-  - name: Check if Docker is installed
-    command: rpm -q docker
-    register: pkg_check
-    failed_when: pkg_check.rc > 1
-    changed_when: no
-
-  - set_fact:
-      g_docker_version: "{{ g_docker_version_result.stdout | from_yaml }}"
-
-  - name: Set fact if docker requires an upgrade
-    set_fact:
-      docker_upgrade: true
-    when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.10','<')
+  - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade_check.yml
+    when: docker_upgrade is not defined or docker_upgrade | bool
 
-  - fail:
-      msg: This playbook requires access to Docker 1.10 or later
-    when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')
 
 # If a node fails, halt everything, the admin will need to clean up and we
 # don't want to carry on, potentially taking out every node. The playbook can safely be re-run
-# and will not take any action on a node already running 1.10+.
+# and will not take any action on a node already running the requested docker version.
 - name: Evacuate and upgrade nodes
   hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
   serial: 1
   any_errors_fatal: true
   tasks:
-  - debug: var=docker_upgrade
-
   - name: Prepare for Node evacuation
     command: >
       {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
 
-# TODO: skip all node evac stuff for non-nodes (i.e. separate containerized etcd hosts)
   - name: Evacuate Node for Kubelet upgrade
     command: >
       {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
-
-  - name: Stop containerized services
-    service: name={{ item }} state=stopped
-    with_items:
-      - "{{ openshift.common.service_type }}-master"
-      - "{{ openshift.common.service_type }}-master-api"
-      - "{{ openshift.common.service_type }}-master-controllers"
-      - "{{ openshift.common.service_type }}-node"
-      - etcd_container
-      - openvswitch
-    failed_when: false
-    when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
-
-  - name: Remove all containers and images
-    script: files/nuke_images.sh docker
-    register: nuke_images_result
-    when: docker_upgrade is defined and docker_upgrade | bool
-
-  - name: Upgrade Docker
-    command: "{{ ansible_pkg_mgr}} update -y docker"
-    register: docker_upgrade_result
-    when: docker_upgrade is defined and docker_upgrade | bool
-
-  - name: Restart containerized services
-    service: name={{ item }} state=started
-    with_items:
-      - etcd_container
-      - openvswitch
-      - "{{ openshift.common.service_type }}-master"
-      - "{{ openshift.common.service_type }}-master-api"
-      - "{{ openshift.common.service_type }}-master-controllers"
-      - "{{ openshift.common.service_type }}-node"
-    failed_when: false
-    when: docker_upgrade is defined and docker_upgrade | bool and openshift.common.is_containerized | bool
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config
 
-  - name: Wait for master API to come back online
-    become: no
-    local_action:
-      module: wait_for
-        host="{{ inventory_hostname }}"
-        state=started
-        delay=10
-        port="{{ openshift.master.api_port }}"
-    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_masters_to_config
+  - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade.yml
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool
 
   - name: Set node schedulability
     command: >
       {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: openshift.node.schedulable | bool
-    when: docker_upgrade is defined and docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
 

+ 0 - 21
playbooks/byo/openshift-cluster/upgrades/v3_0_minor/README.md

@@ -1,21 +0,0 @@
-# v3.0 minor upgrade playbook
-**Note:** This playbook will re-run installation steps overwriting any local
-modifications. You should ensure that your inventory has been updated with any
-modifications you've made after your initial installation. If you find any items
-that cannot be configured via ansible please open an issue at
-https://github.com/openshift/openshift-ansible
-
-## Overview
-This playbook is available as a technical preview. It currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Upgrade and restart node services
- * Applies latest configuration by re-running the installation playbook
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml

+ 0 - 28
playbooks/byo/openshift-cluster/upgrades/v3_0_minor/upgrade.yml

@@ -1,28 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
-    fail:
-      msg: "Unsupported ansible version: {{ ansible_version }} found."
-    when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
-  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-  - add_host:
-      name: "{{ item }}"
-      groups: l_oo_all_hosts
-    with_items: g_all_hosts
-
-- hosts: l_oo_all_hosts
-  gather_facts: no
-  tasks:
-  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml
-  vars:
-    # Do not allow adding hosts during upgrade.
-    g_new_master_hosts: []
-    g_new_node_hosts: []
-    openshift_cluster_id: "{{ cluster_id | default('default') }}"
-    openshift_deployment_type: "{{ deployment_type }}"

+ 0 - 16
playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/README.md

@@ -1,16 +0,0 @@
-# v3.0 to v3.1 upgrade playbook
-
-## Overview
-This playbook currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

+ 0 - 28
playbooks/byo/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -1,28 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
-    fail:
-      msg: "Unsupported ansible version: {{ ansible_version }} found."
-    when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
-  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-  - add_host:
-      name: "{{ item }}"
-      groups: l_oo_all_hosts
-    with_items: g_all_hosts
-
-- hosts: l_oo_all_hosts
-  gather_facts: no
-  tasks:
-  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
-  vars:
-    # Do not allow adding hosts during upgrade.
-    g_new_master_hosts: []
-    g_new_node_hosts: []
-    openshift_cluster_id: "{{ cluster_id | default('default') }}"
-    openshift_deployment_type: "{{ deployment_type }}"

+ 0 - 17
playbooks/byo/openshift-cluster/upgrades/v3_1_minor/README.md

@@ -1,17 +0,0 @@
-# v3.1 minor upgrade playbook
-This upgrade will preserve all locally made configuration modifications to the
-Masters and Nodes.
-
-## Overview
-This playbook is available as a technical preview. It currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Upgrade and restart node services
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml

+ 0 - 32
playbooks/byo/openshift-cluster/upgrades/v3_1_minor/upgrade.yml

@@ -1,32 +0,0 @@
----
-- hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
-    fail:
-      msg: "Unsupported ansible version: {{ ansible_version }} found."
-    when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
-  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-  - add_host:
-      name: "{{ item }}"
-      groups: l_oo_all_hosts
-    with_items: g_all_hosts
-
-- hosts: l_oo_all_hosts
-  gather_facts: no
-  tasks:
-  - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/evaluate_groups.yml
-  vars:
-    # Do not allow adding hosts during upgrade.
-    g_new_master_hosts: []
-    g_new_node_hosts: []
-    openshift_cluster_id: "{{ cluster_id | default('default') }}"
-    openshift_deployment_type: "{{ deployment_type }}"
-- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/pre.yml
-- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml
-- include: ../../../openshift-master/restart.yml
-- include: ../../../../common/openshift-cluster/upgrades/v3_1_minor/post.yml

playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/README.md → playbooks/byo/openshift-cluster/upgrades/v3_2/README.md


+ 0 - 4
playbooks/byo/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml

@@ -4,10 +4,6 @@
   become: no
   gather_facts: no
   tasks:
-  - name: Verify Ansible version is greater than or equal to 1.9.4 and less than 2.0
-    fail:
-      msg: "Unsupported ansible version: {{ ansible_version }} found."
-    when: ansible_version.full | version_compare('1.9.4', 'lt') or ansible_version.full | version_compare('2.0', 'ge')
   - include_vars: ../../../../byo/openshift-cluster/cluster_hosts.yml
   - add_host:
       name: "{{ item }}"

+ 2 - 0
playbooks/common/openshift-cluster/config.yml

@@ -5,6 +5,8 @@
 
 - include: validate_hostnames.yml
 
+- include: initialize_openshift_version.yml
+
 - name: Set oo_options
   hosts: oo_all_hosts
   tasks:

+ 2 - 0
playbooks/common/openshift-cluster/initialize_facts.yml

@@ -9,3 +9,5 @@
       role: common
       local_facts:
         hostname: "{{ openshift_hostname | default(None) }}"
+  - set_fact:
+      openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"

+ 16 - 0
playbooks/common/openshift-cluster/initialize_openshift_version.yml

@@ -0,0 +1,16 @@
+---
+# NOTE: requires openshift_facts be run
+- name: Determine openshift_version to configure on first master
+  hosts: oo_first_master
+  roles:
+  - openshift_version
+
+# NOTE: We set this even on etcd hosts as they may also later run as masters,
+# and we don't want to install wrong version of docker and have to downgrade
+# later.
+- name: Set openshift_version for all hosts
+  hosts: oo_all_hosts:!oo_first_master
+  vars:
+    openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
+  roles:
+  - openshift_version

+ 44 - 0
playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml

@@ -0,0 +1,44 @@
+---
+# We need docker service up to remove all the images, but these services will keep
+# trying to re-start and thus re-pull the images we're trying to delete.
+- name: Stop containerized services
+  service: name={{ item }} state=stopped
+  with_items:
+    - "{{ openshift.common.service_type }}-master"
+    - "{{ openshift.common.service_type }}-master-api"
+    - "{{ openshift.common.service_type }}-master-controllers"
+    - "{{ openshift.common.service_type }}-node"
+    - etcd_container
+    - openvswitch
+  failed_when: false
+  when: openshift.common.is_containerized | bool
+
+- name: Remove all containers and images
+  script: nuke_images.sh docker
+  register: nuke_images_result
+  when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
+
+- name: Upgrade Docker
+  action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version }} state=present"
+
+- name: Restart containerized services
+  service: name={{ item }} state=started
+  with_items:
+    - etcd_container
+    - openvswitch
+    - "{{ openshift.common.service_type }}-master"
+    - "{{ openshift.common.service_type }}-master-api"
+    - "{{ openshift.common.service_type }}-master-controllers"
+    - "{{ openshift.common.service_type }}-node"
+  failed_when: false
+  when: openshift.common.is_containerized | bool
+
+- name: Wait for master API to come back online
+  become: no
+  local_action:
+    module: wait_for
+      host="{{ inventory_hostname }}"
+      state=started
+      delay=10
+      port="{{ openshift.master.api_port }}"
+  when: inventory_hostname in groups.oo_masters_to_config

+ 51 - 0
playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml

@@ -0,0 +1,51 @@
+---
+
+# This snippet determines if a Docker upgrade is required by checking the inventory
+# variables, the available packages, and sets l_docker_upgrade to True if so.
+
+- set_fact:
+    docker_upgrade: True
+  when: docker_upgrade is not defined
+
+- name: Check if Docker is installed
+  command: rpm -q docker
+  register: pkg_check
+  failed_when: pkg_check.rc > 1
+  changed_when: no
+
+- name: Get current version of Docker
+  command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
+  register: curr_docker_version
+  changed_when: false
+
+- name: Get latest available version of Docker
+  command: >
+    {{ repoquery_cmd }} --qf '%{version}' "docker"
+  register: avail_docker_version
+  failed_when: false
+  changed_when: false
+
+- fail:
+    msg: This playbook requires access to Docker 1.10 or later
+  # Disable the 1.10 requirement if the user set a specific Docker version
+  when: avail_docker_version.stdout | version_compare('1.10','<') and docker_version is not defined
+
+# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
+- set_fact:
+    l_docker_upgrade: False
+
+# Make sure a docker_version is set if none was requested:
+- set_fact:
+    docker_version: "{{ avail_docker_version.stdout }}"
+  when: docker_version is not defined
+
+- name: Flag for Docker upgrade if necessary
+  set_fact:
+    l_docker_upgrade: True
+  when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
+
+- name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
+  set_fact:
+      docker_upgrade_nuke_images: True
+  when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
+

playbooks/byo/openshift-cluster/upgrades/docker/files/nuke_images.sh → playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh


+ 0 - 22
playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh

@@ -1,22 +0,0 @@
-#!/bin/bash
-
-# Here we don't really care if this is a master, api, controller or node image.
-# We just need to know the version of one of them.
-unit_file=$(ls /etc/systemd/system/${1}*.service | grep -v node-dep | head -n1)
-
-if [ ${1} == "origin" ]; then
-    image_name="openshift/origin"
-elif grep aep $unit_file 2>&1 > /dev/null; then
-    image_name="aep3/node"
-elif grep openshift3 $unit_file 2>&1 > /dev/null; then
-    image_name="openshift3/node"
-fi
-
-installed=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
-
-docker pull ${image_name} 2>&1 > /dev/null
-available=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
-
-echo "---"
-echo "curr_version: ${installed}"
-echo "avail_version: ${available}"

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins

@@ -1 +0,0 @@
-../../../../../filter_plugins

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_minor/library

@@ -1 +0,0 @@
-../library

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins

@@ -1 +0,0 @@
-../../../../../lookup_plugins

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles

@@ -1 +0,0 @@
-../../../../../roles

+ 0 - 114
playbooks/common/openshift-cluster/upgrades/v3_0_minor/upgrade.yml

@@ -1,114 +0,0 @@
----
-- name: Evaluate groups
-  include: ../../evaluate_groups.yml
-
-- name: Re-Run cluster configuration to apply latest configuration changes
-  include: ../../config.yml
-
-- name: Upgrade masters
-  hosts: oo_masters_to_config
-  vars:
-    openshift_version: "{{ openshift_pkg_version | default('') }}"
-  tasks:
-    - name: Upgrade master packages
-      action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest"
-    - name: Restart master services
-      service: name="{{ openshift.common.service_type}}-master" state=restarted
-
-- name: Upgrade nodes
-  hosts: oo_nodes_to_config
-  vars:
-    openshift_version: "{{ openshift_pkg_version | default('') }}"
-  tasks:
-    - name: Upgrade node packages
-      action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest"
-    - name: Restart node services
-      service: name="{{ openshift.common.service_type }}-node" state=restarted
-
-- name: Determine new master version
-  hosts: oo_first_master
-  tasks:
-    - name: Determine new version
-      command: >
-        rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
-      register: _new_version
-
-- name: Ensure AOS 3.0.2 or Origin 1.0.6
-  hosts: oo_first_master
-  tasks:
-  - fail:
-      msg: "This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later"
-    when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
-
-- name: Update cluster policy
-  hosts: oo_first_master
-  tasks:
-    - name: oadm policy reconcile-cluster-roles --additive-only=true --confirm
-      command: >
-        {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-        policy reconcile-cluster-roles --additive-only=true --confirm
-
-- name: Upgrade default router
-  hosts: oo_first_master
-  vars:
-    - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
-    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
-  tasks:
-    - name: Check for default router
-      command: >
-        {{ oc_cmd }} get -n default dc/router
-      register: _default_router
-      failed_when: false
-      changed_when: false
-    - name: Check for allowHostNetwork and allowHostPorts
-      when: _default_router.rc == 0
-      shell: >
-        {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
-      register: _scc
-    - name: Grant allowHostNetwork and allowHostPorts
-      when:
-        - _default_router.rc == 0
-        - "'false' in _scc.stdout"
-      command: >
-        {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
-    - name: Update deployment config to 1.0.4/3.0.1 spec
-      when: _default_router.rc == 0
-      command: >
-        {{ oc_cmd }} patch dc/router -p
-        '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
-    - name: Switch to hostNetwork=true
-      when: _default_router.rc == 0
-      command: >
-        {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
-    - name: Update router image to current version
-      when: _default_router.rc == 0
-      command: >
-        {{ oc_cmd }} patch dc/router -p
-        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
-
-- name: Upgrade default
-  hosts: oo_first_master
-  vars:
-    - registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + _new_version.stdout  ) }}"
-    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
-  tasks:
-    - name: Check for default registry
-      command: >
-          {{ oc_cmd }} get -n default dc/docker-registry
-      register: _default_registry
-      failed_when: false
-      changed_when: false
-    - name: Update registry image to current version
-      when: _default_registry.rc == 0
-      command: >
-        {{ oc_cmd }} patch dc/docker-registry -p
-        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
-
-- name: Update image streams and templates
-  hosts: oo_first_master
-  vars:
-    openshift_examples_import_command: "update"
-    openshift_deployment_type: "{{ deployment_type }}"
-    registry_url: "{{ openshift.master.registry_url }}"
-  roles:
-    - openshift_examples

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins

@@ -1 +0,0 @@
-../../../../../filter_plugins

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library

@@ -1 +0,0 @@
-../library

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins

@@ -1 +0,0 @@
-../../../../../lookup_plugins

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles

@@ -1 +0,0 @@
-../../../../../roles

+ 0 - 646
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml

@@ -1,646 +0,0 @@
----
-###############################################################################
-# Evaluate host groups and gather facts
-###############################################################################
-- name: Evaluate host groups
-  include: ../../evaluate_groups.yml
-
-- name: Load openshift_facts
-  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
-  roles:
-  - openshift_facts
-
-- name: Evaluate additional groups for upgrade
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - name: Evaluate etcd_hosts_to_backup
-    add_host:
-      name: "{{ item }}"
-      groups: etcd_hosts_to_backup
-    with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master
-
-
-###############################################################################
-# Pre-upgrade checks
-###############################################################################
-- name: Verify upgrade can proceed
-  hosts: oo_first_master
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-    target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
-  gather_facts: no
-  tasks:
-  # Pacemaker is currently the only supported upgrade path for multiple masters
-  - fail:
-      msg: "openshift_master_cluster_method must be set to 'pacemaker'"
-    when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method != "pacemaker"))
-
-  - fail:
-      msg: >
-        This upgrade is only supported for origin, openshift-enterprise, and online
-        deployment types
-    when: deployment_type not in ['origin','openshift-enterprise', 'online']
-
-  - fail:
-      msg: >
-        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
-        valid version for a {{ target_version }} upgrade
-    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
-
-  # If this script errors out ansible will show the default stdout/stderr
-  # which contains details for the user:
-  - script: ../files/pre-upgrade-check
-
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_config
-  vars:
-    target_version: "{{ '1.1' if deployment_type == 'origin' else '3.1' }}"
-  tasks:
-  - name: Clean package cache
-    command: "{{ ansible_pkg_mgr }} clean all"
-
-  - set_fact:
-      g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
-
-  - name: Determine available versions
-    script: ../files/rpm_versions.sh {{ g_new_service_name }} openshift
-    register: g_versions_result
-
-  - set_fact:
-      g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
-
-  - set_fact:
-      g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
-    when: openshift_pkg_version is not defined
-
-  - set_fact:
-      g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
-    when: openshift_pkg_version is defined
-
-  - fail:
-      msg: This playbook requires Origin 1.0.6 or later
-    when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<')
-
-  - fail:
-      msg: Upgrade packages not found
-    when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
-
-  - set_fact:
-      pre_upgrade_complete: True
-
-
-##############################################################################
-# Gate on pre-upgrade checks
-##############################################################################
-- name: Gate on pre-upgrade checks
-  hosts: localhost
-  connection: local
-  become: no
-  vars:
-    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
-  tasks:
-  - set_fact:
-      pre_upgrade_completed: "{{ hostvars
-                                 | oo_select_keys(pre_upgrade_hosts)
-                                 | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
-  - set_fact:
-      pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
-    when: pre_upgrade_failed | length > 0
-
-
-
-###############################################################################
-# Backup etcd
-###############################################################################
-- name: Backup etcd
-  hosts: etcd_hosts_to_backup
-  vars:
-    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
-    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
-  roles:
-  - openshift_facts
-  tasks:
-  # Ensure we persist the etcd role for this host in openshift_facts
-  - openshift_facts:
-      role: etcd
-      local_facts: {}
-    when: "'etcd' not in openshift"
-
-  - stat: path=/var/lib/openshift
-    register: var_lib_openshift
-
-  - stat: path=/var/lib/origin
-    register: var_lib_origin
-
-  - name: Create origin symlink if necessary
-    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
-    when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False
-
-  # TODO: replace shell module with command and update later checks
-  # We assume to be using the data dir for all backups.
-  - name: Check available disk space for etcd backup
-    shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
-    register: avail_disk
-
-  # TODO: replace shell module with command and update later checks
-  - name: Check current embedded etcd disk usage
-    shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1
-    register: etcd_disk_usage
-    when: embedded_etcd | bool
-
-  - name: Abort if insufficient disk space for etcd backup
-    fail:
-      msg: >
-        {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup,
-        {{ avail_disk.stdout }} Kb available.
-    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-
-  - name: Install etcd (for etcdctl)
-    action: "{{ ansible_pkg_mgr }} name=etcd state=latest"
-
-  - name: Generate etcd backup
-    command: >
-      etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
-      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-
-  - set_fact:
-      etcd_backup_complete: True
-
-  - name: Display location of etcd backup
-    debug:
-      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-
-##############################################################################
-# Gate on etcd backup
-##############################################################################
-- name: Gate on etcd backup
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      etcd_backup_completed: "{{ hostvars
-                                 | oo_select_keys(groups.etcd_hosts_to_backup)
-                                 | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
-  - set_fact:
-      etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
-    when: etcd_backup_failed | length > 0
-
-
-
-###############################################################################
-# Upgrade Masters
-###############################################################################
-- name: Create temp directory for syncing certs
-  hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - name: Create local temp directory for syncing certs
-    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
-    register: g_master_mktemp
-    changed_when: False
-
-- name: Update deployment type
-  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
-  vars:
-    openshift_deployment_type: "{{ deployment_type }}"
-  roles:
-  - openshift_facts
-
-- name: Update master facts
-  hosts: oo_masters_to_config
-  roles:
-  - openshift_facts
-  post_tasks:
-  - openshift_facts:
-      role: master
-      local_facts:
-        cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
-
-- name: Upgrade master packages and configuration
-  hosts: oo_masters_to_config
-  vars:
-    openshift_version: "{{ openshift_pkg_version | default('') }}"
-  roles:
-  - openshift_facts
-  tasks:
-  - name: Upgrade to latest available kernel
-    action: "{{ ansible_pkg_mgr}} name=kernel state=latest"
-
-  - name: Upgrade master packages
-    command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-master{{ openshift_version }}"
-    when: openshift_pkg_version is not defined
-
-  - name: Upgrade packages
-    command: "{{ ansible_pkg_mgr}} install -y {{ openshift.common.installed_variant_rpms | oo_31_rpm_rename_conversion(openshift_version) | join (' ')}}"
-    when: openshift_pkg_version is defined and deployment_type == 'openshift-enterprise'
-
-  - name: Ensure python-yaml present for config upgrade
-    action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
-    when: not openshift.common.is_atomic | bool
-
-  - name: Upgrade master configuration
-    openshift_upgrade_config:
-      from_version: '3.0'
-      to_version: '3.1'
-      role: master
-      config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
-
-  - set_fact:
-      openshift_master_certs_no_etcd:
-      - admin.crt
-      - master.kubelet-client.crt
-      - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}"
-      - master.server.crt
-      - openshift-master.crt
-      - openshift-registry.crt
-      - openshift-router.crt
-      - etcd.server.crt
-      openshift_master_certs_etcd:
-      - master.etcd-client.crt
-
-  - set_fact:
-      openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
-
-  - name: Check status of master certificates
-    stat:
-      path: "{{ openshift.common.config_base }}/master/{{ item }}"
-    with_items: openshift_master_certs
-    register: g_master_cert_stat_result
-
-  - set_fact:
-      master_certs_missing: "{{ False in (g_master_cert_stat_result.results
-                                | oo_collect(attribute='stat.exists')
-                                | list ) }}"
-      master_cert_subdir: master-{{ openshift.common.hostname }}
-      master_cert_config_dir: "{{ openshift.common.config_base }}/master"
-
-
-- name: Generate missing master certificates
-  hosts: oo_first_master
-  vars:
-    master_hostnames: "{{ hostvars
-                          | oo_select_keys(groups.oo_masters_to_config)
-                          | oo_collect('openshift.common.all_hostnames')
-                          | oo_flatten | unique }}"
-    master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
-    masters_needing_certs: "{{ hostvars
-                               | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
-                               | oo_filter_list(filter_attr='master_certs_missing') }}"
-    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
-    openshift_deployment_type: "{{ deployment_type }}"
-  roles:
-  - openshift_master_certificates
-  post_tasks:
-  - name: Remove generated etcd client certs when using external etcd
-    file:
-      path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
-      state: absent
-    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
-    with_nested:
-    - masters_needing_certs
-    - - master.etcd-client.crt
-      - master.etcd-client.key
-
-  - name: Create a tarball of the master certs
-    command: >
-      tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
-        -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
-    with_items: masters_needing_certs
-
-  - name: Retrieve the master cert tarball from the master
-    fetch:
-      src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
-      dest: "{{ sync_tmpdir }}/"
-      flat: yes
-      fail_on_missing: yes
-      validate_checksum: yes
-    with_items: masters_needing_certs
-
-
-- name: Sync generated certs, update service config and restart master services
-  hosts: oo_masters_to_config
-  vars:
-    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-    openshift_deployment_type: "{{ deployment_type }}"
-  tasks:
-  - name: Unarchive the tarball on the master
-    unarchive:
-      src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
-      dest: "{{ master_cert_config_dir }}"
-    when: inventory_hostname != groups.oo_first_master.0
-
-  - name: Restart master service
-    service: name="{{ openshift.common.service_type}}-master" state=restarted
-    when: not openshift_master_ha | bool
-
-  - name: Ensure the master service is enabled
-    service: name="{{ openshift.common.service_type}}-master" state=started enabled=yes
-    when: not openshift_master_ha | bool
-
-  - name: Check for configured cluster
-    stat:
-      path: /etc/corosync/corosync.conf
-    register: corosync_conf
-    when: openshift_master_ha | bool
-
-  - name: Destroy cluster
-    command: pcs cluster destroy --all
-    when: openshift_master_ha | bool and corosync_conf.stat.exists == true
-    run_once: true
-
-  - name: Start pcsd
-    service: name=pcsd enabled=yes state=started
-    when: openshift_master_ha | bool
-
-
-- name: Re-create cluster
-  hosts: oo_first_master
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-    openshift_deployment_type: "{{ deployment_type }}"
-    omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ') }}"
-  roles:
-  - role: openshift_master_cluster
-    when: openshift_master_ha | bool
-
-
-- name: Delete temporary directory on localhost
-  hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - file: name={{ g_master_mktemp.stdout }} state=absent
-    changed_when: False
-
-
-- name: Set master update status to complete
-  hosts: oo_masters_to_config
-  tasks:
-  - set_fact:
-      master_update_complete: True
-
-
-##############################################################################
-# Gate on master update complete
-##############################################################################
-- name: Gate on master update
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      master_update_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_masters_to_config)
-                                 | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
-  - set_fact:
-      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
-    when: master_update_failed | length > 0
-
-
-###############################################################################
-# Upgrade Nodes
-###############################################################################
-- name: Upgrade nodes
-  hosts: oo_nodes_to_config
-  vars:
-    openshift_version: "{{ openshift_pkg_version | default('') }}"
-  roles:
-  - openshift_facts
-  tasks:
-  - name: Upgrade node packages
-    command: "{{ ansible_pkg_mgr }} update -y {{ openshift.common.service_type }}-node{{ openshift_version }}"
-    when: openshift_pkg_version is not defined
-
-  - name: Upgrade packages
-    command: "{{ ansible_pkg_mgr}} install -y {{ openshift.common.installed_variant_rpms | oo_31_rpm_rename_conversion(openshift_version) | join (' ')}}"
-    when: openshift_pkg_version is defined and deployment_type == 'openshift-enterprise'
-
-  - name: Restart node service
-    service: name="{{ openshift.common.service_type }}-node" state=restarted
-
-  - name: Ensure node service enabled
-    service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
-
-  - name: Install Ceph storage plugin dependencies
-    action: "{{ ansible_pkg_mgr }} name=ceph-common state=present"
-
-  - name: Install GlusterFS storage plugin dependencies
-    action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present"
-
-  - name: Set sebooleans to allow gluster storage plugin access from containers
-    seboolean:
-      name: "{{ item }}"
-      state: yes
-      persistent: yes
-    when: ansible_selinux and ansible_selinux.status == "enabled"
-    with_items:
-    - virt_use_fusefs
-    - virt_sandbox_use_fusefs
-    register: sebool_result
-    failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean {{ item }} does not exist' not in sebool_result.msg"
-
-  - set_fact:
-      node_update_complete: True
-
-
-##############################################################################
-# Gate on nodes update
-##############################################################################
-- name: Gate on nodes update
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      node_update_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_nodes_to_config)
-                                 | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
-  - set_fact:
-      node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
-    when: node_update_failed | length > 0
-
-
-###############################################################################
-# Post upgrade - Reconcile Cluster Roles and Cluster Role Bindings
-###############################################################################
-- name: Reconcile Cluster Roles and Cluster Role Bindings
-  hosts: oo_masters_to_config
-  vars:
-    origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
-    ent_reconcile_bindings: true
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-  tasks:
-  - name: Reconcile Cluster Roles
-    command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      policy reconcile-cluster-roles --additive-only=true --confirm
-    run_once: true
-
-  - name: Reconcile Cluster Role Bindings
-    command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      policy reconcile-cluster-role-bindings
-      --exclude-groups=system:authenticated
-      --exclude-groups=system:authenticated:oauth
-      --exclude-groups=system:unauthenticated
-      --exclude-users=system:anonymous
-      --additive-only=true --confirm
-    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
-    run_once: true
-
-  - name: Restart master services
-    service: name="{{ openshift.common.service_type}}-master" state=restarted
-    when: not openshift_master_ha | bool
-
-  - name: Restart master cluster
-    command: pcs resource restart master
-    when: openshift_master_ha | bool
-    run_once: true
-
-  - name: Wait for the clustered master service to be available
-    wait_for:
-      host: "{{ openshift_master_cluster_vip }}"
-      port: 8443
-      state: started
-      timeout: 180
-      delay: 90
-    when: openshift_master_ha | bool
-    run_once: true
-
-  - set_fact:
-      reconcile_complete: True
-
-
-##############################################################################
-# Gate on reconcile
-##############################################################################
-- name: Gate on reconcile
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      reconcile_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_masters_to_config)
-                                 | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
-  - set_fact:
-      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
-    when: reconcile_failed | length > 0
-
-
-
-
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
-- name: Upgrade default router and default registry
-  hosts: oo_first_master
-  vars:
-    openshift_deployment_type: "{{ deployment_type }}"
-    registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + g_new_version  ) }}"
-    router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
-    oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
-  roles:
-  # Create the new templates shipped in 3.1, existing templates are left
-  # unmodified. This prevents the subsequent role definition for
-  # openshift_examples from failing when trying to replace templates that do
-  # not already exist. We could have potentially done a replace --force to
-  # create and update in one step.
-  - openshift_examples
-  # Update the existing templates
-  - role: openshift_examples
-    openshift_examples_import_command: replace
-    registry_url: "{{ openshift.master.registry_url }}"
-  pre_tasks:
-  - name: Collect all routers
-    command: >
-      {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
-    register: all_routers
-    failed_when: false
-    changed_when: false
-
-  - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
-    when: all_routers.rc == 0
-
-  - set_fact: haproxy_routers=[]
-    when: all_routers.rc != 0
-
-  - name: Check for allowHostNetwork and allowHostPorts
-    when: all_routers.rc == 0
-    shell: >
-      {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
-    register: _scc
-
-  - name: Grant allowHostNetwork and allowHostPorts
-    when:
-    - all_routers.rc == 0
-    - "'false' in _scc.stdout"
-    command: >
-      {{ oc_cmd }} patch scc/privileged -p
-      '{"allowHostPorts":true,"allowHostNetwork":true}' --api-version=v1
-
-  - name: Update deployment config to 1.0.4/3.0.1 spec
-    when: all_routers.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
-      '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
-      --api-version=v1
-    with_items: haproxy_routers
-
-  - name: Switch to hostNetwork=true
-    when: all_routers.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
-      --api-version=v1
-    with_items: haproxy_routers
-
-  - name: Update router image to current version
-    when: all_routers.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
-      --api-version=v1
-    with_items: haproxy_routers
-    when: not openshift.common.version_gte_3_1_1_or_1_1_1
-
-  - name: Update router image to current version
-    when: all_routers.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
-      --api-version=v1
-    with_items: haproxy_routers
-    when: openshift.common.version_gte_3_1_1_or_1_1_1
-
-  - name: Check for default registry
-    command: >
-      {{ oc_cmd }} get -n default dc/docker-registry
-    register: _default_registry
-    failed_when: false
-    changed_when: false
-
-  - name: Update registry image to current version
-    when: _default_registry.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/docker-registry -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
-      --api-version=v1

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins

@@ -1 +0,0 @@
-../../../../../filter_plugins

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_1_minor/library

@@ -1 +0,0 @@
-../library

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins

@@ -1 +0,0 @@
-../../../../../lookup_plugins

+ 0 - 58
playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml

@@ -1,58 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
-- name: Upgrade default router and default registry
-  hosts: oo_first_master
-  vars:
-    openshift_deployment_type: "{{ deployment_type }}"
-    registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + g_new_version  ) }}"
-    router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
-    oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
-  roles:
-  # Create the new templates shipped in 3.1.z, existing templates are left
-  # unmodified. This prevents the subsequent role definition for
-  # openshift_examples from failing when trying to replace templates that do
-  # not already exist. We could have potentially done a replace --force to
-  # create and update in one step.
-  - openshift_examples
-  # Update the existing templates
-  - role: openshift_examples
-    openshift_examples_import_command: replace
-    registry_url: "{{ openshift.master.registry_url }}"
-  pre_tasks:
-  - name: Collect all routers
-    command: >
-      {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
-    register: all_routers
-    failed_when: false
-    changed_when: false
-
-  - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
-    when: all_routers.rc == 0
-
-  - set_fact: haproxy_routers=[]
-    when: all_routers.rc != 0
-
-  - name: Update router image to current version
-    when: all_routers.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
-      --api-version=v1
-    with_items: haproxy_routers
-
-  - name: Check for default registry
-    command: >
-      {{ oc_cmd }} get -n default dc/docker-registry
-    register: _default_registry
-    failed_when: false
-    changed_when: false
-
-  - name: Update registry image to current version
-    when: _default_registry.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/docker-registry -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
-      --api-version=v1
-

+ 0 - 88
playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml

@@ -1,88 +0,0 @@
----
-###############################################################################
-# Evaluate host groups and gather facts
-###############################################################################
-- name: Load openshift_facts
-  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
-  roles:
-  - openshift_facts
-
-###############################################################################
-# Pre-upgrade checks
-###############################################################################
-- name: Verify upgrade can proceed
-  hosts: oo_first_master
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-    target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
-  gather_facts: no
-  tasks:
-  - fail:
-      msg: >
-        This upgrade is only supported for origin, openshift-enterprise, and online
-        deployment types
-    when: deployment_type not in ['origin','openshift-enterprise', 'online']
-
-  - fail:
-      msg: >
-        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
-        valid version for a {{ target_version }} upgrade
-    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_config
-  vars:
-    target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
-  tasks:
-  - name: Clean package cache
-    command: "{{ ansible_pkg_mgr }} clean all"
-    when: not openshift.common.is_atomic | bool
-
-  - set_fact:
-      g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
-
-  - name: Determine available versions
-    script: ../files/rpm_versions.sh {{ g_new_service_name }}
-    register: g_versions_result
-
-  - set_fact:
-      g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
-
-  - set_fact:
-      g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
-
-  - fail:
-      msg: This playbook requires Origin 1.1 or later
-    when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
-
-  - fail:
-      msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
-    when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
-
-  - fail:
-      msg: Upgrade packages not found
-    when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
-
-  - set_fact:
-      pre_upgrade_complete: True
-
-
-##############################################################################
-# Gate on pre-upgrade checks
-##############################################################################
-- name: Gate on pre-upgrade checks
-  hosts: localhost
-  connection: local
-  become: no
-  vars:
-    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
-  tasks:
-  - set_fact:
-      pre_upgrade_completed: "{{ hostvars
-                                 | oo_select_keys(pre_upgrade_hosts)
-                                 | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
-  - set_fact:
-      pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
-    when: pre_upgrade_failed | length > 0

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles

@@ -1 +0,0 @@
-../../../../../roles

+ 0 - 140
playbooks/common/openshift-cluster/upgrades/v3_1_minor/upgrade.yml

@@ -1,140 +0,0 @@
----
-###############################################################################
-# The restart playbook should be run after this playbook completes.
-###############################################################################
-
-###############################################################################
-# Upgrade Masters
-###############################################################################
-- name: Upgrade master packages and configuration
-  hosts: oo_masters_to_config
-  vars:
-    openshift_version: "{{ openshift_pkg_version | default('') }}"
-  tasks:
-  - name: Upgrade master packages
-    command: "{{ ansible_pkg_mgr}} update-to -y {{ openshift.common.service_type }}-master{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}"
-    when: not openshift.common.is_containerized | bool
-
-  - name: Ensure python-yaml present for config upgrade
-    action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"
-    when: not openshift.common.is_containerized | bool
-
-# Currently 3.1.1 does not have any new configuration settings
-#
-#  - name: Upgrade master configuration
-#    openshift_upgrade_config:
-#      from_version: '3.0'
-#      to_version: '3.1'
-#      role: master
-#      config_base: "{{ hostvars[inventory_hostname].openshift.common.config_base }}"
-
-- name: Set master update status to complete
-  hosts: oo_masters_to_config
-  tasks:
-  - set_fact:
-      master_update_complete: True
-
-##############################################################################
-# Gate on master update complete
-##############################################################################
-- name: Gate on master update
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      master_update_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_masters_to_config)
-                                 | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
-  - set_fact:
-      master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
-    when: master_update_failed | length > 0
-
-###############################################################################
-# Upgrade Nodes
-###############################################################################
-- name: Upgrade nodes
-  hosts: oo_nodes_to_config
-  vars:
-    openshift_version: "{{ openshift_pkg_version | default('') }}"
-  roles:
-  - openshift_facts
-  tasks:
-  - name: Upgrade node packages
-    command: "{{ ansible_pkg_mgr }} update-to -y {{ openshift.common.service_type }}-node{{ openshift_version }} {{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }}"
-    when: not openshift.common.is_containerized | bool
-
-  - name: Restart node service
-    service: name="{{ openshift.common.service_type }}-node" state=restarted
-
-  - set_fact:
-      node_update_complete: True
-
-##############################################################################
-# Gate on nodes update
-##############################################################################
-- name: Gate on nodes update
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      node_update_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_nodes_to_config)
-                                 | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
-  - set_fact:
-      node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
-    when: node_update_failed | length > 0
-
-###############################################################################
-# Reconcile Cluster Roles and Cluster Role Bindings
-###############################################################################
-- name: Reconcile Cluster Roles and Cluster Role Bindings
-  hosts: oo_masters_to_config
-  vars:
-    origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
-    ent_reconcile_bindings: true
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-  tasks:
-  - name: Reconcile Cluster Roles
-    command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      policy reconcile-cluster-roles --additive-only=true --confirm
-    run_once: true
-
-  - name: Reconcile Cluster Role Bindings
-    command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      policy reconcile-cluster-role-bindings
-      --exclude-groups=system:authenticated
-      --exclude-groups=system:authenticated:oauth
-      --exclude-groups=system:unauthenticated
-      --exclude-users=system:anonymous
-      --additive-only=true --confirm
-    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
-    run_once: true
-
-  - set_fact:
-      reconcile_complete: True
-
-##############################################################################
-# Gate on reconcile
-##############################################################################
-- name: Gate on reconcile
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      reconcile_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_masters_to_config)
-                                 | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
-  - set_fact:
-      reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
-    when: reconcile_failed | length > 0

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/containerized_upgrade.yml

@@ -1,7 +1,7 @@
 - include_vars: ../../../../../roles/openshift_node/vars/main.yml
 
 - name: Update systemd units
-  include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+  include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }}
 
 - name: Verifying the correct version was configured
   shell: grep {{ verify_upgrade_version }} {{ item }}

+ 0 - 14
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/docker_upgrade.yml

@@ -1,14 +0,0 @@
-- name: Check if Docker is installed
-  command: rpm -q docker
-  register: pkg_check
-  failed_when: pkg_check.rc > 1
-  changed_when: no
-
-- name: Upgrade Docker
-  command: "{{ ansible_pkg_mgr}} update -y docker"
-  when: pkg_check.rc == 0 and g_docker_version.curr_version | version_compare('1.9','<')
-  register: docker_upgrade
-
-- name: Restart Docker
-  command: systemctl restart docker
-  when: docker_upgrade | changed

+ 0 - 24
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/node_upgrade.yml

@@ -1,24 +0,0 @@
-- name: Prepare for Node evacuation
-  command: >
-    {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
-  delegate_to: "{{ groups.oo_first_master.0 }}"
-
-- name: Evacuate Node for Kubelet upgrade
-  command: >
-    {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
-  delegate_to: "{{ groups.oo_first_master.0 }}"
-
-- include: rpm_upgrade.yml
-  vars:
-     component: "node"
-     openshift_version: "{{ openshift_pkg_version | default('') }}"
-  when: not openshift.common.is_containerized | bool
-
-- include: containerized_upgrade.yml
-  when: openshift.common.is_containerized | bool
-
-- name: Set node schedulability
-  command: >
-    {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
-  delegate_to: "{{ groups.oo_first_master.0 }}"
-  when: openshift.node.schedulable | bool

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.dep.service

@@ -0,0 +1 @@
+../../../../../roles/openshift_node/templates/openshift.docker.node.dep.service

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openshift.docker.node.service

@@ -0,0 +1 @@
+../../../../../roles/openshift_node/templates/openshift.docker.node.service

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.docker.service

@@ -0,0 +1 @@
+../../../../../roles/openshift_node/templates/openvswitch.docker.service

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/openvswitch.sysconfig.j2

@@ -0,0 +1 @@
+../../../../../roles/openshift_node/templates/openvswitch.sysconfig.j2

+ 2 - 2
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/post.yml

@@ -6,8 +6,8 @@
   hosts: oo_first_master
   vars:
     openshift_deployment_type: "{{ deployment_type }}"
-    registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + g_new_version  ) }}"
-    router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
+    registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', openshift_image_tag ) }}"
+    router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}"
     oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
   roles:
   - openshift_manageiq

+ 65 - 73
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml

@@ -2,10 +2,12 @@
 ###############################################################################
 # Evaluate host groups and gather facts
 ###############################################################################
-- name: Load openshift_facts and update repos
+
+- include: ../../initialize_facts.yml
+
+- name: Update repos and initialize facts on all hosts
   hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
   roles:
-  - openshift_facts
   - openshift_repos
 
 - name: Set openshift_no_proxy_internal_hostnames
@@ -34,10 +36,10 @@
 ###############################################################################
 # Pre-upgrade checks
 ###############################################################################
-- name: Verify upgrade can proceed
+- name: Verify upgrade can proceed on first master
   hosts: oo_first_master
   vars:
-    target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+    target_version: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
     g_pacemaker_upgrade_url_segment: "{{ 'org/latest' if deployment_type =='origin' else '.com/enterprise/3.1' }}"
   gather_facts: no
   tasks:
@@ -53,6 +55,11 @@
         https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
     when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
 
+  # Error out in situations where the user has older versions specified in their
+  # inventory in any of the openshift_release, openshift_image_tag, and
+  # openshift_pkg_version variables. These must be removed or updated to proceed
+  # with upgrade.
+  # TODO: Should we block if you're *over* the next major release version as well?
   - fail:
       msg: >
         openshift_pkg_version is {{ openshift_pkg_version }} which is not a
@@ -65,6 +72,28 @@
         valid version for a {{ target_version }} upgrade
     when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(target_version ,'<')
 
+  - set_fact:
+      openshift_release: "{{ openshift_release[1:] }}"
+    when: openshift_release is defined and openshift_release[0] == 'v'
+
+  - fail:
+      msg: >
+        openshift_release is {{ openshift_release }} which is not a
+        valid release for a {{ target_version }} upgrade
+    when: openshift_release is defined and not openshift_release | version_compare(target_version ,'=')
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+  vars:
+    # Request openshift_release 3.2 and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "3.2"
+    openshift_protect_installed_version: False
+    # Docker role (a dependency) should be told not to do anything to installed version
+    # of docker, we handle this separately during upgrade. (the inventory may have a
+    # docker_version defined, we don't want to actually do it until later)
+    docker_protect_installed_version: True
+
 - name: Verify master processes
   hosts: oo_masters_to_config
   roles:
@@ -100,6 +129,7 @@
   hosts: oo_nodes_to_config
   roles:
   - openshift_facts
+  - openshift_docker_facts
   tasks:
   - name: Ensure Node is running
     service:
@@ -111,19 +141,17 @@
 - name: Verify upgrade targets
   hosts: oo_masters_to_config:oo_nodes_to_config
   vars:
-    target_version: "{{ '1.2' if deployment_type == 'origin' else '3.1.1.900' }}"
+    target_version: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
     openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
-    upgrading: True
-  handlers:
-  - include: ../../../../../roles/openshift_master/handlers/main.yml
-  - include: ../../../../../roles/openshift_node/handlers/main.yml
-  roles:
-  # We want the cli role to evaluate so that the containerized oc/oadm wrappers
-  # are modified to use the correct image tag.  However, this can trigger a
-  # docker restart if new configuration is laid down which would immediately
-  # pull the latest image and defeat the purpose of these tasks.
-  - { role: openshift_cli }
   pre_tasks:
+  - fail:
+      msg: Verify OpenShift is already installed
+    when: openshift.common.version is not defined
+
+  - fail:
+      msg: Verify the correct version was found
+    when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
+
   - name: Clean package cache
     command: "{{ ansible_pkg_mgr }} clean all"
     when: not openshift.common.is_atomic | bool
@@ -132,58 +160,26 @@
       g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
     when: not openshift.common.is_containerized | bool
 
-  - name: Determine available versions
-    script: ../files/rpm_versions.sh {{ g_new_service_name }}
-    register: g_rpm_versions_result
-    when: not openshift.common.is_containerized | bool
-
-  - set_fact:
-      g_aos_versions: "{{ g_rpm_versions_result.stdout | from_yaml }}"
-    when: not openshift.common.is_containerized | bool
-
-  - name: Determine available versions
-    script: ../files/openshift_container_versions.sh {{ openshift.common.service_type }}
-    register: g_containerized_versions_result
-    when: openshift.common.is_containerized | bool
-
-  - set_fact:
-      g_aos_versions: "{{ g_containerized_versions_result.stdout | from_yaml }}"
+  - name: Verify containers are available for upgrade
+    command: >
+      docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
     when: openshift.common.is_containerized | bool
 
-  - set_fact:
-      g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
-    when: openshift_pkg_version is not defined
-
-  - set_fact:
-      g_new_version: "{{ openshift_pkg_version | replace('-','') }}"
-    when: openshift_pkg_version is defined
-
-  - set_fact:
-      g_new_version: "{{ openshift_image_tag | replace('v','') }}"
-    when: openshift_image_tag is defined
-
-  - fail:
-      msg: Verifying the correct version was found
-    when: g_aos_versions.curr_version == ""
-
-  - fail:
-      msg: Verifying the correct version was found
-    when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
-
-  - include_vars: ../../../../../roles/openshift_master/vars/main.yml
-    when: inventory_hostname in groups.oo_masters_to_config
-
-  - name: Update systemd units
-    include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
-    when: inventory_hostname in groups.oo_masters_to_config
+  - name: Check latest available OpenShift RPM version
+    command: >
+      {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+    failed_when: false
+    changed_when: false
+    register: avail_openshift_version
+    when: not openshift.common.is_containerized | bool
 
-  - include_vars: ../../../../../roles/openshift_node/vars/main.yml
-    when: inventory_hostname in groups.oo_nodes_to_config
+  - name: Verify OpenShift 3.2 RPMs are available for upgrade
+    fail:
+      msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but 3.2 or greater is required"
+    when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare('3.2', '<')
 
-  - name: Update systemd units
-    include: ../../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
-    when: inventory_hostname in groups.oo_nodes_to_config
 
+  # TODO: Are these two grep checks necessary anymore?
   # Note: the version number is hardcoded here in hopes of catching potential
   # bugs in how g_aos_versions.curr_version is set
   - name: Verifying the correct version is installed for upgrade
@@ -198,19 +194,15 @@
     with_items:
       - /etc/systemd/system/openvswitch.service
       - /etc/systemd/system/{{ openshift.common.service_type }}*.service
-    when: openshift.common.is_containerized | bool
-
-  - fail:
-      msg: This playbook requires Origin 1.1 or later
-    when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
+    when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
 
   - fail:
-      msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
-    when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
+      msg: This upgrade playbook must be run on Origin 1.1 or later
+    when: deployment_type == 'origin' and openshift.common.version | version_compare('1.1','<')
 
   - fail:
-      msg: Upgrade packages not found
-    when: openshift_image_tag is not defined and (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
+      msg: This upgrade playbook must be run on OpenShift Enterprise 3.1 or later
+    when: deployment_type == 'atomic-openshift' and openshift.common.version | version_compare('3.1','<')
 
 - name: Verify docker upgrade targets
   hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
@@ -234,8 +226,8 @@
     when: openshift.common.is_atomic | bool
 
   - fail:
-      msg: This playbook requires access to Docker 1.9 or later
-    when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.9','<')
+      msg: This playbook requires access to Docker 1.10 or later
+    when: g_docker_version.avail_version | default(g_docker_version.curr_version, true) | version_compare('1.10','<')
 
   # TODO: add check to upgrade ostree to get latest Docker
 

+ 2 - 1
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/rpm_upgrade.yml

@@ -1,5 +1,6 @@
+# We verified latest rpm available is suitable, so just yum update.
 - name: Upgrade packages
-  command: "{{ ansible_pkg_mgr}} update -y {{ openshift.common.service_type }}-{{ component }}-{{ g_new_version }}"
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present"
 
 - name: Ensure python-yaml present for config upgrade
   action: "{{ ansible_pkg_mgr }} name=PyYAML state=present"

+ 44 - 69
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml

@@ -3,49 +3,6 @@
 # The restart playbook should be run after this playbook completes.
 ###############################################################################
 
-- name: Upgrade docker
-  hosts: oo_masters_to_config:oo_nodes_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  - include: docker_upgrade.yml
-    when: not openshift.common.is_atomic | bool
-  - name: Set post docker install facts
-    openshift_facts:
-      role: "{{ item.role }}"
-      local_facts: "{{ item.local_facts }}"
-    with_items:
-    - role: docker
-      local_facts:
-        openshift_image_tag: "v{{ g_new_version }}"
-        openshift_version: "{{ g_new_version }}"
-
-- name: Upgrade docker
-  hosts: oo_etcd_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  # Upgrade docker when host is not atomic and host is not a non-containerized etcd node
-  - include: docker_upgrade.yml
-    when: not openshift.common.is_atomic | bool and not ('oo_etcd_to_config' in group_names and not openshift.common.is_containerized)
-
-# The cli image is used by openshift_docker_facts to determine the currently installed
-# version.  We need to explicitly pull the latest image to handle cases where
-# the locally cached 'latest' tag is older the g_new_version.
-- name: Download cli image
-  hosts: oo_masters_to_config:oo_nodes_to_config
-  roles:
-  - { role: openshift_docker_facts }
-  vars:
-    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
-  tasks:
-  - name: Pull Images
-    command: >
-      docker pull {{ item }}:latest
-    with_items:
-    - "{{ openshift.common.cli_image }}"
-    when: openshift.common.is_containerized | bool
-
 ###############################################################################
 # Upgrade Masters
 ###############################################################################
@@ -62,7 +19,7 @@
   - include_vars: ../../../../../roles/openshift_master/vars/main.yml
 
   - name: Update systemd units
-    include: ../../../../../roles/openshift_master/tasks/systemd_units.yml openshift_version=v{{ g_new_version }}
+    include: ../../../../../roles/openshift_master/tasks/systemd_units.yml
 
 #  - name: Upgrade master configuration
 #    openshift_upgrade_config:
@@ -98,36 +55,55 @@
 ###############################################################################
 # Upgrade Nodes
 ###############################################################################
-- name: Upgrade nodes
-  hosts: oo_nodes_to_config
+
+# Here we handle all tasks that might require a node evac. (upgrading docker, and the node service)
+- name: Perform upgrades that may require node evacuation
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_nodes_to_config
   serial: 1
+  any_errors_fatal: true
   roles:
   - openshift_facts
   handlers:
   - include: ../../../../../roles/openshift_node/handlers/main.yml
   tasks:
-  - include: node_upgrade.yml
+  # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
+  # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
+  # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+  - name: Mark unschedulable if host is a node
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
 
-  - set_fact:
-      node_update_complete: True
+  - name: Evacuate Node for Kubelet upgrade
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --evacuate --force
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  # Only check if docker upgrade is required if docker_upgrade is not
+  # already set to False.
+  - include: ../docker/upgrade_check.yml
+    when: docker_upgrade is not defined or docker_upgrade | bool
+
+  - include: ../docker/upgrade.yml
+    when: l_docker_upgrade is defined and l_docker_upgrade | bool
+
+  - include: rpm_upgrade.yml
+    vars:
+       component: "node"
+       openshift_version: "{{ openshift_pkg_version | default('') }}"
+    when: inventory_hostname in groups.oo_nodes_to_config and not openshift.common.is_containerized | bool
+
+  - include: containerized_node_upgrade.yml
+    when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool
+
+  - name: Set node schedulability
+    command: >
+      {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    when: inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
 
-##############################################################################
-# Gate on nodes update
-##############################################################################
-- name: Gate on nodes update
-  hosts: localhost
-  connection: local
-  become: no
-  tasks:
-  - set_fact:
-      node_update_completed: "{{ hostvars
-                                 | oo_select_keys(groups.oo_nodes_to_config)
-                                 | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
-  - set_fact:
-      node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
-    when: node_update_failed | length > 0
 
 ###############################################################################
 # Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
@@ -136,12 +112,11 @@
 - name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
   hosts: oo_masters_to_config
   roles:
-  - { role: openshift_cli, openshift_image_tag: "v{{ g_new_version }}" }
+  - { role: openshift_cli }
   vars:
-    origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
+    origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
     ent_reconcile_bindings: true
     openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
-    upgrading: True
   tasks:
   - name: Verifying the correct commandline tools are available
     shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}

+ 2 - 4
playbooks/common/openshift-master/config.yml

@@ -199,7 +199,6 @@
                                | oo_collect('openshift.common.all_hostnames')
                                | oo_flatten | unique }}"
     sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
-    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
   roles:
   - openshift_master_certificates
   post_tasks:
@@ -305,7 +304,7 @@
     with_items: openshift_master_named_certificates
     when: named_certs_specified | bool
 
-- name: Configure master instances
+- name: Configure masters
   hosts: oo_masters_to_config
   any_errors_fatal: true
   serial: 1
@@ -315,13 +314,12 @@
     openshift_master_count: "{{ openshift.master.master_count }}"
     openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
     openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
-    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
     openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
                                                     | union(groups['oo_masters_to_config'])
                                                     | union(groups['oo_etcd_to_config'] | default([])))
                                                 | oo_collect('openshift.common.hostname') | default([]) | join (',')
                                                 }}"
-    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and 
+    when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
             openshift_generate_no_proxy_hosts | default(True) | bool }}"
   pre_tasks:
   - name: Ensure certificate directory exists

+ 1 - 1
roles/docker/defaults/main.yml

@@ -1,2 +1,2 @@
 ---
-docker_version: ''
+docker_protect_installed_version: False

+ 39 - 1
roles/docker/tasks/main.yml

@@ -2,10 +2,48 @@
 - stat: path=/etc/sysconfig/docker-storage
   register: docker_storage_check
 
+- name: Get current installed Docker version
+  command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
+  when: not openshift.common.is_atomic | bool
+  register: curr_docker_version
+  changed_when: false
+
# Fail fast when a pre-installed Docker is older than the supported minimum
# and the user has neither pinned docker_version nor opted to protect the
# installed version.
- name: Error out if Docker pre-installed but too old
  fail:
    msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
  when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and docker_version is not defined and not docker_protect_installed_version | bool
+
+- name: Error out if requested Docker is too old
+  fail:
+    msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
+  when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
+
+- name: Get latest available version of Docker
+  command: >
+    {{ repoquery_cmd }} --qf '%{version}' "docker"
+  register: avail_docker_version
+  failed_when: false
+  changed_when: false
+  when: docker_version is defined and not openshift.common.is_atomic | bool
+
+# If a docker_version was requested, sanity check that we can install or upgrade to it, and
+# no downgrade is required.
+- name: Fail if Docker version requested but downgrade is required
+  fail:
+    msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
+  when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>') and not docker_protect_installed_version | bool
+
+# Upgrading across the Docker 1.10 boundary involves an extremely slow
+# migration process; users should instead run the dedicated Docker 1.10
+# upgrade playbook to accomplish this.
+- name: Error out if attempting to upgrade Docker across the 1.10 boundary
+  fail:
+    msg: "Cannot upgrade Docker to >= 1.10, please use the Docker upgrade playbook for this."
+  when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=') and not docker_protect_installed_version | bool
+
 # Make sure Docker is installed, but does not update a running version.
 # Docker upgrades are handled by a separate playbook.
 - name: Install Docker
-  action: "{{ ansible_pkg_mgr }} name=docker state=present"
+  action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and not docker_protect_installed_version | bool else '' }} state=present"
   when: not openshift.common.is_atomic | bool
 
 - name: Start the Docker service

+ 53 - 0
roles/openshift_ca/tasks/main.yml

@@ -0,0 +1,53 @@
+---
+- fail:
+    msg: "openshift_ca_host variable must be defined for this role"
+  when: openshift_ca_host is not defined
+
+- name: Install the base package for admin tooling
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
+  when: not openshift.common.is_containerized | bool
+  register: install_result
+  delegate_to: "{{ openshift_ca_host }}"
+  run_once: true
+
+- name: Reload generated facts
+  openshift_facts:
+  when: install_result | changed
+  delegate_to: "{{ openshift_ca_host }}"
+  run_once: true
+
+- name: Create openshift_ca_config_dir if it does not exist
+  file:
+    path: "{{ openshift_ca_config_dir }}"
+    state: directory
+  delegate_to: "{{ openshift_ca_host }}"
+  run_once: true
+
+- name: Determine if CA must be created
+  stat:
+    path: "{{ openshift_ca_config_dir }}/{{ item }}"
+  register: g_master_ca_stat_result
+  with_items:
+  - ca.crt
+  - ca.key
+  delegate_to: "{{ openshift_ca_host }}"
+  run_once: true
+
+- set_fact:
+    master_ca_missing: "{{ False in (g_master_ca_stat_result.results
+                           | oo_collect(attribute='stat.exists')
+                           | list) }}"
+  delegate_to: "{{ openshift_ca_host }}"
+  run_once: true
+
+- name: Create the master certificates if they do not already exist
+  command: >
+    {{ openshift.common.admin_binary }} create-master-certs
+      --hostnames={{ openshift_master_hostnames | join(',') }}
+      --master={{ openshift.master.api_url }}
+      --public-master={{ openshift.master.public_api_url }}
+      --cert-dir={{ openshift_ca_config_dir }}
+      --overwrite=false
+  when: hostvars[openshift_ca_host].master_ca_missing | bool
+  delegate_to: "{{ openshift_ca_host }}"
+  run_once: true

+ 0 - 1
roles/openshift_cli/defaults/main.yml

@@ -1,2 +1 @@
 ---
-openshift_version: "{{ openshift_image_tag | default(openshift.docker.openshift_image_tag | default('')) }}"

+ 4 - 1
roles/openshift_cli/tasks/main.yml

@@ -5,7 +5,7 @@
 
 - name: Pull CLI Image
   command: >
-    docker pull {{ openshift.common.cli_image }}{{ ':' + openshift_version if openshift_version is defined and openshift_version != '' else '' }}
+    docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
   when: openshift.common.is_containerized | bool
 
 - name: Create /usr/local/bin/openshift cli wrapper
@@ -26,6 +26,9 @@
     - /usr/local/bin/kubectl
   when: openshift.common.is_containerized | bool
 
+- name: Reload facts to pick up installed OpenShift version
+  openshift_facts:
+
 - name: Install bash completion for oc tools
   action: "{{ ansible_pkg_mgr }} name=bash-completion state=present"
   when: not openshift.common.is_containerized | bool

+ 1 - 1
roles/openshift_cli/templates/openshift.j2

@@ -5,7 +5,7 @@ fi
 cmd=`basename $0`
 user=`id -u`
 group=`id -g`
-image_tag="{{ openshift_version }}"
+image_tag="{{ openshift_image_tag }}"
 
 >&2 echo """
 ================================================================================

+ 0 - 1
roles/openshift_common/defaults/main.yml

@@ -1,4 +1,3 @@
 ---
 openshift_cluster_id: 'default'
 openshift_debug_level: 2
-openshift_version: "{{ openshift_pkg_version | default('') }}"

+ 1 - 0
roles/openshift_common/meta/main.yml

@@ -14,3 +14,4 @@ galaxy_info:
 dependencies:
 - role: openshift_facts
 - role: openshift_repos
+- role: openshift_version

+ 2 - 5
roles/openshift_common/tasks/main.yml

@@ -29,12 +29,8 @@
       data_dir: "{{ openshift_data_dir | default(None) }}"
       use_dnsmasq: "{{ openshift_use_dnsmasq | default(None) }}"
 
-# Using oo_image_tag_to_rpm_version here is a workaround for how
-# openshift_version is set.  That value is computed based on either RPM
-# versions or image tags.  openshift_common's usage requires that it be a RPM
-# version and openshift_cli expects it to be an image tag.
 - name: Install the base package for versioning
-  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
   when: not openshift.common.is_containerized | bool
 
 - name: Set version facts
@@ -49,3 +45,4 @@
   command: >
     hostnamectl set-hostname {{ openshift.common.hostname }}
   when: openshift_set_hostname | default(set_hostname_default) | bool
+

+ 0 - 1
roles/openshift_docker/defaults/main.yml

@@ -1,2 +1 @@
 ---
-upgrading: False

+ 1 - 1
roles/openshift_docker/meta/main.yml

@@ -12,6 +12,6 @@ galaxy_info:
   categories:
   - cloud
 dependencies:
-- role: openshift_repos
+- role: openshift_version
 - role: openshift_docker_facts
 - role: docker

+ 0 - 40
roles/openshift_docker/tasks/main.yml

@@ -1,41 +1 @@
 ---
-# It's important that we don't explicitly pull this image here.  Otherwise we
-# could result in upgrading a preinstalled environment.  We'll have to set
-# openshift_image_tag correctly for upgrades.
-- set_fact:
-    is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
-    # Does the host already have an image tag fact, used to determine if it's a new node
-    # in non-upgrade scenarios:
-    has_image_tag_fact: "{{ hostvars[inventory_hostname].openshift.docker.openshift_image_tag is defined }}"
-
-- name: Set version when containerized
-  command: >
-    docker run --rm {{ openshift.common.cli_image }} version
-  register: cli_image_version
-  when: is_containerized | bool and openshift_image_tag is not defined and (upgrading | bool or not has_image_tag_fact | bool)
-
-# Use the pre-existing image tag from system facts if present, and we're not upgrading.
-# Ignores explicit openshift_image_tag if it's in the inventory, as this isn't an upgrade.
-- set_fact:
-    l_image_tag: "{{ hostvars[inventory_hostname].openshift.docker.openshift_image_tag }}"
-  when: is_containerized | bool and not upgrading | bool and has_image_tag_fact | bool
-
-- set_fact:
-    l_image_tag: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2] | join('-') if openshift.common.deployment_type == 'origin' else
-                     cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0] }}"
-  when: is_containerized | bool and openshift_image_tag is not defined and (upgrading | bool or not has_image_tag_fact | bool)
-
-- set_fact:
-    l_image_tag: "{{ openshift_image_tag }}"
-  when: is_containerized | bool and openshift_image_tag is defined and (upgrading | bool or not has_image_tag_fact | bool)
-
-- name: Set post docker install facts
-  openshift_facts:
-    role: "{{ item.role }}"
-    local_facts: "{{ item.local_facts }}"
-  with_items:
-  - role: docker
-    local_facts:
-      openshift_image_tag: "{{ l_image_tag | default(None) }}"
-      openshift_version: "{{ l_image_tag.split('-')[0] | oo_image_tag_to_rpm_version if l_image_tag is defined else '' }}"
-  when: is_containerized | bool

+ 0 - 1
roles/openshift_docker_facts/defaults/main.yml

@@ -1,2 +1 @@
 ---
-openshift_version: "{{ openshift_image_tag | default(openshift.docker.openshift_image_tag | default('')) }}"

+ 2 - 20
roles/openshift_docker_facts/tasks/main.yml

@@ -34,26 +34,8 @@
 - set_fact:
     docker_options: "--insecure-registry={{ openshift.docker.hosted_registry_network }} {{ openshift.docker.options | default ('') }}"
   when: openshift.docker.hosted_registry_insecure | default(False) | bool and openshift.docker.hosted_registry_network is defined
+  register: hosted_registry_options
 
 - set_fact:
     docker_options: "{{ openshift.docker.options | default(omit) }}"
-  when: not openshift.docker.hosted_registry_insecure | default(False) | bool
-
-# Avoid docker 1.9 when installing origin < 1.2 or OSE < 3.2 on RHEL/Centos and
-# See: https://bugzilla.redhat.com/show_bug.cgi?id=1304038
-- name: Gather common package version
-  command: >
-    {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type}}"
-  register: common_version
-  failed_when: false
-  changed_when: false
-  when: not openshift.common.is_containerized | bool
-
-- set_fact:
-    l_common_version: "{{ openshift_version | default('0.0', True) | oo_image_tag_to_rpm_version }}"
-  when: openshift.common.is_containerized | bool
-
-- set_fact:
-    l_common_version: "{{ common_version.stdout | default('0.0', True) }}"
-  when: not openshift.common.is_containerized | bool
-
+  when: hosted_registry_options | skipped

+ 54 - 30
roles/openshift_facts/library/openshift_facts.py

@@ -7,16 +7,6 @@
 
 """Ansible module for retrieving and setting openshift related facts"""
 
-DOCUMENTATION = '''
----
-module: openshift_facts
-short_description: Cluster Facts
-author: Jason DeTiberus
-requirements: [ ]
-'''
-EXAMPLES = '''
-'''
-
 import ConfigParser
 import copy
 import io
@@ -30,6 +20,17 @@ from dbus import SystemBus, Interface
 from dbus.exceptions import DBusException
 
 
+DOCUMENTATION = '''
+---
+module: openshift_facts
+short_description: Cluster Facts
+author: Jason DeTiberus
+requirements: [ ]
+'''
+EXAMPLES = '''
+'''
+
+
 def migrate_docker_facts(facts):
     """ Apply migrations for docker facts """
     params = {
@@ -505,10 +506,8 @@ def set_dnsmasq_facts_if_unset(facts):
     """
 
     if 'common' in facts:
-        if 'use_dnsmasq' not in facts['common'] and safe_get_bool(facts['common']['version_gte_3_2_or_1_2']):
-            facts['common']['use_dnsmasq'] = True
-        else:
-            facts['common']['use_dnsmasq'] = False
+        facts['common']['use_dnsmasq'] = bool('use_dnsmasq' not in facts['common'] and
+                                              safe_get_bool(facts['common']['version_gte_3_2_or_1_2']))
         if 'master' in facts and 'dns_port' not in facts['master']:
             if safe_get_bool(facts['common']['use_dnsmasq']):
                 facts['master']['dns_port'] = 8053
@@ -832,7 +831,7 @@ def set_version_facts_if_unset(facts):
     if 'common' in facts:
         deployment_type = facts['common']['deployment_type']
         version = get_openshift_version(facts)
-        if version is not None:
+        if version:
             facts['common']['version'] = version
             if deployment_type == 'origin':
                 version_gte_3_1_or_1_1 = LooseVersion(version) >= LooseVersion('1.1.0')
@@ -1126,7 +1125,9 @@ def get_docker_version_info():
     return result
 
 def get_openshift_version(facts):
-    """ Get current version of openshift on the host
+    """ Get current version of openshift on the host.
+
+        Checks a variety of ways ranging from fastest to slowest.
 
         Args:
             facts (dict): existing facts
@@ -1146,18 +1147,40 @@ def get_openshift_version(facts):
     if os.path.isfile('/usr/bin/openshift'):
         _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])
         version = parse_openshift_version(output)
-
-    # openshift_facts runs before openshift_docker_facts.  However, it will be
-    # called again and set properly throughout the playbook run.  This could be
-    # refactored to simply set the openshift.common.version in the
-    # openshift_docker_facts role but it would take reworking some assumptions
-    # on how get_openshift_version is called.
-    if 'is_containerized' in facts['common'] and safe_get_bool(facts['common']['is_containerized']):
-        if 'docker' in facts and 'openshift_version' in facts['docker']:
-            version = facts['docker']['openshift_version']
+    elif 'common' in facts and 'is_containerized' in facts['common']:
+        version = get_container_openshift_version(facts)
+
+    # Handle containerized masters that have not yet been configured as a node.
+    # This can be very slow and may get re-run multiple times, so we only use this
+    # if other methods failed to find a version.
+    if not version and os.path.isfile('/usr/local/bin/openshift'):
+        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])
+        version = parse_openshift_version(output)
 
     return version
 
+
def get_container_openshift_version(facts):
    """ Determine the installed OpenShift version on a containerized host.

        Reads the IMAGE_VERSION line from the master/node systemd environment
        files (/etc/sysconfig/<service_type>-master and -node).

        Args:
            facts (dict): existing facts; facts['common']['service_type']
                must be present
        Returns:
            string: plain version number (e.g. '3.2.0.20'), or None if no
                environment file with an IMAGE_VERSION line was found
    """
    for filename in ['/etc/sysconfig/%s-master', '/etc/sysconfig/%s-node']:
        env_path = filename % facts['common']['service_type']
        if not os.path.exists(env_path):
            continue

        with open(env_path) as env_file:
            for line in env_file:
                if line.startswith("IMAGE_VERSION="):
                    tag = line[len("IMAGE_VERSION="):].strip()
                    # Drop the leading "v" only if actually present (blindly
                    # slicing tag[1:] would corrupt an unprefixed version),
                    # and drop any trailing release info; we just want a
                    # plain version number here.
                    if tag.startswith('v'):
                        tag = tag[1:]
                    return tag.split("-")[0]
    return None
+
+
 def parse_openshift_version(output):
     """ Apply provider facts to supplied facts dict
 
@@ -1167,7 +1190,11 @@ def parse_openshift_version(output):
             string: the version number
     """
     versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
-    return versions.get('openshift', '')
+    ver = versions.get('openshift', '')
+    # Remove trailing build number and commit hash from older versions, we need to return a straight
+    # w.x.y.z version here for use as openshift_version throughout the playbooks/roles. (i.e. 3.1.1.6-64-g80b61da)
+    ver = ver.split('-')[0]
+    return ver
 
 
 def apply_provider_facts(facts, provider_facts):
@@ -1747,10 +1774,7 @@ class OpenShiftFacts(object):
 
         if 'clock' in roles:
             exit_code, _, _ = module.run_command(['rpm', '-q', 'chrony'])
-            if exit_code == 0:
-                chrony_installed = True
-            else:
-                chrony_installed = False
+            chrony_installed = bool(exit_code == 0)
             defaults['clock'] = dict(
                 enabled=True,
                 chrony_installed=chrony_installed)

+ 5 - 0
roles/openshift_facts/tasks/main.yml

@@ -41,3 +41,8 @@
       no_proxy: "{{ openshift_no_proxy | default(None) }}"
       generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
       no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+
+- name: Set repoquery command
+  set_fact:
+    repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
+

+ 1 - 1
roles/openshift_master/defaults/main.yml

@@ -1,4 +1,4 @@
 ---
 openshift_node_ips: []
 # TODO: update setting these values based on the facts
-openshift_version: "{{ openshift_pkg_version | default(openshift_image_tag | default(openshift.docker.openshift_image_tag | default(''))) }}"
+#openshift_version: "{{ openshift_pkg_version | default(openshift_image_tag | default(openshift.docker.openshift_image_tag | default(''))) }}"

+ 2 - 2
roles/openshift_master/tasks/main.yml

@@ -24,12 +24,12 @@
   when: openshift_master_ha | bool and openshift_master_cluster_method == "pacemaker" and openshift.common.is_containerized | bool
 
 - name: Install Master package
-  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
   when: not openshift.common.is_containerized | bool
 
 - name: Pull master image
   command: >
-    docker pull {{ openshift.master.master_image }}{{ ':' + openshift_version if openshift_version is defined and openshift_version != '' else '' }}
+    docker pull {{ openshift.master.master_image }}:{{ openshift_image_tag }}
   when: openshift.common.is_containerized | bool
 
 - name: Create openshift.common.data_dir

+ 1 - 1
roles/openshift_master/templates/atomic-openshift-master.j2

@@ -1,7 +1,7 @@
 OPTIONS=--loglevel={{ openshift.master.debug_level }}
 CONFIG_FILE={{ openshift_master_config_file }}
 {% if openshift.common.is_containerized | bool %}
-IMAGE_VERSION={{ openshift_version }}
+IMAGE_VERSION={{ openshift_image_tag }}
 {% endif %}
 
 {% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %}

+ 1 - 1
roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2

@@ -1,7 +1,7 @@
 OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
 CONFIG_FILE={{ openshift_master_config_file }}
 {% if openshift.common.is_containerized | bool %}
-IMAGE_VERSION={{ openshift_version }}
+IMAGE_VERSION={{ openshift_image_tag }}
 {% endif %}
 
 {% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %}

+ 1 - 1
roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2

@@ -1,7 +1,7 @@
 OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
 CONFIG_FILE={{ openshift_master_config_file }}
 {% if openshift.common.is_containerized | bool %}
-IMAGE_VERSION={{ openshift_version }}
+IMAGE_VERSION={{ openshift_image_tag }}
 {% endif %}
 
 {% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %}

+ 2 - 1
roles/openshift_master_ca/tasks/main.yml

@@ -1,6 +1,7 @@
 ---
+
 - name: Install the base package for admin tooling
-  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version  }} state=present"
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
   when: not openshift.common.is_containerized | bool
   register: install_result
 

+ 0 - 1
roles/openshift_master_ca/vars/main.yml

@@ -3,4 +3,3 @@ openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
 openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
 openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
 openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
-openshift_version: "{{ openshift_pkg_version | default('') }}"

+ 14 - 1
roles/openshift_node/defaults/main.yml

@@ -1,2 +1,15 @@
 ---
-openshift_version: "{{ openshift_pkg_version | default(openshift_image_tag | default(openshift.docker.openshift_image_tag | default(''))) }}"
+os_firewall_allow:
+- service: Kubernetes kubelet
+  port: 10250/tcp
+- service: http
+  port: 80/tcp
+- service: https
+  port: 443/tcp
+- service: Openshift kubelet ReadOnlyPort
+  port: 10255/tcp
+- service: Openshift kubelet ReadOnlyPort udp
+  port: 10255/udp
+- service: OpenShift OVS sdn
+  port: 4789/udp
+  when: openshift.node.use_openshift_sdn | bool

+ 4 - 4
roles/openshift_node/tasks/main.yml

@@ -31,21 +31,21 @@
 # We have to add tuned-profiles in the same transaction otherwise we run into depsolving
 # problems because the rpms don't pin the version properly. This was fixed in 3.1 packaging.
 - name: Install Node package
-  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
   when: not openshift.common.is_containerized | bool
 
 - name: Install sdn-ovs package
-  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
   when: openshift.common.use_openshift_sdn and not openshift.common.is_containerized | bool
 
 - name: Pull node image
   command: >
-    docker pull {{ openshift.node.node_image }}{{ ':' + openshift_version if openshift_version is defined and openshift_version != '' else '' }}
+    docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
   when: openshift.common.is_containerized | bool
 
 - name: Pull OpenVSwitch image
   command: >
-    docker pull {{ openshift.node.ovs_image }}{{ ':' + openshift_version if openshift_version is defined and openshift_version != '' else '' }}
+    docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
   when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
 
 - name: Install the systemd units

+ 1 - 1
roles/openshift_node/tasks/systemd_units.yml

@@ -44,6 +44,6 @@
     - regex: '^CONFIG_FILE='
       line: "CONFIG_FILE={{ openshift_node_config_file }}"
     - regex: '^IMAGE_VERSION='
-      line: "IMAGE_VERSION={{ openshift_version }}"
+      line: "IMAGE_VERSION={{ openshift_image_tag }}"
   notify:
   - restart node

+ 1 - 1
roles/openshift_node/templates/openvswitch.sysconfig.j2

@@ -1 +1 @@
-IMAGE_VERSION={{ openshift_version }}
+IMAGE_VERSION={{ openshift_image_tag }}

+ 2 - 0
roles/openshift_version/defaults/main.yml

@@ -0,0 +1,2 @@
+---
+openshift_protect_installed_version: True

+ 18 - 0
roles/openshift_version/meta/main.yml

@@ -0,0 +1,18 @@
+---
+galaxy_info:
+  author: Devan Goodwin
+  description: Determines the version of OpenShift to install or upgrade to
+  company: Red Hat, Inc.
+  license: Apache License, Version 2.0
+  min_ansible_version: 1.9
+  platforms:
+  - name: EL
+    versions:
+    - 7
+  categories:
+  - cloud
+dependencies:
+- role: openshift_repos
+- role: openshift_docker_facts
+- role: docker
+  when: openshift.common.is_containerized | default(False) | bool

+ 76 - 0
roles/openshift_version/tasks/main.yml

@@ -0,0 +1,76 @@
+---
+# Determine the openshift_version to configure if none has been specified or set previously.
+
+- set_fact:
+    is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
+
+# Block attempts to install origin without specifying some kind of version information.
+# This is because the latest tags for origin are usually alpha builds, which should not
+# be used by default. Users must indicate what they want.
+- fail:
+    msg: "Must specify openshift_release or openshift_image_tag in inventory to install origin. (suggestion: add openshift_release=\"1.2\" to inventory)"
+  when: is_containerized | bool and openshift.common.deployment_type == 'origin' and openshift_release is not defined and openshift_image_tag is not defined
+
+# Normalize a few values into the exact format we need, since the accepted
+# input formats can be confusing:
+- set_fact:
+    openshift_release: "{{ openshift_release[1:] }}"
+  when: openshift_release is defined and openshift_release[0] == 'v'
+
+- set_fact:
+    openshift_image_tag: "{{ 'v' + openshift_image_tag }}"
+  when: openshift_image_tag is defined and openshift_image_tag[0] != 'v'
+
+- set_fact:
+    openshift_pkg_version: "{{ '-' + openshift_pkg_version }}"
+  when: openshift_pkg_version is defined and openshift_pkg_version[0] != '-'
+
+# Make sure we copy this to a fact if given a var:
+- set_fact:
+    openshift_version: "{{ openshift_version }}"
+  when: openshift_version is defined
+
+# Protect the installed version by default unless explicitly told not to, or given an
+# openshift_version already.
+- name: Use openshift.common.version fact as version to configure if already installed
+  set_fact:
+    openshift_version: "{{ openshift.common.version }}"
+  when: openshift.common.version is defined and openshift_version is not defined and openshift_protect_installed_version | bool
+
+- name: Set openshift_version for rpm installation
+  include: set_version_rpm.yml
+  when: not is_containerized | bool
+
+- name: Set openshift_version for containerized installation
+  include: set_version_containerized.yml
+  when: is_containerized | bool
+
+# At this point we know openshift_version is set appropriately. Now we set
+# openshift_image_tag and openshift_pkg_version, so all roles can always assume
+# each of this variables *will* be set correctly and can use them per their
+# intended purpose.
+
+- set_fact:
+    openshift_image_tag: v{{ openshift_version }}
+  when: openshift_image_tag is not defined
+
+- set_fact:
+    openshift_pkg_version: -{{ openshift_version }}
+  when: openshift_pkg_version is not defined
+
+- fail:
+    msg: openshift_version role was unable to set openshift_version
+  when: openshift_version is not defined
+
+- fail:
+    msg: openshift_version role was unable to set openshift_image_tag
+  when: openshift_image_tag is not defined
+
+- fail:
+    msg: openshift_version role was unable to set openshift_pkg_version
+  when: openshift_pkg_version is not defined
+
+# We can't map an openshift_release to full rpm version like we can with containers, make sure
+# the rpm version we looked up matches the release requested and error out if not.
+- fail:
+    msg: "Detected openshift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories or specify an exact openshift_pkg_version."
+  when: not is_containerized | bool and openshift_release is defined and not openshift_version.startswith(openshift_release) | bool

+ 39 - 0
roles/openshift_version/tasks/set_version_containerized.yml

@@ -0,0 +1,39 @@
+---
+# Determine openshift_version for a containerized install, in priority order:
+# explicit openshift_image_tag, then openshift_release, then the version
+# reported by running the latest cli image.
+- name: Set containerized version to configure if openshift_image_tag specified
+  set_fact:
+    # Expects a leading "v" in inventory, strip it off here:
+    openshift_version: "{{ openshift_image_tag[1:].split('-')[0] }}"
+  when: openshift_image_tag is defined and openshift_version is not defined
+
+- name: Set containerized version to configure if openshift_release specified
+  set_fact:
+    openshift_version: "{{ openshift_release }}"
+  when: openshift_release is defined and openshift_version is not defined
+
+- name: Lookup latest containerized version if no version specified
+  command: >
+    docker run --rm {{ openshift.common.cli_image }}:latest version
+  register: cli_image_version
+  when: openshift_version is not defined
+
+# Origin latest = pre-release version (i.e. v1.3.0-alpha.1-321-gb095e3a)
+# Keep the first two "-"-separated fields (version + pre-release tag), join
+# them back with "-", then strip the leading "v".
+- set_fact:
+    openshift_version: "{{ (cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2] | join('-'))[1:] }}"
+  when: openshift_version is not defined and openshift.common.deployment_type == 'origin' and cli_image_version.stdout_lines[0].split('-') | length > 1
+
+- set_fact:
+    openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
+  when: openshift_version is not defined
+
+# If we got an openshift_version like "3.2", lookup the latest 3.2 container version
+# and use that value instead.
+- name: Set precise containerized version to configure if openshift_release specified
+  command: >
+    docker run --rm {{ openshift.common.cli_image }}:v{{ openshift_version }} version
+  register: cli_image_version
+  when: openshift_version is defined and openshift_version.split('.') | length == 2
+
+# NOTE(review): in the origin branch below, "split('-')[0:2][1:]" slices the
+# resulting *list* (dropping its first element, the "vX.Y.Z" part) before
+# joining, unlike the earlier origin parsing above which joins first and then
+# strips the leading "v" character — verify this expression is intended.
+- set_fact:
+    openshift_version: "{{ cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0:2][1:] | join('-') if openshift.common.deployment_type == 'origin' else cli_image_version.stdout_lines[0].split(' ')[1].split('-')[0][1:] }}"
+  when: openshift_version is defined and openshift_version.split('.') | length == 2
+

+ 18 - 0
roles/openshift_version/tasks/set_version_rpm.yml

@@ -0,0 +1,18 @@
+---
+# Determine openshift_version for an rpm install: honor an explicit
+# openshift_pkg_version, otherwise query the configured yum repositories for
+# the available package version via repoquery.
+- name: Set rpm version to configure if openshift_pkg_version specified
+  set_fact:
+    # Expects a leading "-" in inventory, strip it off here, and remove trailing release,
+    openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}"
+  when: openshift_pkg_version is defined and openshift_version is not defined
+
+# failed_when: false — a repoquery failure is deliberately swallowed so the
+# fallback below can apply; changed_when: false — read-only query.
+- name: Gather common package version
+  command: >
+    {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type}}"
+  register: common_version
+  failed_when: false
+  changed_when: false
+  when: openshift_version is not defined
+
+# default('0.0', True): the boolean flag makes an empty stdout (repoquery
+# found nothing) also fall back to "0.0", not just an undefined value.
+- set_fact:
+    openshift_version: "{{ common_version.stdout | default('0.0', True) }}"
+  when: openshift_version is not defined

+ 51 - 32
utils/src/ooinstall/cli_installer.py

@@ -1,17 +1,17 @@
 # TODO: Temporarily disabled due to importing old code into openshift-ansible
 # repo. We will work on these over time.
-# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter
+# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter,too-many-lines
 
-import click
 import os
 import re
 import sys
+from distutils.version import LooseVersion
+import click
 from ooinstall import openshift_ansible
 from ooinstall import OOConfig
 from ooinstall.oo_config import OOConfigInvalidHostError
 from ooinstall.oo_config import Host
 from ooinstall.variants import find_variant, get_variant_version_combos
-from distutils.version import LooseVersion
 
 DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
 DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
@@ -32,7 +32,7 @@ def is_valid_hostname(hostname):
     return all(allowed.match(x) for x in hostname.split("."))
 
 def validate_prompt_hostname(hostname):
-    if '' == hostname or is_valid_hostname(hostname):
+    if hostname == '' or is_valid_hostname(hostname):
         return hostname
     raise click.BadParameter('Invalid hostname. Please double-check this value and re-enter it.')
 
@@ -146,10 +146,7 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
             if rpm_or_container == 'container':
                 host_props['containerized'] = True
 
-        if existing_env:
-            host_props['new_host'] = True
-        else:
-            host_props['new_host'] = False
+        host_props['new_host'] = existing_env
 
         host = Host(**host_props)
 
@@ -377,7 +374,7 @@ Notes:
     default_facts_lines = []
     default_facts = {}
     for h in hosts:
-        if h.preconfigured == True:
+        if h.preconfigured:
             continue
         try:
             default_facts[h.connect_to] = {}
@@ -824,50 +821,68 @@ def uninstall(ctx):
 @click.option('--latest-minor', '-l', is_flag=True, default=False)
 @click.option('--next-major', '-n', is_flag=True, default=False)
 @click.pass_context
+#pylint: disable=bad-builtin,too-many-statements
 def upgrade(ctx, latest_minor, next_major):
     oo_cfg = ctx.obj['oo_cfg']
     verbose = ctx.obj['verbose']
 
+    # major/minor fields are optional, as we don't always support minor/major
+    # upgrade for what you're currently running.
     upgrade_mappings = {
-                        '3.0':{
-                               'minor_version' :'3.0',
-                               'minor_playbook':'v3_0_minor/upgrade.yml',
-                               'major_version' :'3.1',
-                               'major_playbook':'v3_0_to_v3_1/upgrade.yml',
-                              },
                         '3.1':{
-                               'minor_version' :'3.1',
-                               'minor_playbook':'v3_1_minor/upgrade.yml',
                                'major_playbook':'v3_1_to_v3_2/upgrade.yml',
                                'major_version' :'3.2',
-                            }
+                            },
+                        '3.2':{
+                               'minor_playbook':'v3_1_to_v3_2/upgrade.yml',
+# Uncomment these when we're ready to support 3.3.
+#                               'major_version' :'3.3',
+#                               'major_playbook':'v3_1_to_v3_2/upgrade.yml',
+                            },
                        }
 
     if len(oo_cfg.hosts) == 0:
         click.echo("No hosts defined in: %s" % oo_cfg.config_path)
         sys.exit(1)
 
-    old_variant = oo_cfg.settings['variant']
+    variant = oo_cfg.settings['variant']
+    if find_variant(variant)[0] is None:
+        click.echo("%s is not a supported variant for upgrade." % variant)
+        sys.exit(0)
+
     old_version = oo_cfg.settings['variant_version']
     mapping = upgrade_mappings.get(old_version)
 
     message = """
         This tool will help you upgrade your existing OpenShift installation.
+        Currently running: %s %s
 """
-    click.echo(message)
+    click.echo(message % (variant, old_version))
 
+    # Map the dynamic upgrade options to the playbook to run for each.
+    # Index offset by 1.
+    # List contains tuples of booleans for (latest_minor, next_major)
+    selections = []
     if not (latest_minor or next_major):
-        click.echo("Version {} found. Do you want to update to the latest version of {} " \
-                   "or migrate to the next major release?".format(old_version, old_version))
-        response = click.prompt("(1) Update to latest {} " \
-                                "(2) Migrate to next release".format(old_version),
-                                type=click.Choice(['1', '2']),)
-        if response == "1":
-            latest_minor = True
-        if response == "2":
-            next_major = True
+        i = 0
+        if 'minor_playbook' in mapping:
+            click.echo("(%s) Update to latest %s" % (i + 1, old_version))
+            selections.append((True, False))
+            i += 1
+        if 'major_playbook' in mapping:
+            click.echo("(%s) Upgrade to next release: %s" % (i + 1, mapping['major_version']))
+            selections.append((False, True))
+            i += 1
+
+        response = click.prompt("\nChoose an option from above",
+                                type=click.Choice(list(map(str, range(1, len(selections) + 1)))))
+        latest_minor, next_major = selections[int(response) - 1]
 
     if next_major:
+        if 'major_playbook' not in mapping:
+            click.echo("No major upgrade supported for %s %s with this version "\
+                       "of atomic-openshift-utils." % (variant, old_version))
+            sys.exit(0)
         playbook = mapping['major_playbook']
         new_version = mapping['major_version']
         # Update config to reflect the version we're targetting, we'll write
@@ -877,11 +892,15 @@ def upgrade(ctx, latest_minor, next_major):
             oo_cfg.settings['variant'] = 'openshift-enterprise'
 
     if latest_minor:
+        if 'minor_playbook' not in mapping:
+            click.echo("No minor upgrade supported for %s %s with this version "\
+                       "of atomic-openshift-utils." % (variant, old_version))
+            sys.exit(0)
         playbook = mapping['minor_playbook']
-        new_version = mapping['minor_version']
+        new_version = old_version
 
-    click.echo("Openshift will be upgraded from %s %s to %s %s on the following hosts:\n" % (
-        old_variant, old_version, oo_cfg.settings['variant'], new_version))
+    click.echo("Openshift will be upgraded from %s %s to latest %s %s on the following hosts:\n" % (
+        variant, old_version, oo_cfg.settings['variant'], new_version))
     for host in oo_cfg.hosts:
         click.echo("  * %s" % host.connect_to)