
Create v3_5 upgrade playbooks

Russell Teague · 8 years ago · commit bc2ba98351

+ 2 - 2
playbooks/byo/openshift-cluster/upgrades/README.md

@@ -4,5 +4,5 @@ cluster. Additional notes for the associated upgrade playbooks are
 provided in their respective directories.
 
 # Upgrades available
-- [OpenShift Enterprise 3.2 to 3.3](v3_3/README.md)
-- [OpenShift Enterprise 3.1 to 3.2](v3_2/README.md)
+- [OpenShift Enterprise 3.4 to 3.5](v3_5/README.md)
+- [OpenShift Enterprise 3.3 to 3.4](v3_4/README.md)

+ 18 - 0
playbooks/byo/openshift-cluster/upgrades/v3_5/README.md

@@ -0,0 +1,18 @@
+# v3.5 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the following steps:
+
+ * Upgrade and restart master services
+ * Mark nodes unschedulable
+ * Upgrade and restart Docker
+ * Upgrade and restart node services
+ * Modify the subset of the configuration necessary for the upgrade
+ * Apply the latest cluster policies
+ * Update the default router if one exists
+ * Update the default registry if one exists
+ * Update image streams and quickstarts
+
+## Usage
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+```
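
All of the pre-upgrade plays in `upgrade.yml` carry the `pre_upgrade` tag, so the verification steps can be exercised on their own before committing to the real upgrade. A minimal sketch, assuming the same inventory as above:

```
ansible-playbook -i ~/ansible-inventory \
    openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml \
    --tags pre_upgrade
```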

+ 1 - 0
playbooks/byo/openshift-cluster/upgrades/v3_5/roles

@@ -0,0 +1 @@
+../../../../../roles
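
The `roles` entry added above is a relative symlink whose target, `../../../../../roles`, points back at the repository's top-level `roles/` directory; it is what lets Ansible resolve role names such as `openshift_repos` from inside this playbook directory. A sketch of the equivalent manual setup, assuming a checkout rooted at `openshift-ansible/`:

```
cd openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_5
ln -s ../../../../../roles roles
```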

+ 99 - 0
playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml

@@ -0,0 +1,99 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+  tags:
+  - pre_upgrade
+
+- name: Configure the upgrade target for the common upgrade tasks
+  hosts: l_oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+      openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade
+
+- name: Update repos and initialize facts on all hosts
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+  tags:
+  - pre_upgrade
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | oo_collect('openshift.common.hostname') | default([]) | join(',')
+                                                }}"
+    when: (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-master/validate_restart.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Clean up unused Docker images
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+  tasks:
+  - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
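
The `openshift_no_proxy_internal_hostnames` fact set in the pre-upgrade plays folds the hostnames of the node, master, and (optional) etcd groups into a single comma-separated string for proxy exclusion. `oo_select_keys` and `oo_collect` are this repository's own filter plugins; a rough Python equivalent of the pipeline, using hypothetical `hostvars` and `groups` inputs for illustration:

```python
def no_proxy_internal_hostnames(hostvars, groups):
    # Union of the node, master, and (optional) etcd inventory groups.
    hosts = (set(groups['oo_nodes_to_config'])
             | set(groups['oo_masters_to_config'])
             | set(groups.get('oo_etcd_to_config', [])))
    # Collect each host's openshift.common.hostname fact and join them.
    names = [hostvars[h]['openshift']['common']['hostname']
             for h in hosts if h in hostvars]
    return ','.join(names)
```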

+ 102 - 0
playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml

@@ -0,0 +1,102 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+  tags:
+  - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+      openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+
+- name: Update repos on control plane hosts
+  hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
+  tags:
+  - pre_upgrade
+  roles:
+  - openshift_repos
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | oo_collect('openshift.common.hostname') | default([]) | join(',')
+                                                }}"
+    when: (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-master/validate_restart.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Clean up unused Docker images
+  hosts: oo_masters_to_config:oo_etcd_to_config
+  tasks:
+  - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
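
Per the header comment, this playbook leaves node services and Docker on nodes untouched; `upgrade_nodes.yml` finishes the job afterwards. A sketch of the resulting two-step invocation, assuming the same inventory as the README:

```
ansible-playbook -i ~/ansible-inventory \
    openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
ansible-playbook -i ~/ansible-inventory \
    openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
```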

+ 100 - 0
playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml

@@ -0,0 +1,100 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../../../../common/openshift-cluster/upgrades/init.yml
+  tags:
+  - pre_upgrade
+
+# Configure the upgrade target for the common upgrade tasks:
+- hosts: l_oo_all_hosts
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
+      openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
+
+# Pre-upgrade
+- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+  tags:
+  - pre_upgrade
+
+- name: Update repos on nodes
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
+  roles:
+  - openshift_repos
+  tags:
+  - pre_upgrade
+
+- name: Set openshift_no_proxy_internal_hostnames
+  hosts: oo_masters_to_config:oo_nodes_to_upgrade
+  tags:
+  - pre_upgrade
+  tasks:
+  - set_fact:
+      openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
+                                                    | union(groups['oo_masters_to_config'])
+                                                    | union(groups['oo_etcd_to_config'] | default([])))
+                                                | oo_collect('openshift.common.hostname') | default([]) | join(',')
+                                                }}"
+    when: (openshift_http_proxy is defined or openshift_https_proxy is defined) and
+            openshift_generate_no_proxy_hosts | default(True) | bool
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
+  tags:
+  - pre_upgrade
+  vars:
+    # Request specific openshift_release and let the openshift_version role handle converting this
+    # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
+    # defined, and overriding the normal behavior of protecting the installed version
+    openshift_release: "{{ openshift_upgrade_target }}"
+    openshift_protect_installed_version: False
+
+    # We skip the docker role at this point in upgrade to prevent
+    # unintended package, container, or config upgrades which trigger
+    # docker restarts. At this early stage of upgrade we can assume
+    # docker is configured and running.
+    skip_docker_role: True
+
+- name: Verify masters are already upgraded
+  hosts: oo_masters_to_config
+  tags:
+  - pre_upgrade
+  tasks:
+  - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
+    when: openshift.common.version != openshift_version
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+  tags:
+  - pre_upgrade
+
+- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
+  tags:
+  - pre_upgrade
+
+# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
+
+# Separate step so we can execute in parallel and clear out anything unused
+# before we get into the serialized upgrade process which will then remove
+# remaining images if possible.
+- name: Clean up unused Docker images
+  hosts: oo_nodes_to_upgrade
+  tasks:
+  - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
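
The "Verify masters are already upgraded" gate above compares the two version strings for exact equality. If a version-aware comparison were wanted instead, Ansible's `version_compare` filter offers one; this variant is only a sketch of that idea, not something this commit ships:

```yaml
- name: Verify masters are already upgraded
  hosts: oo_masters_to_config
  tags:
  - pre_upgrade
  tasks:
  - fail:
      msg: "Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
    # Fails whenever the master still reports an older version.
    when: not openshift.common.version | version_compare(openshift_version, '>=')
```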

+ 14 - 0
playbooks/common/openshift-cluster/upgrades/init.yml

@@ -65,3 +65,17 @@
     when: not openshift.common.is_atomic | bool
     args:
       warn: no
+
+- name: Ensure the firewall implementation is not switched during upgrade
+  hosts: oo_all_hosts
+  tasks:
+  - name: Check if iptables is running
+    command: systemctl status iptables
+    ignore_errors: true
+    changed_when: false
+    register: service_iptables_status
+
+  - name: Set os_firewall_use_firewalld to false when iptables is active
+    set_fact:
+      os_firewall_use_firewalld: false
+    when: "'Active: active' in service_iptables_status.stdout"
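
The new play probes iptables by grepping `systemctl status` output for `Active: active`, which is sensitive to systemd's human-readable formatting. `systemctl is-active` gives a machine-readable answer; an alternative sketch of the same detection, not what this commit ships:

```yaml
- name: Check if iptables is running
  command: systemctl is-active iptables
  register: service_iptables_status
  # is-active exits nonzero when the unit is inactive; don't fail the play.
  failed_when: false
  changed_when: false

- name: Set os_firewall_use_firewalld to false when iptables is active
  set_fact:
    os_firewall_use_firewalld: false
  when: service_iptables_status.stdout == 'active'
```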

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml

@@ -32,7 +32,7 @@
   include: ./etcd/main.yml
 
 # Create service signer cert when missing. Service signer certificate
-# is added to master config in the master config hook for v3_3.
+# is added to master config in the master_config_upgrade hook.
 - name: Determine if service signer cert must be created
   hosts: oo_first_master
   tasks:

+ 6 - 3
roles/openshift_facts/library/openshift_facts.py

@@ -867,6 +867,7 @@ def set_deployment_facts_if_unset(facts):
     return facts
 
 
+# pylint: disable=too-many-statements
 def set_version_facts_if_unset(facts):
     """ Set version facts. This currently includes common.version and
         common.version_gte_3_1_or_1_1.
@@ -904,8 +905,8 @@ def set_version_facts_if_unset(facts):
             version_gte_3_1_1_or_1_1_1 = True
             version_gte_3_2_or_1_2 = True
             version_gte_3_3_or_1_3 = True
-            version_gte_3_4_or_1_4 = False
-            version_gte_3_5_or_1_5 = False
+            version_gte_3_4_or_1_4 = True
+            version_gte_3_5_or_1_5 = True
             version_gte_3_6_or_1_6 = False
         facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
         facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
@@ -915,7 +916,9 @@ def set_version_facts_if_unset(facts):
         facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
         facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6
 
-        if version_gte_3_4_or_1_4:
+        if version_gte_3_5_or_1_5:
+            examples_content_version = 'v1.5'
+        elif version_gte_3_4_or_1_4:
             examples_content_version = 'v1.4'
         elif version_gte_3_3_or_1_3:
             examples_content_version = 'v1.3'

+ 58 - 26
roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py

@@ -52,32 +52,64 @@ class LookupModule(LookupBase):
             # convert short_version to origin short_version
             short_version = re.sub('^3.', '1.', short_version)
 
-        if short_version in ['1.1', '1.2']:
-            predicates.append({'name': 'PodFitsHostPorts'})
-            predicates.append({'name': 'PodFitsResources'})
-
-        # applies to all known versions
-        predicates.append({'name': 'NoDiskConflict'})
-
-        # only 1.1 didn't include NoVolumeZoneConflict
-        if short_version != '1.1':
-            predicates.append({'name': 'NoVolumeZoneConflict'})
-
-        if short_version in ['1.1', '1.2']:
-            predicates.append({'name': 'MatchNodeSelector'})
-
-        if short_version != '1.1':
-            predicates.append({'name': 'MaxEBSVolumeCount'})
-            predicates.append({'name': 'MaxGCEPDVolumeCount'})
-
-        if short_version not in ['1.1', '1.2']:
-            predicates.append({'name': 'GeneralPredicates'})
-            predicates.append({'name': 'PodToleratesNodeTaints'})
-            predicates.append({'name': 'CheckNodeMemoryPressure'})
-
-        if short_version not in ['1.1', '1.2', '1.3']:
-            predicates.append({'name': 'CheckNodeDiskPressure'})
-            predicates.append({'name': 'MatchInterPodAffinity'})
+        # Predicates ordered according to OpenShift Origin source:
+        # origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
+
+        if short_version == '1.1':
+            predicates.extend([
+                {'name': 'PodFitsHostPorts'},
+                {'name': 'PodFitsResources'},
+                {'name': 'NoDiskConflict'},
+                {'name': 'MatchNodeSelector'},
+            ])
+
+        if short_version == '1.2':
+            predicates.extend([
+                {'name': 'PodFitsHostPorts'},
+                {'name': 'PodFitsResources'},
+                {'name': 'NoDiskConflict'},
+                {'name': 'NoVolumeZoneConflict'},
+                {'name': 'MatchNodeSelector'},
+                {'name': 'MaxEBSVolumeCount'},
+                {'name': 'MaxGCEPDVolumeCount'}
+            ])
+
+        if short_version == '1.3':
+            predicates.extend([
+                {'name': 'NoDiskConflict'},
+                {'name': 'NoVolumeZoneConflict'},
+                {'name': 'MaxEBSVolumeCount'},
+                {'name': 'MaxGCEPDVolumeCount'},
+                {'name': 'GeneralPredicates'},
+                {'name': 'PodToleratesNodeTaints'},
+                {'name': 'CheckNodeMemoryPressure'}
+            ])
+
+        if short_version == '1.4':
+            predicates.extend([
+                {'name': 'NoDiskConflict'},
+                {'name': 'NoVolumeZoneConflict'},
+                {'name': 'MaxEBSVolumeCount'},
+                {'name': 'MaxGCEPDVolumeCount'},
+                {'name': 'GeneralPredicates'},
+                {'name': 'PodToleratesNodeTaints'},
+                {'name': 'CheckNodeMemoryPressure'},
+                {'name': 'CheckNodeDiskPressure'},
+                {'name': 'MatchInterPodAffinity'}
+            ])
+
+        if short_version in ['1.5', '1.6']:
+            predicates.extend([
+                {'name': 'NoVolumeZoneConflict'},
+                {'name': 'MaxEBSVolumeCount'},
+                {'name': 'MaxGCEPDVolumeCount'},
+                {'name': 'MatchInterPodAffinity'},
+                {'name': 'NoDiskConflict'},
+                {'name': 'GeneralPredicates'},
+                {'name': 'PodToleratesNodeTaints'},
+                {'name': 'CheckNodeMemoryPressure'},
+                {'name': 'CheckNodeDiskPressure'},
+            ])
 
         if regions_enabled:
             region_predicate = {
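
For 1.5/3.5 (and, until they diverge, 1.6/3.6) the lookup now emits predicates in the upstream scheduler's order instead of accumulating them across version checks. A hypothetical direct call to the plugin, mirroring the expectations added to the test file below (the real plugin is normally driven through Ansible's lookup interface):

```python
from openshift_master_facts_default_predicates import LookupModule

predicates = LookupModule().run([], short_version='1.5',
                                deployment_type='origin',
                                regions_enabled=False)
assert [p['name'] for p in predicates] == [
    'NoVolumeZoneConflict', 'MaxEBSVolumeCount', 'MaxGCEPDVolumeCount',
    'MatchInterPodAffinity', 'NoDiskConflict', 'GeneralPredicates',
    'PodToleratesNodeTaints', 'CheckNodeMemoryPressure',
    'CheckNodeDiskPressure',
]
```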

+ 45 - 16
roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py

@@ -11,11 +11,7 @@ class LookupModule(LookupBase):
     def run(self, terms, variables=None, zones_enabled=True, short_version=None,
             deployment_type=None, **kwargs):
 
-        priorities = [
-            {'name': 'LeastRequestedPriority', 'weight': 1},
-            {'name': 'BalancedResourceAllocation', 'weight': 1},
-            {'name': 'SelectorSpreadPriority', 'weight': 1}
-        ]
+        priorities = []
 
         if short_version is None or deployment_type is None:
             if 'openshift' not in variables:
@@ -57,18 +53,51 @@ class LookupModule(LookupBase):
             # convert short_version to origin short_version
             short_version = re.sub('^3.', '1.', short_version)
 
-        if short_version == '1.4':
-            priorities.append({'name': 'NodePreferAvoidPodsPriority', 'weight': 10000})
-
-        # only 1.1 didn't include NodeAffinityPriority
-        if short_version != '1.1':
-            priorities.append({'name': 'NodeAffinityPriority', 'weight': 1})
+        if short_version == '1.1':
+            priorities.extend([
+                {'name': 'LeastRequestedPriority', 'weight': 1},
+                {'name': 'BalancedResourceAllocation', 'weight': 1},
+                {'name': 'SelectorSpreadPriority', 'weight': 1}
+            ])
+
+        if short_version == '1.2':
+            priorities.extend([
+                {'name': 'LeastRequestedPriority', 'weight': 1},
+                {'name': 'BalancedResourceAllocation', 'weight': 1},
+                {'name': 'SelectorSpreadPriority', 'weight': 1},
+                {'name': 'NodeAffinityPriority', 'weight': 1}
+            ])
+
+        if short_version == '1.3':
+            priorities.extend([
+                {'name': 'LeastRequestedPriority', 'weight': 1},
+                {'name': 'BalancedResourceAllocation', 'weight': 1},
+                {'name': 'SelectorSpreadPriority', 'weight': 1},
+                {'name': 'NodeAffinityPriority', 'weight': 1},
+                {'name': 'TaintTolerationPriority', 'weight': 1}
+            ])
 
-        if short_version not in ['1.1', '1.2']:
-            priorities.append({'name': 'TaintTolerationPriority', 'weight': 1})
-
-        if short_version not in ['1.1', '1.2', '1.3']:
-            priorities.append({'name': 'InterPodAffinityPriority', 'weight': 1})
+        if short_version == '1.4':
+            priorities.extend([
+                {'name': 'LeastRequestedPriority', 'weight': 1},
+                {'name': 'BalancedResourceAllocation', 'weight': 1},
+                {'name': 'SelectorSpreadPriority', 'weight': 1},
+                {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+                {'name': 'NodeAffinityPriority', 'weight': 1},
+                {'name': 'TaintTolerationPriority', 'weight': 1},
+                {'name': 'InterPodAffinityPriority', 'weight': 1}
+            ])
+
+        if short_version in ['1.5', '1.6']:
+            priorities.extend([
+                {'name': 'SelectorSpreadPriority', 'weight': 1},
+                {'name': 'InterPodAffinityPriority', 'weight': 1},
+                {'name': 'LeastRequestedPriority', 'weight': 1},
+                {'name': 'BalancedResourceAllocation', 'weight': 1},
+                {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+                {'name': 'NodeAffinityPriority', 'weight': 1},
+                {'name': 'TaintTolerationPriority', 'weight': 1}
+            ])
 
         if zones_enabled:
             zone_priority = {
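
Every priority here keeps weight 1 except `NodePreferAvoidPodsPriority` at 10000. The scheduler ranks a node by the weighted sum of its per-priority scores (each in the 0-10 range in this era), so the large weight lets the avoid-pods annotation dominate every weight-1 priority; a toy illustration of that arithmetic, not code from this repository:

```python
def combined_score(node_scores, priorities):
    """Weighted sum the scheduler uses to rank one node."""
    return sum(p['weight'] * node_scores.get(p['name'], 0)
               for p in priorities)

# A node scoring 0 from NodePreferAvoidPodsPriority can gain at most
# 10 * 6 = 60 points from the six weight-1 priorities, while a full
# avoid-pods score contributes 100000 -- it decides the ranking.
```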

+ 19 - 4
roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py

@@ -9,6 +9,9 @@ sys.path = [os.path.abspath(os.path.dirname(__file__) + "/../lookup_plugins/")]
 
 from openshift_master_facts_default_predicates import LookupModule  # noqa: E402
 
+# Predicates ordered according to OpenShift Origin source:
+# origin/vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
+
 DEFAULT_PREDICATES_1_1 = [
     {'name': 'PodFitsHostPorts'},
     {'name': 'PodFitsResources'},
@@ -48,6 +51,18 @@ DEFAULT_PREDICATES_1_4 = [
     {'name': 'MatchInterPodAffinity'}
 ]
 
+DEFAULT_PREDICATES_1_5 = [
+    {'name': 'NoVolumeZoneConflict'},
+    {'name': 'MaxEBSVolumeCount'},
+    {'name': 'MaxGCEPDVolumeCount'},
+    {'name': 'MatchInterPodAffinity'},
+    {'name': 'NoDiskConflict'},
+    {'name': 'GeneralPredicates'},
+    {'name': 'PodToleratesNodeTaints'},
+    {'name': 'CheckNodeMemoryPressure'},
+    {'name': 'CheckNodeDiskPressure'},
+]
+
 REGION_PREDICATE = {
     'name': 'Region',
     'argument': {
@@ -66,10 +81,10 @@ TEST_VARS = [
     ('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3),
     ('1.4', 'origin', DEFAULT_PREDICATES_1_4),
     ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
-    ('1.5', 'origin', DEFAULT_PREDICATES_1_4),
-    ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
-    ('1.6', 'origin', DEFAULT_PREDICATES_1_4),
-    ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
+    ('1.5', 'origin', DEFAULT_PREDICATES_1_5),
+    ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
+    ('1.6', 'origin', DEFAULT_PREDICATES_1_5),
+    ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_5),
 ]
 
 

+ 15 - 1
roles/openshift_master_facts/test/openshift_master_facts_default_priorities_tests.py

@@ -40,6 +40,16 @@ DEFAULT_PRIORITIES_1_4 = [
     {'name': 'InterPodAffinityPriority', 'weight': 1}
 ]
 
+DEFAULT_PRIORITIES_1_5 = [
+    {'name': 'SelectorSpreadPriority', 'weight': 1},
+    {'name': 'InterPodAffinityPriority', 'weight': 1},
+    {'name': 'LeastRequestedPriority', 'weight': 1},
+    {'name': 'BalancedResourceAllocation', 'weight': 1},
+    {'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
+    {'name': 'NodeAffinityPriority', 'weight': 1},
+    {'name': 'TaintTolerationPriority', 'weight': 1}
+]
+
 ZONE_PRIORITY = {
     'name': 'Zone',
     'argument': {
@@ -58,7 +68,11 @@ TEST_VARS = [
     ('1.3', 'origin', DEFAULT_PRIORITIES_1_3),
     ('3.3', 'openshift-enterprise', DEFAULT_PRIORITIES_1_3),
     ('1.4', 'origin', DEFAULT_PRIORITIES_1_4),
-    ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4)
+    ('3.4', 'openshift-enterprise', DEFAULT_PRIORITIES_1_4),
+    ('1.5', 'origin', DEFAULT_PRIORITIES_1_5),
+    ('3.5', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
+    ('1.6', 'origin', DEFAULT_PRIORITIES_1_5),
+    ('3.6', 'openshift-enterprise', DEFAULT_PRIORITIES_1_5),
 ]
 
 

+ 6 - 24
utils/src/ooinstall/cli_installer.py

@@ -25,33 +25,15 @@ QUIET_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible-quiet.cfg'
 DEFAULT_PLAYBOOK_DIR = '/usr/share/ansible/openshift-ansible/'
 
 UPGRADE_MAPPINGS = {
-    '3.0': {
-        'minor_version': '3.0',
-        'minor_playbook': 'v3_0_minor/upgrade.yml',
-        'major_version': '3.1',
-        'major_playbook': 'v3_0_to_v3_1/upgrade.yml',
-    },
-    '3.1': {
-        'minor_version': '3.1',
-        'minor_playbook': 'v3_1_minor/upgrade.yml',
-        'major_playbook': 'v3_1_to_v3_2/upgrade.yml',
-        'major_version': '3.2',
-    },
-    '3.2': {
-        'minor_version': '3.2',
-        'minor_playbook': 'v3_2/upgrade.yml',
-        'major_playbook': 'v3_3/upgrade.yml',
-        'major_version': '3.3',
-    },
-    '3.3': {
-        'minor_version': '3.3',
-        'minor_playbook': 'v3_3/upgrade.yml',
-        'major_playbook': 'v3_4/upgrade.yml',
-        'major_version': '3.4',
-    },
     '3.4': {
         'minor_version': '3.4',
         'minor_playbook': 'v3_4/upgrade.yml',
+        'major_playbook': 'v3_5/upgrade.yml',
+        'major_version': '3.5',
+    },
+    '3.5': {
+        'minor_version': '3.5',
+        'minor_playbook': 'v3_5/upgrade.yml',
     },
 }
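
With the pre-3.4 entries dropped, the CLI installer can now only offer an upgrade from 3.4 (minor within 3.4 or major to 3.5) and from 3.5 (minor only, as no 3.6 playbook exists yet). A sketch of how such a mapping is typically consumed, using a hypothetical `choose_upgrade_playbook` helper rather than the installer's real call sites:

```python
import os

def choose_upgrade_playbook(current_version, major=False):
    mapping = UPGRADE_MAPPINGS[current_version]
    key = 'major_playbook' if major else 'minor_playbook'
    # Major upgrades are only offered where a newer playbook exists.
    if key not in mapping:
        raise ValueError('no %s upgrade path from %s'
                         % ('major' if major else 'minor', current_version))
    return os.path.join(DEFAULT_PLAYBOOK_DIR,
                        'playbooks/byo/openshift-cluster/upgrades',
                        mapping[key])
```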