
Remove more legacy upgrade playbooks.

Devan Goodwin · 8 years ago · parent · commit 2a52c5e576

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_minor/filter_plugins

@@ -1 +0,0 @@
-../../../../../filter_plugins

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_minor/library

@@ -1 +0,0 @@
-../library

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_minor/lookup_plugins

@@ -1 +0,0 @@
-../../../../../lookup_plugins

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_minor/roles

@@ -1 +0,0 @@
-../../../../../roles

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/filter_plugins

@@ -1 +0,0 @@
-../../../../../filter_plugins

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/library

@@ -1 +0,0 @@
-../library

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/lookup_plugins

@@ -1 +0,0 @@
-../../../../../lookup_plugins

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/roles

@@ -1 +0,0 @@
-../../../../../roles

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_1_minor/filter_plugins

@@ -1 +0,0 @@
-../../../../../filter_plugins

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_1_minor/library

@@ -1 +0,0 @@
-../library

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_1_minor/lookup_plugins

@@ -1 +0,0 @@
-../../../../../lookup_plugins

+ 0 - 57
playbooks/common/openshift-cluster/upgrades/v3_1_minor/post.yml

@@ -1,57 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade default router, default registry and examples
-###############################################################################
-- name: Upgrade default router and default registry
-  hosts: oo_first_master
-  vars:
-    openshift_deployment_type: "{{ deployment_type }}"
-    registry_image: "{{  openshift.master.registry_url | replace( '${component}', 'docker-registry' )  | replace ( '${version}', 'v' + g_new_version  ) }}"
-    router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + g_new_version ) }}"
-    oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
-  roles:
-  # Create the new templates shipped in 3.1.z, existing templates are left
-  # unmodified. This prevents the subsequent role definition for
-  # openshift_examples from failing when trying to replace templates that do
-  # not already exist. We could have potentially done a replace --force to
-  # create and update in one step.
-  - openshift_examples
-  # Update the existing templates
-  - role: openshift_examples
-    openshift_examples_import_command: replace
-  pre_tasks:
-  - name: Collect all routers
-    command: >
-      {{ oc_cmd }} get pods --all-namespaces -l 'router' -o json
-    register: all_routers
-    failed_when: false
-    changed_when: false
-
-  - set_fact: haproxy_routers="{{ (all_routers.stdout | from_json)['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}"
-    when: all_routers.rc == 0
-
-  - set_fact: haproxy_routers=[]
-    when: all_routers.rc != 0
-
-  - name: Update router image to current version
-    when: all_routers.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'
-      --api-version=v1
-    with_items: haproxy_routers
-
-  - name: Check for default registry
-    command: >
-      {{ oc_cmd }} get -n default dc/docker-registry
-    register: _default_registry
-    failed_when: false
-    changed_when: false
-
-  - name: Update registry image to current version
-    when: _default_registry.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/docker-registry -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
-      --api-version=v1
-

+ 0 - 88
playbooks/common/openshift-cluster/upgrades/v3_1_minor/pre.yml

@@ -1,88 +0,0 @@
----
-###############################################################################
-# Evaluate host groups and gather facts
-###############################################################################
-- name: Load openshift_facts
-  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config:oo_lb_to_config
-  roles:
-  - openshift_facts
-
-###############################################################################
-# Pre-upgrade checks
-###############################################################################
-- name: Verify upgrade can proceed
-  hosts: oo_first_master
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-    target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
-  gather_facts: no
-  tasks:
-  - fail:
-      msg: >
-        This upgrade is only supported for origin, openshift-enterprise, and online
-        deployment types
-    when: deployment_type not in ['origin','openshift-enterprise', 'online']
-
-  - fail:
-      msg: >
-        openshift_pkg_version is {{ openshift_pkg_version }} which is not a
-        valid version for a {{ target_version }} upgrade
-    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(target_version ,'<')
-
-- name: Verify upgrade targets
-  hosts: oo_masters_to_config:oo_nodes_to_config
-  vars:
-    target_version: "{{ '1.1.1' if deployment_type == 'origin' else '3.1.1' }}"
-  tasks:
-  - name: Clean package cache
-    command: "{{ ansible_pkg_mgr }} clean all"
-    when: not openshift.common.is_atomic | bool
-
-  - set_fact:
-      g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
-
-  - name: Determine available versions
-    script: ../files/rpm_versions.sh {{ g_new_service_name }}
-    register: g_versions_result
-
-  - set_fact:
-      g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"
-
-  - set_fact:
-      g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
-
-  - fail:
-      msg: This playbook requires Origin 1.1 or later
-    when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.1','<')
-
-  - fail:
-      msg: This playbook requires Atomic Enterprise Platform/OpenShift Enterprise 3.1 or later
-    when: deployment_type == 'atomic-openshift' and g_aos_versions.curr_version | version_compare('3.1','<')
-
-  - fail:
-      msg: Upgrade packages not found
-    when: (g_aos_versions.avail_version | default(g_aos_versions.curr_version, true) | version_compare(target_version, '<'))
-
-  - set_fact:
-      pre_upgrade_complete: True
-
-
-##############################################################################
-# Gate on pre-upgrade checks
-##############################################################################
-- name: Gate on pre-upgrade checks
-  hosts: localhost
-  connection: local
-  become: no
-  vars:
-    pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
-  tasks:
-  - set_fact:
-      pre_upgrade_completed: "{{ hostvars
-                                 | oo_select_keys(pre_upgrade_hosts)
-                                 | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
-  - set_fact:
-      pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
-  - fail:
-      msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
-    when: pre_upgrade_failed | length > 0

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_1_minor/roles

@@ -1 +0,0 @@
-../../../../../roles