Run storage upgrade pre and post master upgrade

Russell Teague, 7 years ago
commit ff1d1ee8d6

playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml (+18 -0)

@@ -3,6 +3,16 @@
 # Upgrade Masters
 ###############################################################################
 
+# oc adm migrate storage should be run prior to etcd v3 upgrade
+# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060
+- name: Pre master upgrade - Upgrade job storage
+  hosts: oo_first_master
+  tasks:
+  - name: Upgrade job storage
+    command: >
+      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      migrate storage --confirm
+
 # If facts cache were for some reason deleted, this fact may not be set, and if not set
 # it will always default to true. This causes problems for the etcd data dir fact detection
 # so we must first make sure this is set correctly before attempting the backup.
@@ -133,6 +143,14 @@
   - set_fact:
       master_update_complete: True
 
+- name: Post master upgrade - Upgrade job storage
+  hosts: oo_first_master
+  tasks:
+  - name: Upgrade job storage
+    command: >
+      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      migrate storage --confirm
+
 ##############################################################################
 # Gate on master update complete
 ##############################################################################
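
For context (not part of the commit itself): assuming a default RPM-based install, where the openshift.common.client_binary fact resolves to oc and openshift.common.config_base defaults to /etc/origin (typical defaults, not values stated in this diff), the templated command added in both new plays runs roughly as:

    # resolved form, assuming default client_binary (oc) and config_base (/etc/origin)
    oc adm --config=/etc/origin/master/admin.kubeconfig migrate storage --confirm

Unlike the storage_upgrade.yml plays removed below, this invocation omits --include=jobs, so it migrates storage for all resource types rather than only jobs.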

playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml (+0 -18)

@@ -1,18 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade job storage
-###############################################################################
-- name: Upgrade job storage
-  hosts: oo_first_master
-  roles:
-  - { role: openshift_cli }
-  vars:
-    # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
-    # restart.
-    skip_docker_role: True
-  tasks:
-  - name: Upgrade job storage
-    command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      migrate storage --include=jobs --confirm
-    run_once: true

playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml (+0 -2)

@@ -115,5 +115,3 @@
 - include: ../upgrade_nodes.yml
 
 - include: ../post_control_plane.yml
-
-- include: storage_upgrade.yml

playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml (+0 -2)

@@ -119,5 +119,3 @@
     master_config_hook: "v3_5/master_config_upgrade.yml"
 
 - include: ../post_control_plane.yml
-
-- include: storage_upgrade.yml

playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml (+0 -18)

@@ -1,18 +0,0 @@
----
-###############################################################################
-# Post upgrade - Upgrade job storage
-###############################################################################
-- name: Upgrade job storage
-  hosts: oo_first_master
-  roles:
-  - { role: openshift_cli }
-  vars:
-    # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
-    # restart.
-    skip_docker_role: True
-  tasks:
-  - name: Upgrade job storage
-    command: >
-      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      migrate storage --include=jobs --confirm
-    run_once: true

playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml (+0 -2)

@@ -115,5 +115,3 @@
 - include: ../upgrade_nodes.yml
 
 - include: ../post_control_plane.yml
-
-- include: storage_upgrade.yml

playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml (+0 -2)

@@ -119,5 +119,3 @@
     master_config_hook: "v3_6/master_config_upgrade.yml"
 
 - include: ../post_control_plane.yml
-
-- include: storage_upgrade.yml