Browse source

Containerized installs on RHEL were downgrading docker unnecessarily

Brenton Leanhardt 9 years ago
parent
commit
477f04a7a9

+ 5 - 2
filter_plugins/oo_filters.py

@@ -791,7 +791,7 @@ class FilterModule(object):
         return retval
 
     @staticmethod
-    def oo_image_tag_to_rpm_version(version):
+    def oo_image_tag_to_rpm_version(version, include_dash=False):
         """ Convert an image tag string to an RPM version if necessary
             Empty strings and strings that are already in rpm version format
             are ignored.
@@ -802,7 +802,10 @@ class FilterModule(object):
             raise errors.AnsibleFilterError("|failed expects a string or unicode")
 
         if version.startswith("v"):
-            version = "-" + version.replace("v", "")
+            version = version.replace("v", "")
+
+            if include_dash:
+                version = "-" + version
 
         return version
 

+ 5 - 5
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml

@@ -57,6 +57,11 @@
   roles:
   - openshift_facts
   tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
   - name: Ensure Master is running
     service:
       name: "{{ openshift.common.service_type }}-master"
@@ -77,11 +82,6 @@
       state: started
       enabled: yes
     when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-  post_tasks:
-  - openshift_facts:
-      role: master
-      local_facts:
-        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
 
 - name: Verify upgrade can proceed
   hosts: oo_nodes_to_config

+ 14 - 12
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml

@@ -1,5 +1,19 @@
 ---
+###############################################################################
+# The restart playbook should be run after this playbook completes.
+###############################################################################
+
+- name: Upgrade docker
+  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - include: docker_upgrade.yml
+    when: not openshift.common.is_atomic | bool
+
 # This is a workaround for authenticated registries
+# This has to happen after the docker upgrade due to a bug in docker 1.8.2's
+# --add-registry implementation
 - name: Download new images
   hosts: oo_nodes_to_config
   roles:
@@ -29,18 +43,6 @@
     - "{{ openshift.master.master_image }}"
 
 ###############################################################################
-# The restart playbook should be run after this playbook completes.
-###############################################################################
-
-- name: Upgrade docker
-  hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  - include: docker_upgrade.yml
-    when: not openshift.common.is_atomic | bool
-
-###############################################################################
 # Upgrade Masters
 ###############################################################################
 - name: Upgrade master

+ 14 - 9
roles/docker/tasks/main.yml

@@ -7,22 +7,27 @@
   register: docker_version_result
   changed_when: false
 
+- stat: path=/etc/sysconfig/docker-storage
+  register: docker_storage_check
+
+- name: Remove deferred deletion for downgrades from 1.9
+  command: >
+    sed -i 's/--storage-opt dm.use_deferred_deletion=true//' /etc/sysconfig/docker-storage
+  when: docker_storage_check.stat.exists | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare('1.9', '>=') and docker_version | version_compare('1.9', '<')
+
 - name: Downgrade docker if necessary
   command: "{{ ansible_pkg_mgr }} downgrade -y docker-{{ docker_version }}"
   register: docker_downgrade_result
   when: not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'gt')
 
 - name: Install docker
-  action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version != '' else '' }} state=present"
-  when: not openshift.common.is_atomic | bool and not docker_downgrade_result | changed
+  action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and docker_version != '' else '' }} state=present"
+  when: not openshift.common.is_atomic | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'lt')
 
-- stat: path=/etc/sysconfig/docker-storage
-  register: docker_storage_check
-
-- name: Remove deferred deletion for downgrades from 1.9
-  command: >
-    sed -i 's/--storage-opt dm.use_deferred_deletion=true//' /etc/sysconfig/docker-storage
-  when: docker_downgrade_result | changed and docker_storage_check.stat.exists | bool and docker_version_result.stdout | default('0.0', True) | version_compare('1.9', '>=') and docker_version | version_compare('1.9', '<')
+# We're getting ready to start docker.  This is a workaround for cases where it
+# seems a package install/upgrade/downgrade has rebooted docker and crashed it.
+- name: Reset docker service state
+  command: systemctl reset-failed docker.service
 
 - name: enable and start the docker service
   service:

+ 1 - 1
roles/openshift_common/tasks/main.yml

@@ -33,7 +33,7 @@
 # versions or image tags.  openshift_common's usage requires that it be a RPM
 # version and openshift_cli expects it to be an image tag.
 - name: Install the base package for versioning
-  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') | oo_image_tag_to_rpm_version }} state=present"
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
   when: not openshift.common.is_containerized | bool
 
 - name: Set version facts

+ 17 - 3
roles/openshift_docker_facts/tasks/main.yml

@@ -46,10 +46,24 @@
   register: common_version
   failed_when: false
   changed_when: false
-  when: not openshift.common.is_atomic | bool
+  when: not openshift.common.is_containerized | bool
+
+- set_fact:
+    l_common_version: "{{ openshift.common.image_tag | default('0.0', True) | oo_image_tag_to_rpm_version }}"
+  when: openshift.common.is_containerized | bool
+
+- set_fact:
+    l_common_version: "{{ common_version.stdout | default('0.0', True) }}"
+  when: not openshift.common.is_containerized | bool
 
 - name: Set docker version to be installed
   set_fact:
     docker_version: "{{ '1.8.2' }}"
-  when: " ( common_version.stdout | default('0.0', True) | version_compare('3.2','<') and openshift.common.service_type == 'atomic-openshift' ) or
-          ( common_version.stdout | default('0.0', True) | version_compare('1.1.4','<') and openshift.common.service_type == 'origin' )"
+  when: " ( l_common_version | version_compare('3.2','<') and openshift.common.service_type == 'atomic-openshift' ) or
+          ( l_common_version | version_compare('1.1.4','<') and openshift.common.service_type == 'origin' )"
+
+- name: Set docker version to be installed
+  set_fact:
+    docker_version: "{{ '1.9.1' }}"
+  when: " ( l_common_version | version_compare('3.2','>') and openshift.common.service_type == 'atomic-openshift' ) or
+          ( l_common_version | version_compare('1.2','>') and openshift.common.service_type == 'origin' )"

+ 2 - 2
roles/openshift_master/handlers/main.yml

@@ -6,12 +6,12 @@
 
 - name: restart master api
   service: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (openshift.master.ha is defined and openshift_master_ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
   notify: Verify API Server
 
 - name: restart master controllers
   service: name={{ openshift.common.service_type }}-master-controllers state=restarted
-  when: (openshift.master.ha is defined and openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and