Browse Source

Merge pull request #1718 from brenton/docker1

Containerized installs on RHEL were downgrading docker unnecessarily
Brenton Leanhardt 9 years ago
parent
commit
7967d12771

+ 5 - 2
filter_plugins/oo_filters.py

@@ -791,7 +791,7 @@ class FilterModule(object):
         return retval
         return retval
 
 
     @staticmethod
     @staticmethod
-    def oo_image_tag_to_rpm_version(version):
+    def oo_image_tag_to_rpm_version(version, include_dash=False):
         """ Convert an image tag string to an RPM version if necessary
         """ Convert an image tag string to an RPM version if necessary
             Empty strings and strings that are already in rpm version format
             Empty strings and strings that are already in rpm version format
             are ignored.
             are ignored.
@@ -802,7 +802,10 @@ class FilterModule(object):
             raise errors.AnsibleFilterError("|failed expects a string or unicode")
             raise errors.AnsibleFilterError("|failed expects a string or unicode")
 
 
         if version.startswith("v"):
         if version.startswith("v"):
-            version = "-" + version.replace("v", "")
+            version = version.replace("v", "")
+
+            if include_dash:
+                version = "-" + version
 
 
         return version
         return version
 
 

+ 5 - 5
playbooks/common/openshift-cluster/upgrades/files/openshift_container_versions.sh

@@ -3,19 +3,19 @@
 # Here we don't really care if this is a master, api, controller or node image.
 # Here we don't really care if this is a master, api, controller or node image.
 # We just need to know the version of one of them.
 # We just need to know the version of one of them.
 unit_file=$(ls /etc/systemd/system/${1}*.service | head -n1)
 unit_file=$(ls /etc/systemd/system/${1}*.service | head -n1)
-installed_container_name=$(basename -s .service ${unit_file})
-installed=$(docker exec ${installed_container_name} openshift version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
 
 
 if [ ${1} == "origin" ]; then
 if [ ${1} == "origin" ]; then
     image_name="openshift/origin"
     image_name="openshift/origin"
 elif grep aep $unit_file 2>&1 > /dev/null; then
 elif grep aep $unit_file 2>&1 > /dev/null; then
-    image_name="aep3/aep"
+    image_name="aep3/node"
 elif grep openshift3 $unit_file 2>&1 > /dev/null; then
 elif grep openshift3 $unit_file 2>&1 > /dev/null; then
-    image_name="openshift3/ose"
+    image_name="openshift3/node"
 fi
 fi
 
 
+installed=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+
 docker pull ${image_name} 2>&1 > /dev/null
 docker pull ${image_name} 2>&1 > /dev/null
-available=$(docker run --rm ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
+available=$(docker run --rm --entrypoint=/bin/openshift ${image_name} version 2> /dev/null | grep openshift | awk '{ print $2 }' | cut -f1 -d"-" | tr -d 'v')
 
 
 echo "---"
 echo "---"
 echo "curr_version: ${installed}"
 echo "curr_version: ${installed}"

+ 15 - 7
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml

@@ -38,7 +38,7 @@
       msg: >
       msg: >
         This upgrade does not support Pacemaker:
         This upgrade does not support Pacemaker:
         https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
         https://docs.openshift.{{ g_pacemaker_upgrade_url_segment }}/install_config/upgrading/pacemaker_to_native_ha.html
-    when: openshift.master.cluster_method == 'pacemaker'
+    when: openshift.master.cluster_method is defined and openshift.master.cluster_method == 'pacemaker'
 
 
   - fail:
   - fail:
       msg: >
       msg: >
@@ -57,6 +57,11 @@
   roles:
   roles:
   - openshift_facts
   - openshift_facts
   tasks:
   tasks:
+  - openshift_facts:
+      role: master
+      local_facts:
+        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+
   - name: Ensure Master is running
   - name: Ensure Master is running
     service:
     service:
       name: "{{ openshift.common.service_type }}-master"
       name: "{{ openshift.common.service_type }}-master"
@@ -77,11 +82,6 @@
       state: started
       state: started
       enabled: yes
       enabled: yes
     when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
     when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool
-  post_tasks:
-  - openshift_facts:
-      role: master
-      local_facts:
-        ha: "{{ groups.oo_masters_to_config | length > 1 }}"
 
 
 - name: Verify upgrade can proceed
 - name: Verify upgrade can proceed
   hosts: oo_nodes_to_config
   hosts: oo_nodes_to_config
@@ -105,8 +105,12 @@
   - include: ../../../../../roles/openshift_master/handlers/main.yml
   - include: ../../../../../roles/openshift_master/handlers/main.yml
   - include: ../../../../../roles/openshift_node/handlers/main.yml
   - include: ../../../../../roles/openshift_node/handlers/main.yml
   roles:
   roles:
+  # We want the cli role to evaluate so that the containerized oc/oadm wrappers
+  # are modified to use the correct image tag.  However, this can trigger a
+  # docker restart if new configuration is laid down which would immediately
+  # pull the latest image and defeat the purpose of these tasks.
   - openshift_cli
   - openshift_cli
-  tasks:
+  pre_tasks:
   - name: Clean package cache
   - name: Clean package cache
     command: "{{ ansible_pkg_mgr }} clean all"
     command: "{{ ansible_pkg_mgr }} clean all"
     when: not openshift.common.is_atomic | bool
     when: not openshift.common.is_atomic | bool
@@ -147,6 +151,10 @@
 
 
   - fail:
   - fail:
       msg: Verifying the correct version was found
       msg: Verifying the correct version was found
+    when: g_aos_versions.curr_version == ""
+
+  - fail:
+      msg: Verifying the correct version was found
     when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
     when: verify_upgrade_version is defined and g_new_version != verify_upgrade_version
 
 
   - include_vars: ../../../../../roles/openshift_master/vars/main.yml
   - include_vars: ../../../../../roles/openshift_master/vars/main.yml

+ 14 - 29
playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml

@@ -1,33 +1,4 @@
 ---
 ---
-# This is a workaround for authenticated registries
-- name: Download new images
-  hosts: oo_nodes_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  - name: Pull Images
-    command: >
-      docker pull {{ item }}:v{{ g_new_version }}
-    with_items:
-    - "{{ openshift.node.node_image }}"
-    - "{{ openshift.node.ovs_image }}"
-    - "{{ openshift.common.pod_image }}"
-    - "{{ openshift.common.router_image }}"
-    - "{{ openshift.common.registry_image }}"
-    - "{{ openshift.common.deployer_image }}"
-
-# This is a workaround for authenticated registries
-- name: Download new images
-  hosts: oo_masters_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  - name: Pull Images
-    command: >
-      docker pull {{ item }}:v{{ g_new_version }}
-    with_items:
-    - "{{ openshift.master.master_image }}"
-
 ###############################################################################
 ###############################################################################
 # The restart playbook should be run after this playbook completes.
 # The restart playbook should be run after this playbook completes.
 ###############################################################################
 ###############################################################################
@@ -40,6 +11,20 @@
   - include: docker_upgrade.yml
   - include: docker_upgrade.yml
     when: not openshift.common.is_atomic | bool
     when: not openshift.common.is_atomic | bool
 
 
+# The cli image is used by openshift_facts to determine the currently installed
+# version.  We need to explicitly pull the latest image to handle cases where
+# the locally cached 'latest' tag is older than the g_new_version.
+- name: Download cli image
+  hosts: oo_masters_to_config:oo_nodes_to_config
+  roles:
+  - openshift_facts
+  tasks:
+  - name: Pull Images
+    command: >
+      docker pull {{ item }}:latest
+    with_items:
+    - "{{ openshift.common.cli_image }}"
+
 ###############################################################################
 ###############################################################################
 # Upgrade Masters
 # Upgrade Masters
 ###############################################################################
 ###############################################################################

+ 14 - 9
roles/docker/tasks/main.yml

@@ -7,22 +7,27 @@
   register: docker_version_result
   register: docker_version_result
   changed_when: false
   changed_when: false
 
 
+- stat: path=/etc/sysconfig/docker-storage
+  register: docker_storage_check
+
+- name: Remove deferred deletion for downgrades from 1.9
+  command: >
+    sed -i 's/--storage-opt dm.use_deferred_deletion=true//' /etc/sysconfig/docker-storage
+  when: docker_storage_check.stat.exists | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare('1.9', '>=') and docker_version | version_compare('1.9', '<')
+
 - name: Downgrade docker if necessary
 - name: Downgrade docker if necessary
   command: "{{ ansible_pkg_mgr }} downgrade -y docker-{{ docker_version }}"
   command: "{{ ansible_pkg_mgr }} downgrade -y docker-{{ docker_version }}"
   register: docker_downgrade_result
   register: docker_downgrade_result
   when: not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'gt')
   when: not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'gt')
 
 
 - name: Install docker
 - name: Install docker
-  action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version != '' else '' }} state=present"
-  when: not openshift.common.is_atomic | bool and not docker_downgrade_result | changed
+  action: "{{ ansible_pkg_mgr }} name=docker{{ '-' + docker_version if docker_version is defined and docker_version != '' else '' }} state=present"
+  when: not openshift.common.is_atomic | bool and not docker_version_result | skipped and docker_version_result.stdout | default('0.0', True) | version_compare(docker_version, 'lt')
 
 
-- stat: path=/etc/sysconfig/docker-storage
-  register: docker_storage_check
-
-- name: Remove deferred deletion for downgrades from 1.9
-  command: >
-    sed -i 's/--storage-opt dm.use_deferred_deletion=true//' /etc/sysconfig/docker-storage
-  when: docker_downgrade_result | changed and docker_storage_check.stat.exists | bool and docker_version_result.stdout | default('0.0', True) | version_compare('1.9', '>=') and docker_version | version_compare('1.9', '<')
+# We're getting ready to start docker.  This is a workaround for cases where it
+# seems a package install/upgrade/downgrade has rebooted docker and crashed it.
+- name: Reset docker service state
+  command: systemctl reset-failed docker.service
 
 
 - name: enable and start the docker service
 - name: enable and start the docker service
   service:
   service:

+ 1 - 0
roles/etcd/templates/etcd.docker.service

@@ -11,6 +11,7 @@ ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v /var/lib/etcd:/v
 ExecStop=/usr/bin/docker stop {{ etcd_service }}
 ExecStop=/usr/bin/docker stop {{ etcd_service }}
 SyslogIdentifier=etcd_container
 SyslogIdentifier=etcd_container
 Restart=always
 Restart=always
+RestartSec=5s
 
 
 [Install]
 [Install]
 WantedBy=docker.service
 WantedBy=docker.service

+ 1 - 1
roles/openshift_common/tasks/main.yml

@@ -33,7 +33,7 @@
 # versions or image tags.  openshift_common's usage requires that it be a RPM
 # versions or image tags.  openshift_common's usage requires that it be a RPM
 # version and openshift_cli expects it to be an image tag.
 # version and openshift_cli expects it to be an image tag.
 - name: Install the base package for versioning
 - name: Install the base package for versioning
-  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') | oo_image_tag_to_rpm_version }} state=present"
+  action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
   when: not openshift.common.is_containerized | bool
   when: not openshift.common.is_containerized | bool
 
 
 - name: Set version facts
 - name: Set version facts

+ 17 - 3
roles/openshift_docker_facts/tasks/main.yml

@@ -46,10 +46,24 @@
   register: common_version
   register: common_version
   failed_when: false
   failed_when: false
   changed_when: false
   changed_when: false
-  when: not openshift.common.is_atomic | bool
+  when: not openshift.common.is_containerized | bool
+
+- set_fact:
+    l_common_version: "{{ openshift.common.image_tag | default('0.0', True) | oo_image_tag_to_rpm_version }}"
+  when: openshift.common.is_containerized | bool
+
+- set_fact:
+    l_common_version: "{{ common_version.stdout | default('0.0', True) }}"
+  when: not openshift.common.is_containerized | bool
 
 
 - name: Set docker version to be installed
 - name: Set docker version to be installed
   set_fact:
   set_fact:
     docker_version: "{{ '1.8.2' }}"
     docker_version: "{{ '1.8.2' }}"
-  when: " ( common_version.stdout | default('0.0', True) | version_compare('3.2','<') and openshift.common.service_type == 'atomic-openshift' ) or
-          ( common_version.stdout | default('0.0', True) | version_compare('1.1.4','<') and openshift.common.service_type == 'origin' )"
+  when: " ( l_common_version | version_compare('3.2','<') and openshift.common.service_type == 'atomic-openshift' ) or
+          ( l_common_version | version_compare('1.1.4','<') and openshift.common.service_type == 'origin' )"
+
+- name: Set docker version to be installed
+  set_fact:
+    docker_version: "{{ '1.9.1' }}"
+  when: " ( l_common_version | version_compare('3.2','>') and openshift.common.service_type == 'atomic-openshift' ) or
+          ( l_common_version | version_compare('1.2','>') and openshift.common.service_type == 'origin' )"

+ 11 - 8
roles/openshift_facts/library/openshift_facts.py

@@ -1080,13 +1080,9 @@ def get_openshift_version(facts, cli_image=None):
         elif 'node' in facts:
         elif 'node' in facts:
             container = facts['common']['service_type'] + '-node'
             container = facts['common']['service_type'] + '-node'
 
 
-        if container is not None:
-            exit_code, output, _ = module.run_command(['docker', 'exec', container, 'openshift', 'version'])
-            # if for some reason the container is installed but not running
-            # we'll fall back to using docker run later in this method.
-            if exit_code == 0:
-                version = parse_openshift_version(output)
-
+	# Try to get the version from the available cli image _before_ resorting
+	# to exec'ing into the running container.  This is to be more fault
+	# tolerant in environments where the container is not running.
         if version is None and cli_image is not None:
         if version is None and cli_image is not None:
             # Assume we haven't installed the environment yet and we need
             # Assume we haven't installed the environment yet and we need
             # to query the latest image, but only if docker is installed
             # to query the latest image, but only if docker is installed
@@ -1094,6 +1090,13 @@ def get_openshift_version(facts, cli_image=None):
                 exit_code, output, _ = module.run_command(['docker', 'run', '--rm', cli_image, 'version'])
                 exit_code, output, _ = module.run_command(['docker', 'run', '--rm', cli_image, 'version'])
                 version = parse_openshift_version(output)
                 version = parse_openshift_version(output)
 
 
+        if version is None and container is not None:
+            exit_code, output, _ = module.run_command(['docker', 'exec', container, 'openshift', 'version'])
+            # if for some reason the container is installed but not running
+            # we'll fall back to using docker run later in this method.
+            if exit_code == 0:
+                version = parse_openshift_version(output)
+
     return version
     return version
 
 
 def parse_openshift_version(output):
 def parse_openshift_version(output):
@@ -1351,7 +1354,7 @@ def set_container_facts_if_unset(facts):
         facts['common']['admin_binary'] = '/usr/local/bin/oadm'
         facts['common']['admin_binary'] = '/usr/local/bin/oadm'
         facts['common']['client_binary'] = '/usr/local/bin/oc'
         facts['common']['client_binary'] = '/usr/local/bin/oc'
         openshift_version = get_openshift_version(facts, cli_image)
         openshift_version = get_openshift_version(facts, cli_image)
-        if openshift_version is not None:
+        if openshift_version is not None and openshift_version is not "":
             base_version = openshift_version.split('-')[0]
             base_version = openshift_version.split('-')[0]
             facts['common']['image_tag'] = "v" + base_version
             facts['common']['image_tag'] = "v" + base_version
 
 

+ 3 - 3
roles/openshift_master/handlers/main.yml

@@ -1,17 +1,17 @@
 ---
 ---
 - name: restart master
 - name: restart master
   service: name={{ openshift.common.service_type }}-master state=restarted
   service: name={{ openshift.common.service_type }}-master state=restarted
-  when: (openshift.master.ha is defined and not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
+  when: (openshift.master.ha is not defined or not openshift.master.ha | bool) and (not (master_service_status_changed | default(false) | bool))
   notify: Verify API Server
   notify: Verify API Server
 
 
 - name: restart master api
 - name: restart master api
   service: name={{ openshift.common.service_type }}-master-api state=restarted
   service: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (openshift.master.ha is defined and openshift_master_ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
   notify: Verify API Server
   notify: Verify API Server
 
 
 - name: restart master controllers
 - name: restart master controllers
   service: name={{ openshift.common.service_type }}-master-controllers state=restarted
   service: name={{ openshift.common.service_type }}-master-controllers state=restarted
-  when: (openshift.master.ha is defined and openshift_master_ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  when: (openshift.master.ha is defined and openshift.master.ha | bool) and (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
 
 
 - name: Verify API Server
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
   # Using curl here since the uri module requires python-httplib2 and

+ 1 - 0
roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2

@@ -20,6 +20,7 @@ LimitCORE=infinity
 WorkingDirectory={{ openshift.common.data_dir }}
 WorkingDirectory={{ openshift.common.data_dir }}
 SyslogIdentifier={{ openshift.common.service_type }}-master-api
 SyslogIdentifier={{ openshift.common.service_type }}-master-api
 Restart=always
 Restart=always
+RestartSec=5s
 
 
 [Install]
 [Install]
 WantedBy=docker.service
 WantedBy=docker.service

+ 1 - 0
roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2

@@ -19,6 +19,7 @@ LimitCORE=infinity
 WorkingDirectory={{ openshift.common.data_dir }}
 WorkingDirectory={{ openshift.common.data_dir }}
 SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
 SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
 Restart=on-failure
 Restart=on-failure
+RestartSec=5s
 
 
 [Install]
 [Install]
 WantedBy=docker.service
 WantedBy=docker.service

+ 1 - 0
roles/openshift_master/templates/docker/master.docker.service.j2

@@ -12,6 +12,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.c
 ExecStartPost=/usr/bin/sleep 10
 ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
 Restart=always
 Restart=always
+RestartSec=5s
 
 
 [Install]
 [Install]
 WantedBy=docker.service
 WantedBy=docker.service

+ 1 - 0
roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.service.j2

@@ -15,6 +15,7 @@ LimitNOFILE=131072
 LimitCORE=infinity
 LimitCORE=infinity
 WorkingDirectory={{ openshift.common.data_dir }}
 WorkingDirectory={{ openshift.common.data_dir }}
 SyslogIdentifier=atomic-openshift-master-api
 SyslogIdentifier=atomic-openshift-master-api
+RestartSec=5s
 
 
 [Install]
 [Install]
 WantedBy=multi-user.target
 WantedBy=multi-user.target

+ 1 - 0
roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.service.j2

@@ -20,6 +20,7 @@ LimitCORE=infinity
 WorkingDirectory={{ openshift.common.data_dir }}
 WorkingDirectory={{ openshift.common.data_dir }}
 SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
 SyslogIdentifier={{ openshift.common.service_type }}-master-controllers
 Restart=on-failure
 Restart=on-failure
+RestartSec=5s
 
 
 [Install]
 [Install]
 WantedBy=multi-user.target
 WantedBy=multi-user.target

+ 1 - 0
roles/openshift_node/templates/openshift.docker.node.service

@@ -17,6 +17,7 @@ ExecStartPost=/usr/bin/sleep 10
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
 ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
 SyslogIdentifier={{ openshift.common.service_type }}-node
 SyslogIdentifier={{ openshift.common.service_type }}-node
 Restart=always
 Restart=always
+RestartSec=5s
 
 
 [Install]
 [Install]
 WantedBy=docker.service
 WantedBy=docker.service

+ 1 - 0
roles/openshift_node/templates/openvswitch.docker.service

@@ -11,6 +11,7 @@ ExecStartPost=/usr/bin/sleep 5
 ExecStop=/usr/bin/docker stop openvswitch
 ExecStop=/usr/bin/docker stop openvswitch
 SyslogIdentifier=openvswitch
 SyslogIdentifier=openvswitch
 Restart=always
 Restart=always
+RestartSec=5s
 
 
 [Install]
 [Install]
 WantedBy=docker.service
 WantedBy=docker.service