Browse Source

Use consistent image references and split out node sync

All roles that include images should at least be consistent by using the
same basic pattern.

Split the node sync job out of openshift_sdn into openshift_node_group.
Clayton Coleman committed 7 years ago
parent
commit
0ba7d01c3b
29 changed files with 374 additions and 203 deletions
  1. 4 0
      playbooks/openshift-master/private/config.yml
  2. 4 7
      roles/lib_utils/library/openshift_container_binary_sync.py
  3. 13 0
      roles/openshift_bootstrap_autoapprover/defaults/main.yaml
  4. 6 1
      roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml
  5. 10 0
      roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-images.yaml
  6. 23 15
      roles/openshift_bootstrap_autoapprover/tasks/main.yml
  7. 15 1
      roles/openshift_cli/defaults/main.yml
  8. 3 5
      roles/openshift_cli/tasks/main.yml
  9. 2 2
      roles/openshift_control_plane/defaults/main.yml
  10. 1 1
      roles/openshift_control_plane/templates/master.yaml.v1.j2
  11. 2 2
      roles/openshift_examples/defaults/main.yml
  12. 2 2
      roles/openshift_hosted/defaults/main.yml
  13. 2 2
      roles/openshift_hosted_templates/defaults/main.yml
  14. 23 24
      roles/openshift_node/defaults/main.yml
  15. 0 1
      roles/openshift_node/tasks/bootstrap.yml
  16. 6 6
      roles/openshift_node/tasks/node_system_container.yml
  17. 21 10
      roles/openshift_node_group/defaults/main.yml
  18. 10 0
      roles/openshift_node_group/files/sync-images.yaml
  19. 8 0
      roles/openshift_node_group/files/sync-policy.yaml
  20. 135 0
      roles/openshift_node_group/files/sync.yaml
  21. 48 0
      roles/openshift_node_group/tasks/sync.yml
  22. 12 0
      roles/openshift_node_group/templates/node-config.yaml.j2
  23. 2 2
      roles/openshift_sdn/defaults/main.yml
  24. 2 2
      roles/openshift_sdn/files/sdn-images.yaml
  25. 1 1
      roles/openshift_sdn/files/sdn-ovs.yaml
  26. 1 93
      roles/openshift_sdn/files/sdn.yaml
  27. 11 13
      roles/openshift_sdn/tasks/main.yml
  28. 6 12
      roles/openshift_web_console/defaults/main.yml
  29. 1 1
      roles/openshift_web_console/tasks/install.yml

+ 4 - 0
playbooks/openshift-master/private/config.yml

@@ -221,6 +221,10 @@
   - role: calico_master
     when: openshift_use_calico | default(false) | bool
   tasks:
+  - name: Set up automatic node config reconciliation
+    import_role:
+      name: openshift_node_group
+      tasks_from: sync
   - import_role:
       name: kuryr
       tasks_from: master

+ 4 - 7
roles/lib_utils/library/openshift_container_binary_sync.py

@@ -31,13 +31,12 @@ class BinarySyncer(object):
     a container onto the host system.
     """
 
-    def __init__(self, module, image, tag, backend):
+    def __init__(self, module, image, backend):
         self.module = module
         self.changed = False
         self.output = []
         self.bin_dir = '/usr/local/bin'
         self._image = image
-        self.tag = tag
         self.backend = backend
         self.temp_dir = None  # TBD
 
@@ -51,7 +50,7 @@ class BinarySyncer(object):
         self.temp_dir = tempfile.mkdtemp()
         temp_dir_mount = tempfile.mkdtemp()
         try:
-            image_spec = '%s:%s' % (self.image, self.tag)
+            image_spec = self.image
             rc, stdout, stderr = self.module.run_command(['atomic', 'mount',
                                                           '--storage', "ostree",
                                                           image_spec, temp_dir_mount])
@@ -71,7 +70,7 @@ class BinarySyncer(object):
     def _sync_docker(self):
         container_name = "openshift-cli-%s" % random.randint(1, 100000)
         rc, stdout, stderr = self.module.run_command(['docker', 'create', '--name',
-                                                      container_name, '%s:%s' % (self.image, self.tag)])
+                                                      container_name, self.image])
         if rc:
             raise BinarySyncError("Error creating temporary docker container. stdout=%s, stderr=%s" %
                                   (stdout, stderr))
@@ -177,20 +176,18 @@ def main():
     module = AnsibleModule(  # noqa: F405
         argument_spec=dict(
             image=dict(required=True),
-            tag=dict(required=True),
             backend=dict(required=True),
         ),
         supports_check_mode=True
     )
 
     image = module.params['image']
-    tag = module.params['tag']
     backend = module.params['backend']
 
     if backend not in ["docker", "atomic"]:
         module.fail_json(msg="unknown backend")
 
-    binary_syncer = BinarySyncer(module, image, tag, backend)
+    binary_syncer = BinarySyncer(module, image, backend)
 
     try:
         binary_syncer.sync()

+ 13 - 0
roles/openshift_bootstrap_autoapprover/defaults/main.yaml

@@ -0,0 +1,13 @@
+---
+l_openshift_master_images_dict:
+  origin: 'docker.io/openshift/origin-${component}:${version}'
+  openshift-enterprise: 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
+l_osm_registry_url_default: "{{ l_openshift_master_images_dict[openshift_deployment_type] }}"
+l_os_registry_url: "{{ oreg_url | default(l_osm_registry_url_default) | regex_replace('${version}' | regex_escape, openshift_image_tag | default('${version}')) }}"
+
+l_openshift_master_prefix_dict:
+  origin: 'origin-${component}'
+  openshift-enterprise: 'ose-${component}'
+l_os_prefix: "{{ l_openshift_master_prefix_dict[openshift_deployment_type] }}"
+# TODO: we should publish oreg_url component=node
+osn_image: "{{ l_os_registry_url | regex_replace(l_os_prefix | regex_escape, 'node') }}"

+ 6 - 1
roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-controller.yaml

@@ -3,6 +3,9 @@ apiVersion: apps/v1beta1
 metadata:
   name: bootstrap-autoapprover
   namespace: openshift-infra
+  annotations:
+    image.openshift.io/triggers: |
+      [{"from":{"kind":"ImageStreamTag","name":"node:v3.10"},"fieldPath":"spec.template.spec.containers[?(@.name==\"signer\")].image"}]
 spec:
   updateStrategy:
     type: RollingUpdate
@@ -11,11 +14,13 @@ spec:
       labels:
         app: bootstrap-autoapprover
     spec:
+      nodeSelector:
+        node-role.kubernetes.io/master: 'true'
       serviceAccountName: bootstrap-autoapprover
       terminationGracePeriodSeconds: 1
       containers:
       - name: signer
-        image: openshift/node:v3.7.0-rc.0
+        image: " "
         command:
         - /bin/bash
         - -c

+ 10 - 0
roles/openshift_bootstrap_autoapprover/files/openshift-bootstrap-images.yaml

@@ -0,0 +1,10 @@
+apiVersion: image.openshift.io/v1
+kind: ImageStreamTag
+metadata:
+  name: node:v3.10
+  namespace: openshift-infra
+tag:
+  reference: true
+  from:
+    kind: DockerImage
+    name: openshift/node:v3.10.0

+ 23 - 15
roles/openshift_bootstrap_autoapprover/tasks/main.yml

@@ -1,28 +1,36 @@
 ---
-- name: Copy auto-approver config to host
-  run_once: true
+- name: Make temp directory for templates
+  command: mktemp -d /tmp/ansible-XXXXXX
+  register: mktemp
+  changed_when: False
+
+- name: Copy templates to temp directory
   copy:
     src: "{{ item }}"
-    dest: /tmp/openshift-approver/
-    owner: root
-    mode: 0400
+    dest: "{{ mktemp.stdout }}/{{ item | basename }}"
   with_fileglob:
-    - "*.yaml"
+    - "files/*.yaml"
 
-- name: Set auto-approver nodeSelector
+- name: Update the image tag
   run_once: true
   yedit:
-    src: "/tmp/openshift-approver/openshift-bootstrap-controller.yaml"
-    key: spec.template.spec.nodeSelector
-    value: "{{ openshift_master_bootstrap_auto_approver_node_selector | default({}) }}"
-    value_type: list
+    src: "{{ mktemp.stdout }}/openshift-bootstrap-images.yaml"
+    key: 'tag.from.name'
+    value: "{{ osn_image }}"
+
+# TODO: temporary until we fix apply for image stream tags
+- name: Remove the image stream tag
+  shell: >
+    {{ openshift_client_binary }} delete -n openshift-infra istag node:v3.10 --ignore-not-found
 
-- name: Create auto-approver on cluster
+- name: Apply the config
   run_once: true
-  command: "{{ openshift_client_binary }} apply -f /tmp/openshift-approver/"
+  shell: >
+    {{ openshift_client_binary }} apply -f "{{ mktemp.stdout }}"
 
-- name: Remove auto-approver config
+- name: Remove temp directory
   run_once: true
   file:
-    path: /tmp/openshift-approver/
     state: absent
+    name: "{{ mktemp.stdout }}"
+  changed_when: False

+ 15 - 1
roles/openshift_cli/defaults/main.yml

@@ -2,9 +2,23 @@
 system_images_registry_dict:
   openshift-enterprise: "registry.access.redhat.com"
   origin: "docker.io"
-
 system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
 
+l_openshift_images_dict:
+  origin: 'docker.io/openshift/origin-${component}:${version}'
+  openshift-enterprise: 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
+l_osm_registry_url_default: "{{ l_openshift_images_dict[openshift_deployment_type] }}"
+l_os_registry_url: "{{ oreg_url | default(l_osm_registry_url_default) | regex_replace('${version}' | regex_escape, openshift_image_tag | default('${version}')) }}"
+
+l_openshift_prefix_dict:
+  origin: 'origin-${component}'
+  openshift-enterprise: 'ose-${component}'
+l_os_prefix: "{{ l_openshift_prefix_dict[openshift_deployment_type] }}"
+# TODO: we should publish oreg_url component=node
+openshift_image_default: "{{ l_os_registry_url | regex_replace(l_os_prefix | regex_escape, 'node') }}"
+openshift_cli_image: "{{ (system_images_registry == 'docker') | ternary(openshift_image_default, (openshift_image_default.split('/')|length==2) | ternary(system_images_registry + '/' + openshift_image_default, openshift_image_default)) }}"
+system_openshift_cli_image: "{{ (system_images_registry == 'docker') | ternary('docker:' + openshift_cli_image, openshift_cli_image) }}"
+
 openshift_use_crio_only: False
 openshift_crio_use_rpm: False
 

+ 3 - 5
roles/openshift_cli/tasks/main.yml

@@ -8,7 +8,7 @@
 - block:
   - name: Pull CLI Image
     command: >
-      docker pull {{ openshift_cli_image }}:{{ openshift_image_tag }}
+      docker pull {{ openshift_cli_image }}
     register: pull_result
     changed_when: "'Downloaded newer image' in pull_result.stdout"
 
@@ -16,7 +16,6 @@
   - name: Copy client binaries/symlinks out of CLI image for use on the host
     openshift_container_binary_sync:
       image: "{{ openshift_cli_image }}"
-      tag: "{{ openshift_image_tag }}"
       backend: "docker"
   when:
   - openshift_is_containerized | bool
@@ -25,15 +24,14 @@
 - block:
   - name: Pull CLI Image
     command: >
-      atomic pull --storage ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}:{{ openshift_image_tag }}
+      atomic pull --storage ostree {{ system_openshift_cli_image }}
     register: pull_result
     changed_when: "'Pulling layer' in pull_result.stdout"
 
   # openshift_container_binary_sync is a custom module in lib_utils
   - name: Copy client binaries/symlinks out of CLI image for use on the host
     openshift_container_binary_sync:
-      image: "{{ '' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift_cli_image }}"
-      tag: "{{ openshift_image_tag }}"
+      image: "{{ openshift_cli_image }}"
       backend: "atomic"
   when:
   - openshift_is_containerized | bool

+ 2 - 2
roles/openshift_control_plane/defaults/main.yml

@@ -8,8 +8,8 @@ r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
 r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 
 l_openshift_images_dict:
-  origin: 'openshift/origin-${component}:${version}'
-  openshift-enterprise: 'openshift3/ose-${component}:${version}'
+  origin: 'docker.io/openshift/origin-${component}:${version}'
+  openshift-enterprise: 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
 l_osm_registry_url_default: "{{ l_openshift_images_dict[openshift_deployment_type] }}"
 l_os_registry_url: "{{ oreg_url_master | default(oreg_url) | default(l_osm_registry_url_default) | regex_replace('${version}' | regex_escape, openshift_image_tag | default('${version}')) }}"
 

+ 1 - 1
roles/openshift_control_plane/templates/master.yaml.v1.j2

@@ -86,7 +86,7 @@ etcdStorageConfig:
   openShiftStoragePrefix: openshift.io
   openShiftStorageVersion: v1
 imageConfig:
-  format: {{ l_osm_registry_url }}
+  format: {{ l_os_registry_url }}
   latest: {{ openshift_master_image_config_latest }}
 imagePolicyConfig:{{ openshift.master.image_policy_config | default({"internalRegistryHostname":"docker-registry.default.svc:5000"}) | lib_utils_to_padded_yaml(level=1) }}
 kubeletClientInfo:

+ 2 - 2
roles/openshift_examples/defaults/main.yml

@@ -28,6 +28,6 @@ openshift_examples_import_command: "create"
 registry_host: "{{ openshift_examples_registryurl.split('/')[0] if '.' in openshift_examples_registryurl.split('/')[0] else '' }}"
 
 openshift_hosted_images_dict:
-  origin: 'openshift/origin-${component}:${version}'
-  openshift-enterprise: 'openshift3/ose-${component}:${version}'
+  origin: 'docker.io/openshift/origin-${component}:${version}'
+  openshift-enterprise: 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
 openshift_examples_registryurl: "{{ oreg_url_master | default(oreg_url) | default(openshift_hosted_images_dict[openshift_deployment_type]) }}"

+ 2 - 2
roles/openshift_hosted/defaults/main.yml

@@ -22,8 +22,8 @@ openshift_master_config_dir: "{{ openshift.common.config_base | default(openshif
 openshift_cluster_domain: 'cluster.local'
 
 openshift_hosted_images_dict:
-  origin: 'openshift/origin-${component}:${version}'
-  openshift-enterprise: 'openshift3/ose-${component}:${version}'
+  origin: 'docker.io/openshift/origin-${component}:${version}'
+  openshift-enterprise: 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
 
 ##########
 # Router #

+ 2 - 2
roles/openshift_hosted_templates/defaults/main.yml

@@ -5,8 +5,8 @@ hosted_deployment_type: "{{ 'origin' if openshift_deployment_type == 'origin' el
 content_version: "{{ openshift_examples_content_version }}"
 
 openshift_hosted_images_dict:
-  origin: 'openshift/origin-${component}:${version}'
-  openshift-enterprise: 'openshift3/ose-${component}:${version}'
+  origin: 'docker.io/openshift/origin-${component}:${version}'
+  openshift-enterprise: 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
 
 openshift_hosted_templates_registryurl: "{{ oreg_url_master | default(oreg_url) | default(openshift_hosted_images_dict[openshift_deployment_type]) | regex_replace('${version}' | regex_escape, openshift_image_tag | default('${version}')) }}"
 registry_host: "{{ openshift_hosted_templates_registryurl.split('/')[0] if '.' in openshift_hosted_templates_registryurl.split('/')[0] else '' }}"

+ 23 - 24
roles/openshift_node/defaults/main.yml

@@ -10,6 +10,29 @@ openshift_node_proxy_mode: iptables
 openshift_set_node_ip: False
 openshift_config_base: '/etc/origin'
 
+openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
+
+system_images_registry_dict:
+  openshift-enterprise: "registry.access.redhat.com"
+  origin: "docker.io"
+
+system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
+
+l_openshift_images_dict:
+  origin: 'docker.io/openshift/origin-${component}:${version}'
+  openshift-enterprise: 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
+l_osm_registry_url_default: "{{ l_openshift_images_dict[openshift_deployment_type] }}"
+l_os_registry_url: "{{ oreg_url | default(l_osm_registry_url_default) | regex_replace('${version}' | regex_escape, openshift_image_tag | default('${version}')) }}"
+
+l_openshift_prefix_dict:
+  origin: 'origin-${component}'
+  openshift-enterprise: 'ose-${component}'
+l_os_prefix: "{{ l_openshift_prefix_dict[openshift_deployment_type] }}"
+# TODO: we should publish oreg_url component=node
+osn_image_default: "{{ l_os_registry_url | regex_replace(l_os_prefix | regex_escape, 'node') }}"
+osn_image: "{{ (system_images_registry == 'docker') | ternary(osn_image_default, (osn_image_default.split('/')|length==2) | ternary(system_images_registry + '/' + osn_image_default, osn_image_default)) }}"
+system_osn_image: "{{ (system_images_registry == 'docker') | ternary('docker:' + osn_image, osn_image) }}"
+
 openshift_oreg_url_default_dict:
   origin: "openshift/origin-${component}:${version}"
   openshift-enterprise: "openshift3/ose-${component}:${version}"
@@ -89,35 +112,11 @@ openshift_node_syscon_auth_mounts_l:
 # This should be in the same format as auth_mounts_l above.
 openshift_node_syscon_add_mounts_l: []
 
-
-openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
-
-l_openshift_images_dict:
-  origin: 'openshift/origin-${component}:${version}'
-  openshift-enterprise: 'openshift3/ose-${component}:${version}'
-l_osm_registry_url_default: "{{ l_openshift_images_dict[openshift_deployment_type] }}"
-l_os_registry_url: "{{ oreg_url | default(l_osm_registry_url_default) | regex_replace('${version}' | regex_escape, openshift_image_tag | default('${version}')) }}"
-
-l_openshift_prefix_dict:
-  origin: 'origin-${component}'
-  openshift-enterprise: 'ose-${component}'
-l_os_prefix: "{{ l_openshift_prefix_dict[openshift_deployment_type] }}"
-# TODO: we should publish oreg_url component=node
-osn_image: "{{ l_os_registry_url | regex_replace(l_os_prefix | regex_escape, 'node') }}"
-
 openshift_service_type_dict:
   origin: origin
   openshift-enterprise: atomic-openshift
 openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"
 
-system_images_registry_dict:
-  openshift-enterprise: "registry.access.redhat.com"
-  origin: "docker.io"
-
-system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
-
-openshift_image_tag: ''
-
 default_r_openshift_node_image_prep_packages:
 - "{{ openshift_service_type }}-node"
 - "{{ openshift_service_type }}-docker-excluder"

+ 0 - 1
roles/openshift_node/tasks/bootstrap.yml

@@ -33,7 +33,6 @@
   import_tasks: aws.yml
   when: not (openshift_node_use_instance_profiles | default(False))
 
-
 - name: "disable {{ openshift_service_type }}-node service"
   systemd:
     name: "{{ item }}"

+ 6 - 6
roles/openshift_node/tasks/node_system_container.yml

@@ -5,12 +5,6 @@
 # /etc/systemd/system/origin-node.service (origin) or
 # /etc/systemd/system/atomic-openshift-node.service (enterprise)
 
-- name: Pre-pull node system container image
-  command: >
-    atomic pull --storage=ostree {{ osn_image }}
-  register: pull_result
-  changed_when: "'Pulling layer' in pull_result.stdout"
-
 # TODO: remove when system container is fixed to not include it
 - name: Ensure old system path is set
   file:
@@ -18,6 +12,12 @@
     path: "/etc/origin/openvswitch"
     mode: '0750'
 
+- name: Pre-pull node system container image
+  command: >
+    atomic pull --storage=ostree {{ system_osn_image }}
+  register: pull_result
+  changed_when: "'Pulling layer' in pull_result.stdout"
+
 - name: Install or Update node system container
   oc_atomic_container:
     name: "{{ openshift_service_type }}-node"

+ 21 - 10
roles/openshift_node_group/defaults/main.yml

@@ -2,31 +2,42 @@
 openshift_node_groups:
 - name: node-config-master
   labels:
-  - 'type=master'
+  - 'node-role.kubernetes.io/master=true'
   edits: []
 - name: node-config-infra
   labels:
-  - 'type=infra'
+  - 'node-role.kubernetes.io/infra=true'
   edits: []
 - name: node-config-compute
   labels:
-  - 'type=compute'
+  - 'node-role.kubernetes.io/compute=true'
   edits: []
 
 openshift_node_group_edits: []
 openshift_node_group_namespace: openshift-node
 openshift_node_group_labels: []
 
-openshift_oreg_url_default_dict:
-  origin: "openshift/origin-${component}:${version}"
-  openshift-enterprise: openshift3/ose-${component}:${version}
-openshift_oreg_url_default: "{{ openshift_oreg_url_default_dict[openshift_deployment_type] }}"
-oreg_url_node: "{{ oreg_url | default(openshift_oreg_url_default) | regex_replace('${version}' | regex_escape, openshift_image_tag | default('${version}')) }}"
+openshift_use_crio: False
+l_crio_use_new_var_sock: "{{ openshift_version | version_compare('3.9', '>=') }}"
+l_crio_var_sock: "{{ l_crio_use_new_var_sock | ternary('/var/run/crio/crio.sock', '/var/run/crio.sock') }}"
 
-openshift_imageconfig_format: "{{ oreg_url_node }}"
-openshift_node_group_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}"
+openshift_node_group_cloud_provider: "{{ openshift_cloudprovider_kind | default(None) }}"
 openshift_node_group_network_plugin_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
 openshift_node_group_network_plugin: "{{ openshift_node_group_network_plugin_default }}"
 openshift_node_group_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
 openshift_node_group_node_data_dir: "{{ openshift_node_group_node_data_dir_default }}"
 openshift_node_group_network_mtu: "{{ openshift_node_sdn_mtu | default(8951) | int }}"
+
+l_openshift_images_dict:
+  origin: 'docker.io/openshift/origin-${component}:${version}'
+  openshift-enterprise: 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
+l_osm_registry_url_default: "{{ l_openshift_images_dict[openshift_deployment_type] }}"
+l_os_registry_url: "{{ oreg_url | default(l_osm_registry_url_default) | regex_replace('${version}' | regex_escape, openshift_image_tag | default('${version}')) }}"
+openshift_imageconfig_format: "{{ l_os_registry_url }}"
+
+l_openshift_prefix_dict:
+  origin: 'origin-${component}'
+  openshift-enterprise: 'ose-${component}'
+l_os_prefix: "{{ l_openshift_prefix_dict[openshift_deployment_type] }}"
+# TODO: we should publish oreg_url component=node
+osn_image: "{{ l_os_registry_url | regex_replace(l_os_prefix | regex_escape, 'node') }}"

+ 10 - 0
roles/openshift_node_group/files/sync-images.yaml

@@ -0,0 +1,10 @@
+apiVersion: image.openshift.io/v1
+kind: ImageStreamTag
+metadata:
+  name: node:v3.10
+  namespace: openshift-node
+tag:
+  reference: true
+  from:
+    kind: DockerImage
+    name: openshift/node:v3.10.0

+ 8 - 0
roles/openshift_node_group/files/sync-policy.yaml

@@ -0,0 +1,8 @@
+kind: List
+apiVersion: v1
+items:
+- kind: ServiceAccount
+  apiVersion: v1
+  metadata:
+    name: sync
+    namespace: openshift-node

+ 135 - 0
roles/openshift_node_group/files/sync.yaml

@@ -0,0 +1,135 @@
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: sync
+  namespace: openshift-node
+  annotations:
+    kubernetes.io/description: |
+      This daemon set provides dynamic configuration of nodes and relabels nodes as appropriate.
+    image.openshift.io/triggers: |
+      [
+        {"from":{"kind":"ImageStreamTag","name":"node:v3.10"},"fieldPath":"spec.template.spec.containers[?(@.name==\"sync\")].image"}
+      ]
+spec:
+  selector:
+    matchLabels:
+      app: sync
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      labels:
+        app: sync
+        component: network
+        type: infra
+        openshift.io/component: sync
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      serviceAccountName: sync
+      terminationGracePeriodSeconds: 1
+      # Must be hostPID because it invokes operations on processes in the host space.
+      hostPID: true
+      # Must be hostNetwork in order to schedule before any network plugins are loaded.
+      hostNetwork: true
+      containers:
+
+      # The sync container is a temporary config loop until Kubelet dynamic config is implemented. It refreshes
+      # the contents of /etc/origin/node/ with the config map ${BOOTSTRAP_CONFIG_NAME} from the openshift-node
+      # namespace. It will restart the Kubelet on the host if it detects the node-config.yaml has changed.
+      #
+      # 1. Dynamic Kubelet config must pull down a full configmap
+      # 2. Nodes must relabel themselves https://github.com/kubernetes/kubernetes/issues/59314
+      #
+      - name: sync
+        image: " "
+        command:
+        - /bin/bash
+        - -c
+        - |
+          #!/bin/bash
+          set -euo pipefail
+
+          # loop until BOOTSTRAP_CONFIG_NAME is set
+          set -o allexport
+          while true; do
+            if [[ -f /etc/sysconfig/origin-node ]]; then
+              source /etc/sysconfig/origin-node
+              if [[ -z "${BOOTSTRAP_CONFIG_NAME-}" ]]; then
+                echo "info: Waiting for BOOTSTRAP_CONFIG_NAME to be set" 2>&1
+                sleep 15
+                continue
+              fi
+              break
+            fi
+          done
+
+          # track the current state of the config
+          if [[ -f /etc/origin/node/node-config.yaml ]]; then
+            md5sum /etc/origin/node/node-config.yaml > /tmp/.old
+          else
+            touch /tmp/.old
+          fi
+
+          # periodically refresh both node-config.yaml and relabel the node
+          while true; do
+            name=${BOOTSTRAP_CONFIG_NAME}
+            if ! oc extract --config=/etc/origin/node/node.kubeconfig "cm/${BOOTSTRAP_CONFIG_NAME}" -n openshift-node --to=/etc/origin/node --confirm; then
+              echo "error: Unable to retrieve latest config for node" 2>&1
+              sleep 15
+              continue
+            fi
+            # detect whether the node-config.yaml has changed, and if so trigger a restart of the kubelet.
+            md5sum /etc/origin/node/node-config.yaml > /tmp/.new
+            if [[ "$( cat /tmp/.old )" != "$( cat /tmp/.new )" ]]; then
+              echo "info: Configuration changed, restarting kubelet" 2>&1
+              # TODO: kubelet doesn't relabel nodes, best effort for now
+              # https://github.com/kubernetes/kubernetes/issues/59314
+              if args="$(openshift start node --write-flags --config /etc/origin/node/node-config.yaml)"; then
+                labels=' --node-labels=([^ ]+) '
+                if [[ ${args} =~ ${labels} ]]; then
+                  labels="${BASH_REMATCH[1]//,/ }"
+                  echo "info: Applying node labels $labels" 2>&1
+                  if ! oc label --config=/etc/origin/node/node.kubeconfig "node/${NODE_NAME}" ${labels} --overwrite; then
+                    echo "error: Unable to apply labels, will retry in 10" 2>&1
+                    sleep 10
+                    continue
+                  fi
+                fi
+              fi
+              if ! pgrep -U 0 -f 'hyperkube kubelet ' | xargs kill; then
+                echo "error: Unable to restart Kubelet" 2>&1
+              fi
+            fi
+            cp -f /tmp/.new /tmp/.old
+            sleep 180
+          done
+
+        env:
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+        securityContext:
+          runAsUser: 0
+          privileged: true
+        volumeMounts:
+        # Directory which contains the host configuration. We read from this directory
+        - mountPath: /etc/origin/node/
+          name: host-config
+        - mountPath: /etc/sysconfig/origin-node
+          name: host-sysconfig-node
+          readOnly: true
+
+      volumes:
+      # In bootstrap mode, the host config contains information not easily available
+      # from other locations.
+      - name: host-config
+        hostPath:
+          path: /etc/origin/node
+      - name: host-sysconfig-node
+        hostPath:
+          path: /etc/sysconfig/origin-node
+      - name: host-modules
+        hostPath:
+          path: /lib/modules

+ 48 - 0
roles/openshift_node_group/tasks/sync.yml

@@ -0,0 +1,48 @@
+---
+- name: Ensure project exists
+  oc_project:
+    name: openshift-node
+    state: present
+    node_selector:
+      - ""
+
+- name: Make temp directory for templates
+  command: mktemp -d /tmp/ansible-XXXXXX
+  register: mktemp
+  changed_when: False
+
+- name: Copy templates to temp directory
+  copy:
+    src: "{{ item }}"
+    dest: "{{ mktemp.stdout }}/{{ item | basename }}"
+  with_fileglob:
+    - "files/*.yaml"
+
+- name: Update the image tag
+  yedit:
+    src: "{{ mktemp.stdout }}/sync-images.yaml"
+    key: 'tag.from.name'
+    value: "{{ osn_image }}"
+
+- name: Ensure the service account can run privileged
+  oc_adm_policy_user:
+    namespace: "openshift-node"
+    resource_kind: scc
+    resource_name: privileged
+    state: present
+    user: "system:serviceaccount:openshift-node:sync"
+
+# TODO: temporary until we fix apply for image stream tags
+- name: Remove the image stream tag
+  shell: >
+    {{ openshift_client_binary }} delete -n openshift-node istag node:v3.10 --ignore-not-found
+
+- name: Apply the config
+  shell: >
+    {{ openshift_client_binary }} apply -f {{ mktemp.stdout }}
+
+- name: Remove temp directory
+  file:
+    state: absent
+    name: "{{ mktemp.stdout }}"
+  changed_when: False

+ 12 - 0
roles/openshift_node_group/templates/node-config.yaml.j2

@@ -20,6 +20,16 @@ imageConfig:
   latest: false
 iptablesSyncPeriod: 30s
 kubeletArguments:
+{% if openshift_use_crio | bool %}
+  container-runtime:
+  - remote
+  container-runtime-endpoint:
+  - {{ l_crio_var_sock }}
+  image-service-endpoint:
+  - {{ l_crio_var_sock }}
+  runtime-request-timeout:
+  - 10m
+{% endif %}
   pod-manifest-path:
   - /etc/origin/node/pods
   bootstrap-kubeconfig:
@@ -30,10 +40,12 @@ kubeletArguments:
   - "true"
   cert-dir:
   - /etc/origin/node/certificates
+{% if openshift_node_group_cloud_provider is defined %}
   cloud-config:
   - /etc/origin/cloudprovider/{{ openshift_node_group_cloud_provider }}.conf
   cloud-provider:
   - {{ openshift_node_group_cloud_provider }}
+{% endif %}
   node-labels: 
   - "{{ openshift_node_group_labels | join(',') }}"
   enable-controller-attach-detach:

+ 2 - 2
roles/openshift_sdn/defaults/main.yml

@@ -1,7 +1,7 @@
 ---
 l_openshift_images_dict:
-  origin: 'openshift/origin-${component}:${version}'
-  openshift-enterprise: 'openshift3/ose-${component}:${version}'
+  origin: 'docker.io/openshift/origin-${component}:${version}'
+  openshift-enterprise: 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
 l_osm_registry_url_default: "{{ l_openshift_images_dict[openshift_deployment_type] }}"
 l_os_registry_url: "{{ oreg_url | default(l_osm_registry_url_default) | regex_replace('${version}' | regex_escape, openshift_image_tag | default('${version}')) }}"
 

+ 2 - 2
roles/openshift_sdn/files/sdn-images.yaml

@@ -1,10 +1,10 @@
 apiVersion: image.openshift.io/v1
 kind: ImageStreamTag
 metadata:
-  name: node:v3.9
+  name: node:v3.10
   namespace: openshift-sdn
 tag:
   reference: true
   from:
     kind: DockerImage
-    name: openshift/node:v3.9
+    name: openshift/node:v3.10.0

+ 1 - 1
roles/openshift_sdn/files/sdn-ovs.yaml

@@ -7,7 +7,7 @@ metadata:
     kubernetes.io/description: |
       This daemon set launches the openvswitch daemon.
     image.openshift.io/triggers: |
-      [{"from":{"kind":"ImageStreamTag","name":"node:v3.9"},"fieldPath":"spec.template.spec.containers[?(@.name==\"openvswitch\")].image"}]
+      [{"from":{"kind":"ImageStreamTag","name":"node:v3.10"},"fieldPath":"spec.template.spec.containers[?(@.name==\"openvswitch\")].image"}]
 spec:
   selector:
     matchLabels:

+ 1 - 93
roles/openshift_sdn/files/sdn.yaml

@@ -9,8 +9,7 @@ metadata:
       It expects that OVS is running on the node.
     image.openshift.io/triggers: |
       [
-        {"from":{"kind":"ImageStreamTag","name":"node:v3.9"},"fieldPath":"spec.template.spec.containers[?(@.name==\"sync\")].image"},
-        {"from":{"kind":"ImageStreamTag","name":"node:v3.9"},"fieldPath":"spec.template.spec.containers[?(@.name==\"sdn\")].image"}
+        {"from":{"kind":"ImageStreamTag","name":"node:v3.10"},"fieldPath":"spec.template.spec.containers[?(@.name==\"sdn\")].image"}
       ]
 spec:
   selector:
@@ -32,98 +31,7 @@ spec:
       # as all pods.
       serviceAccountName: sdn
       hostNetwork: true
-      # Must be hostPID because it invokes operations on processes in the host space
-      hostPID: true
       containers:
-
-      # The sync container is a temporary config loop until Kubelet dynamic config is implemented. It refreshes
-      # the contents of /etc/origin/node/ with the config map ${BOOTSTRAP_CONFIG_NAME} from the openshift-node
-      # namespace. It will restart the Kubelet on the host if it detects the node-config.yaml has changed.
-      #
-      # 1. Dynamic Kubelet config must pull down a full configmap
-      # 2. Nodes must relabel themselves https://github.com/kubernetes/kubernetes/issues/59314
-      #
-      - name: sync
-        image: " "
-        command:
-        - /bin/bash
-        - -c
-        - |
-          #!/bin/bash
-          set -euo pipefail
-
-          # loop until BOOTSTRAP_CONFIG_NAME is set
-          set -o allexport
-          while true; do
-            if [[ -f /etc/sysconfig/origin-node ]]; then
-              source /etc/sysconfig/origin-node
-              if [[ -z "${BOOTSTRAP_CONFIG_NAME-}" ]]; then
-                echo "info: Waiting for BOOTSTRAP_CONFIG_NAME to be set" 2>&1
-                sleep 15
-                continue
-              fi
-              break
-            fi
-          done
-
-          # track the current state of the config
-          if [[ -f /etc/origin/node/node-config.yaml ]]; then
-            md5sum /etc/origin/node/node-config.yaml > /tmp/.old
-          else
-            touch /tmp/.old
-          fi
-
-          # periodically refresh both node-config.yaml and relabel the node
-          while true; do
-            name=${BOOTSTRAP_CONFIG_NAME}
-            if ! oc extract --config=/etc/origin/node/node.kubeconfig "cm/${BOOTSTRAP_CONFIG_NAME}" -n openshift-node --to=/etc/origin/node --confirm; then
-              echo "error: Unable to retrieve latest config for node" 2>&1
-              sleep 15
-              continue
-            fi
-            # detect whether the node-config.yaml has changed, and if so trigger a restart of the kubelet.
-            md5sum /etc/origin/node/node-config.yaml > /tmp/.new
-            if [[ "$( cat /tmp/.old )" != "$( cat /tmp/.new )" ]]; then
-              echo "info: Configuration changed, restarting kubelet" 2>&1
-              # TODO: kubelet doesn't relabel nodes, best effort for now
-              # https://github.com/kubernetes/kubernetes/issues/59314
-              if args="$(openshift start node --write-flags --config /etc/origin/node/node-config.yaml)"; then
-                labels=' --node-labels=([^ ]+) '
-                if [[ ${args} =~ ${labels} ]]; then
-                  labels="${BASH_REMATCH[1]//,/ }"
-                  echo "info: Applying node labels $labels" 2>&1
-                  if ! oc label --config=/etc/origin/node/node.kubeconfig "node/${NODE_NAME}" ${labels} --overwrite; then
-                    echo "error: Unable to apply labels, will retry in 10" 2>&1
-                    sleep 10
-                    continue
-                  fi
-                fi
-              fi
-              if ! pgrep -U 0 -f 'hyperkube kubelet ' | xargs kill; then
-                echo "error: Unable to restart Kubelet" 2>&1
-              fi
-            fi
-            cp -f /tmp/.new /tmp/.old
-            sleep 180
-          done
-
-        env:
-        - name: NODE_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: spec.nodeName
-        securityContext:
-          runAsUser: 0
-          # Permission could be reduced by selecting an appropriate SELinux policy
-          privileged: true
-        volumeMounts:
-        # Directory which contains the host configuration. We write to this directory
-        - mountPath: /etc/origin/node/
-          name: host-config
-        - mountPath: /etc/sysconfig/origin-node
-          name: host-sysconfig-node
-          readOnly: true
-
       # The network container launches the openshift-sdn process, the kube-proxy, and the local DNS service.
       # It relies on an up to date node-config.yaml being present.
       - name: sdn

+ 11 - 13
roles/openshift_sdn/tasks/main.yml

@@ -1,12 +1,5 @@
 ---
-# Fact setting
-# - name: Set default image variables based on deployment type
-#   include_vars: "{{ item }}"
-#   with_first_found:
-#     - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
-#     - "default_images.yml"
-
-- name: Ensure openshift-sdn project exists
+- name: Ensure project exists
   oc_project:
     name: openshift-sdn
     state: present
@@ -14,11 +7,11 @@
       - ""
 
 - name: Make temp directory for templates
-  command: mktemp -d /tmp/console-ansible-XXXXXX
+  command: mktemp -d /tmp/ansible-XXXXXX
   register: mktemp
   changed_when: False
 
-- name: Copy web console templates to temp directory
+- name: Copy templates to temp directory
   copy:
     src: "{{ item }}"
     dest: "{{ mktemp.stdout }}/{{ item | basename }}"
@@ -31,7 +24,7 @@
     key: 'tag.from.name'
     value: "{{ osn_image }}"
 
-- name: Ensure the SDN can run privileged
+- name: Ensure the service account can run privileged
   oc_adm_policy_user:
     namespace: "openshift-sdn"
     resource_kind: scc
@@ -39,9 +32,14 @@
     state: present
     user: "system:serviceaccount:openshift-sdn:sdn"
 
-- name: Apply the SDN config
+# TODO: temporary until we fix apply for image stream tags
+- name: Remove the image stream tag
+  shell: >
+    {{ openshift_client_binary }} delete -n openshift-sdn istag node:v3.10 --ignore-not-found
+
+- name: Apply the config
   shell: >
-    {{ openshift_client_binary }} apply -f {{ mktemp.stdout }}
+    {{ openshift_client_binary }} apply -f "{{ mktemp.stdout }}"
 
 - name: Remove temp directory
   file:

+ 6 - 12
roles/openshift_web_console/defaults/main.yml

@@ -4,18 +4,12 @@ openshift_web_console_nodeselector: {"node-role.kubernetes.io/master":"true"}
 __console_template_file: "console-template.yaml"
 __console_config_file: "console-config.yaml"
 
-openshift_web_console_image_dict:
-  origin:
-    prefix: "docker.io/openshift/origin-"
-    version: "{{ openshift_image_tag }}"
-    image_name: "web-console"
-  openshift-enterprise:
-    prefix: "registry.access.redhat.com/openshift3/ose-"
-    version: "{{ openshift_image_tag }}"
-    image_name: "web-console"
+l_openshift_master_images_dict:
+  origin: 'docker.io/openshift/origin-${component}:${version}'
+  openshift-enterprise: 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
+l_osm_registry_url_default: "{{ l_openshift_master_images_dict[openshift_deployment_type] }}"
+l_osm_registry_url: "{{ oreg_url_master | default(oreg_url) | default(l_osm_registry_url_default) | regex_replace('${version}' | regex_escape, openshift_image_tag | default('${version}')) }}"
+openshift_web_console_image_name: "{{ l_osm_registry_url | regex_replace('${component}' | regex_escape, 'web-console') }}"
 
-openshift_web_console_prefix: "{{ openshift_web_console_image_dict[openshift_deployment_type]['prefix'] }}"
-openshift_web_console_version: "{{ openshift_web_console_image_dict[openshift_deployment_type]['version'] }}"
-openshift_web_console_image_name: "{{ openshift_web_console_image_dict[openshift_deployment_type]['image_name'] }}"
 # Default the replica count to the number of masters.
 openshift_web_console_replica_count: "{{ groups.oo_masters_to_config | length }}"

+ 1 - 1
roles/openshift_web_console/tasks/install.yml

@@ -118,7 +118,7 @@
   shell: >
     {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __console_template_file }}"
     --param API_SERVER_CONFIG="{{ updated_console_config['content'] | b64decode }}"
-    --param IMAGE="{{ openshift_web_console_prefix }}{{ openshift_web_console_image_name }}:{{ openshift_web_console_version }}"
+    --param IMAGE="{{ openshift_web_console_image_name }}"
     --param NODE_SELECTOR={{ openshift_web_console_nodeselector | to_json | quote }}
     --param REPLICA_COUNT="{{ openshift_web_console_replica_count }}"
     --config={{ mktemp.stdout }}/admin.kubeconfig