
Merge remote-tracking branch 'upstream/master' into get_kuryr_services

Emilio garcia 6 years ago
parent
commit
3b9defb1b4
28 changed files with 455 additions and 37 deletions
  1. README.md (+43 -0)
  2. playbooks/azure/OWNERS (+2 -0)
  3. playbooks/azure/openshift-cluster/build_node_image.yml (+12 -0)
  4. playbooks/openshift-hosted/config.yml (+1 -1)
  5. playbooks/openshift-master/openshift_node_config.yml (+0 -4)
  6. playbooks/openshift-master/openshift_node_group.yml (+1 -5)
  7. playbooks/openshift-master/private/openshift_node_group.yml (+0 -0)
  8. playbooks/openshift-master/private/upgrade.yml (+17 -0)
  9. roles/ansible_service_broker/meta/main.yml (+1 -0)
  10. roles/openshift_control_plane/files/controller.yaml (+6 -1)
  11. roles/openshift_control_plane/files/scripts/crio/master-exec (+25 -0)
  12. roles/openshift_control_plane/files/scripts/crio/master-logs (+28 -0)
  13. roles/openshift_control_plane/files/scripts/crio/master-restart (+25 -0)
  14. roles/openshift_control_plane/tasks/static_shim.yml (+11 -3)
  15. roles/openshift_metrics/defaults/main.yaml (+2 -1)
  16. roles/openshift_metrics/tasks/clean_old_namespace.yaml (+17 -0)
  17. roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml (+10 -4)
  18. roles/openshift_metrics/tasks/install_metrics.yaml (+42 -0)
  19. roles/openshift_metrics/tasks/main.yaml (+2 -0)
  20. roles/openshift_metrics/tasks/migrate_cassandra_pvcs.yaml (+99 -0)
  21. roles/openshift_metrics/tasks/oc_apply.yaml (+36 -0)
  22. roles/openshift_metrics/templates/create_project.j2 (+10 -0)
  23. roles/openshift_metrics/templates/pvc.j2 (+3 -0)
  24. roles/openshift_node/files/clean-up-crio-pods.sh (+24 -0)
  25. roles/openshift_node/tasks/upgrade.yml (+13 -8)
  26. roles/openshift_node/tasks/upgrade/restart.yml (+7 -1)
  27. roles/openshift_node/tasks/upgrade/stop_services.yml (+13 -8)
  28. roles/openshift_node/tasks/upgrade_pre.yml (+5 -1)

+ 43 - 0
README.md

@@ -94,6 +94,49 @@ cd openshift-ansible
 sudo ansible-playbook -i inventory/hosts.localhost playbooks/prerequisites.yml
 sudo ansible-playbook -i inventory/hosts.localhost playbooks/deploy_cluster.yml
 ```
+## Node Group Definition and Mapping
+In 3.10 and newer, all members of the [nodes] inventory group must be assigned an
+`openshift_node_group_name`. This value is used to select the configmap that
+configures each node. By default there are three configmaps created, one for
+each node group defined in `openshift_node_groups`, and they're named
+`node-config-master`, `node-config-infra`, and `node-config-compute`. It's important
+to note that the configmap is also the authoritative definition of node labels;
+the old `openshift_node_labels` value is effectively ignored.
+
+The default set of node groups is defined in
+[roles/openshift_facts/defaults/main.yml] like so:
+
+```
+openshift_node_groups:
+  - name: node-config-master
+    labels:
+      - 'node-role.kubernetes.io/master=true'
+    edits: []
+  - name: node-config-infra
+    labels:
+      - 'node-role.kubernetes.io/infra=true'
+    edits: []
+  - name: node-config-compute
+    labels:
+      - 'node-role.kubernetes.io/compute=true'
+    edits: []
+```
+
+When configuring this in an INI-based inventory, the value must be translated into
+a Python dictionary. Here's an example of a group named `node-config-all-in-one`,
+which is suitable for an all-in-one installation with
+`kubeletArguments.pods-per-core` set to 20:
+
+```
+openshift_node_groups=[{'name': 'node-config-all-in-one', 'labels': ['node-role.kubernetes.io/master=true', 'node-role.kubernetes.io/infra=true', 'node-role.kubernetes.io/compute=true'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
+```
+
+During an upgrade, the process will block until the required configmaps exist
+in the openshift-node namespace. Please define `openshift_node_groups` as
+explained above, or accept the defaults and run the
+playbooks/openshift-master/openshift_node_group.yml playbook to have them
+created for you automatically.
+
 
 ## Complete Production Installation Documentation:
 

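A short note on the playbook mentioned in the README addition above: the node-group configmaps can be created ahead of an upgrade. A minimal sketch, assuming an inventory file at `inventory/hosts` (substitute your own) and the default `openshift_node_groups`:

```
# Pre-create the node-config-* configmaps in the openshift-node namespace
# so the upgrade does not block waiting for them. The inventory path is illustrative.
ansible-playbook -i inventory/hosts playbooks/openshift-master/openshift_node_group.yml
```
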
+ 2 - 0
playbooks/azure/OWNERS

@@ -1,6 +1,8 @@
 reviewers:
 - jim-minter
 - kwoodson
+- pweil-
 approvers:
 - jim-minter
 - kwoodson
+- pweil-

+ 12 - 0
playbooks/azure/openshift-cluster/build_node_image.yml

@@ -56,6 +56,8 @@
 
 - name: install openshift
   import_playbook: ../../openshift-node/private/image_prep.yml
+  vars:
+    etcd_image: "{{ etcd_image_dict[openshift_deployment_type] }}"
 
 - hosts: nodes
   tasks:
@@ -67,6 +69,16 @@
       list: installed
     register: yum
 
+  - name: setup data disk
+    lineinfile:
+      path: /etc/waagent.conf
+      regexp: "{{ item.regexp }}"
+      line: "{{ item.line }}"
+    with_items:
+    - { regexp: '^ResourceDisk\.Filesystem=', line: 'ResourceDisk.Filesystem=xfs' }
+    - { regexp: '^ResourceDisk\.MountPoint=', line: 'ResourceDisk.MountPoint=/var/lib/docker' }
+    - { regexp: '^ResourceDisk\.MountOptions=', line: 'ResourceDisk.MountOptions=rw,relatime,seclabel,attr2,inode64,grpquota' }
+
   - name: run waagent deprovision
     shell: sleep 2 && waagent -deprovision+user -force
     async: 1

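The three `lineinfile` edits above pin the Azure resource disk to an xfs filesystem mounted at /var/lib/docker. A quick spot check of the rendered /etc/waagent.conf on the built image might look like this (sketch only):

```
# Show the ResourceDisk settings written by the "setup data disk" task
grep -E '^ResourceDisk\.(Filesystem|MountPoint|MountOptions)=' /etc/waagent.conf
# Expected, per the task above:
#   ResourceDisk.Filesystem=xfs
#   ResourceDisk.MountPoint=/var/lib/docker
#   ResourceDisk.MountOptions=rw,relatime,seclabel,attr2,inode64,grpquota
```
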
+ 1 - 1
playbooks/openshift-hosted/config.yml

@@ -1,7 +1,7 @@
 ---
 - import_playbook: ../init/main.yml
   vars:
-    l_init_fact_hosts: "oo_masters_to_config"
+    l_init_fact_hosts: "oo_masters_to_config:oo_glusterfs_to_config"
     l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
     l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
 

+ 0 - 4
playbooks/openshift-master/openshift_node_config.yml

@@ -1,4 +0,0 @@
----
-- import_playbook: ../init/main.yml
-
-- import_playbook: private/openshift_node_config.yml

+ 1 - 5
playbooks/openshift-master/openshift_node_group.yml

@@ -5,8 +5,4 @@
     l_openshift_version_set_hosts: "all:!all"
     l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
 
-- name: Setup node-group configmaps
-  hosts: oo_first_master
-  tasks:
-  - import_role:
-      name: openshift_node_group
+- import_playbook: private/openshift_node_group.yml

playbooks/openshift-master/private/openshift_node_config.yml → playbooks/openshift-master/private/openshift_node_group.yml


+ 17 - 0
playbooks/openshift-master/private/upgrade.yml

@@ -12,6 +12,8 @@
       {{ openshift_client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --additive-only=true -o name
     register: check_reconcile_scc_result
     when: openshift_reconcile_sccs_reject_change | default(true) | bool
+    until: check_reconcile_scc_result.rc == 0
+    retries: 3
 
   - fail:
       msg: >
@@ -67,6 +69,19 @@
     failed_when:
     - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0
     - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
+    retries: 2
+    delay: 30
+
+  - name: Migrate legacy HPA scale target refs
+    command: >
+      {{ openshift_client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      migrate legacy-hpa --confirm
+    register: migrate_legacy_hpa_result
+    when: openshift_upgrade_pre_storage_migration_enabled | default(true) | bool
+    failed_when:
+    - migrate_legacy_hpa_result.rc != 0
+    - openshift_upgrade_pre_storage_migration_fatal | default(true) | bool
+
 
 # Set openshift_master_facts separately. In order to reconcile
 # admission_config's, we currently must run openshift_master_facts and
@@ -189,6 +204,8 @@
     failed_when:
     - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
     - openshift_upgrade_post_storage_migration_fatal | default(false) | bool
+    retries: 2
+    delay: 30
 
   - set_fact:
       reconcile_complete: True

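The new "Migrate legacy HPA scale target refs" task is a thin wrapper around a single client call; run by hand it would look roughly like this, assuming the default config_base of /etc/origin:

```
# Manual equivalent of the legacy HPA migration task
oc adm --config=/etc/origin/master/admin.kubeconfig migrate legacy-hpa --confirm
```
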
+ 1 - 0
roles/ansible_service_broker/meta/main.yml

@@ -12,5 +12,6 @@ galaxy_info:
   categories:
   - cloud
 dependencies:
+- role: openshift_facts
 - role: lib_utils
 - role: lib_openshift

+ 6 - 1
roles/openshift_control_plane/files/controller.yaml

@@ -31,6 +31,8 @@ spec:
        name: master-config
      - mountPath: /etc/origin/cloudprovider/
        name: master-cloud-provider
+     - mountPath: /etc/containers/registries.d/
+       name: signature-import
     livenessProbe:
       httpGet:
         scheme: HTTPS
@@ -44,4 +46,7 @@ spec:
       path: /etc/origin/master/
   - name: master-cloud-provider
     hostPath:
-      path: /etc/origin/cloudprovider
+      path: /etc/origin/cloudprovider
+  - hostPath:
+      path: /etc/containers/registries.d
+    name: signature-import

+ 25 - 0
roles/openshift_control_plane/files/scripts/crio/master-exec

@@ -0,0 +1,25 @@
+#!/bin/bash
+set -euo pipefail
+
+# Exec a file in the named component by component name and container name.
+# Remaining arguments are passed to the command. If no static pods have been
+# created yet this will execute on the host.
+if [[ -z "${1-}" || -z "${2-}" ]]; then
+  echo "A component name like 'api', 'etcd', or 'controllers' must be specified along with the container name within that component." 1>&2
+  exit 1
+fi
+
+# We haven't started using static pods yet, assume this command is to be direct executed
+if [[ ! -d /etc/origin/node/pods || -z "$( ls -A /etc/origin/node/pods )" ]]; then
+  exec "${@:3}"
+fi
+
+pod=$(crictl pods -l -q --label "openshift.io/component=${1}" --label "io.kubernetes.container.name=POD" 2>/dev/null)
+uid=$(crictl inspectp ${pod} 2>/dev/null | python -c 'import sys, json; print json.load(sys.stdin)["status"]["labels"]["io.kubernetes.pod.uid"]')
+
+if [[ -z "${uid}" ]]; then
+  echo "Component ${1} is stopped or not running" 1>&2
+  exit 0
+fi
+container=$(crictl ps -l -q --label "io.kubernetes.pod.uid=${uid}" --label "io.kubernetes.container.name=${2}" 2>/dev/null)
+exec crictl exec "${container}" "${@:3}"

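The script's calling convention is `master-exec <component> <container> <command...>`. A hypothetical invocation against the api static pod (component and container names are illustrative, following the examples in the script's own error message):

```
# List the master config directory from inside the api component's container
master-exec api api ls /etc/origin/master
```
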
+ 28 - 0
roles/openshift_control_plane/files/scripts/crio/master-logs

@@ -0,0 +1,28 @@
+#!/bin/bash
+set -euo pipefail
+
+# Return the logs for a given static pod by component name and container name. Remaining arguments are passed to the
+# current container runtime.
+if [[ -z "${1-}" || -z "${2-}" ]]; then
+  echo "A component name like 'api', 'etcd', or 'controllers' must be specified along with the container name within that component." 1>&2
+  exit 1
+fi
+
+# container name is ignored for services
+types=( "atomic-openshift" "origin" )
+for type in "${types[@]}"; do
+  if systemctl cat "${type}-master-${1}.service" &>/dev/null; then
+    journalctl -u "${type}-master-${1}.service" "${@:3}"
+    exit 0
+  fi
+done
+
+pod=$(crictl pods -l -q --label "openshift.io/component=${1}" --label "io.kubernetes.container.name=POD" 2>/dev/null)
+uid=$(crictl inspectp ${pod} 2>/dev/null | python -c 'import sys, json; print json.load(sys.stdin)["status"]["labels"]["io.kubernetes.pod.uid"]')
+
+if [[ -z "${uid}" ]]; then
+  echo "Component ${1} is stopped or not running" 1>&2
+  exit 0
+fi
+container=$(crictl ps -l -q --label "io.kubernetes.pod.uid=${uid}" --label "io.kubernetes.container.name=${2}" 2>/dev/null)
+exec crictl logs "${@:3}" "${container}"

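Usage mirrors master-exec: `master-logs <component> <container> [args...]`, where any trailing arguments are handed to journalctl (for the legacy systemd-unit case) or to `crictl logs` (for static pods). An illustrative call:

```
# Fetch the logs of the etcd static pod's container via crictl
master-logs etcd etcd
```
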
+ 25 - 0
roles/openshift_control_plane/files/scripts/crio/master-restart

@@ -0,0 +1,25 @@
+#!/bin/bash
+set -euo pipefail
+
+# Restart the named component by stopping its base container.
+if [[ -z "${1-}" ]]; then
+  echo "A component name like 'api', 'etcd', or 'controllers' must be specified." 1>&2
+  exit 1
+fi
+
+types=( "atomic-openshift" "origin" )
+for type in "${types[@]}"; do
+  if systemctl cat "${type}-master-${1}.service" &>/dev/null; then
+    systemctl restart "${type}-master-${1}.service"
+    exit 0
+  fi
+done
+
+pod=$(crictl pods -l -q --label "openshift.io/component=${1}" --label "io.kubernetes.container.name=POD" 2>/dev/null)
+if [[ -z "${pod}" ]]; then
+  echo "Component ${1} is already stopped" 1>&2
+  exit 0
+fi
+# Stop the pod
+# TODO(runcom): expose timeout in the CRI
+crictl stopp "${pod}" >/dev/null

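master-restart takes only the component name; for static pods it stops the sandbox and lets the kubelet recreate it, while for the legacy systemd units it falls back to `systemctl restart`. For example:

```
# Restart the controllers component (static pod, or origin/atomic-openshift unit if present)
master-restart controllers
```
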
+ 11 - 3
roles/openshift_control_plane/tasks/static_shim.yml

@@ -6,6 +6,14 @@
     dest: "/usr/local/bin/"
     mode: 0500
   with_items:
-  - scripts/docker/master-exec
-  - scripts/docker/master-logs
-  - scripts/docker/master-restart
+  - "scripts/{{ l_runtime }}/master-exec"
+  - "scripts/{{ l_runtime }}/master-logs"
+  - "scripts/{{ l_runtime }}/master-restart"
+  vars:
+    l_runtime: "{{ 'crio' if openshift_use_crio | default(False) else 'docker' }}"
+
+- name: Ensure cri-tools installed
+  package:
+    name: cri-tools
+    state: present
+  when: openshift_use_crio | default(False)

+ 2 - 1
roles/openshift_metrics/defaults/main.yaml

@@ -60,7 +60,8 @@ openshift_metrics_resolution: 30s
 
 openshift_metrics_master_url: https://kubernetes.default.svc
 openshift_metrics_node_id: nodename
-openshift_metrics_project: openshift-infra
+openshift_metrics_old_project: openshift-infra
+openshift_metrics_project: openshift-metrics
 
 openshift_metrics_cassandra_pvc_prefix: metrics-cassandra
 openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"

+ 17 - 0
roles/openshift_metrics/tasks/clean_old_namespace.yaml

@@ -0,0 +1,17 @@
+---
+- name: remove metrics components on {{ openshift_metrics_old_project }}
+  command: >
+    {{ openshift_client_binary }} -n {{ openshift_metrics_old_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete --ignore-not-found --selector=metrics-infra
+    all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings,clusterrole
+  register: delete_metrics
+  changed_when: delete_metrics.stdout != 'No resources found'
+
+- name: remove rolebindings on {{ openshift_metrics_old_project }}
+  command: >
+    {{ openshift_client_binary }} -n {{ openshift_metrics_old_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    delete --ignore-not-found
+    rolebinding/hawkular-view
+    clusterrolebinding/heapster-cluster-reader
+    clusterrolebinding/hawkular-metrics
+  changed_when: delete_metrics.stdout != 'No resources found'

+ 10 - 4
roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml

@@ -1,17 +1,23 @@
 ---
-- name: Check to see if PVC already exists
+- name: Check to see if PVC already exists in actual namespace
   oc_obj:
     state: list
     kind: pvc
     name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
-    namespace: "{{ openshift_metrics_project }}"
-  register: _metrics_pvc
+    namespace: "{{openshift_metrics_project}}"
+  register: metrics_pvc
+
+# Migration process for PV to another namespace.
+- name: Check if we need to move cassandra PV to another namespace.
+  include_tasks: migrate_cassandra_pvcs.yaml
 
 # _metrics_pvc.results.results | length > 0 returns a false positive
 # so we check for the presence of 'stderr' to determine if the obj exists or not
 # the RC for existing and not existing is both 0
+
+# No PVC exists (in either the new or old namespace), so it's a new installation
 - when:
-    - _metrics_pvc.results.stderr is defined
+    - metrics_pvc.results.stderr is defined and not openshift_metrics_migrate_namespace
   block:
     - name: generate hawkular-cassandra persistent volume claims
       template:

+ 42 - 0
roles/openshift_metrics/tasks/install_metrics.yaml

@@ -1,6 +1,48 @@
 ---
 - include_tasks: pre_install.yaml
 
+- name: Get annotations from {{ openshift_metrics_old_project }}
+  oc_obj:
+    state: list
+    kind: project
+    name: "{{ openshift_metrics_old_project }}"
+    namespace: "{{ openshift_metrics_old_project }}"
+  register: openshift_metrics_infra
+
+- name: Set {{openshift_metrics_old_project }} annotations fact.
+  set_fact:
+    openshift_metrics_old_project_annotations: >
+        { "openshift.io/sa.scc.mcs":"{{ openshift_metrics_infra.results.results[0].metadata.annotations['openshift.io/sa.scc.mcs'] }}",
+          "openshift.io/sa.scc.supplemental-groups": "{{ openshift_metrics_infra.results.results[0].metadata.annotations['openshift.io/sa.scc.supplemental-groups'] }}",
+          "openshift.io/sa.scc.uid-range": "{{ openshift_metrics_infra.results.results[0].metadata.annotations['openshift.io/sa.scc.uid-range'] }}"
+        }
+  when:
+    - openshift_metrics_infra.results is defined
+    - openshift_metrics_infra.results.results is defined
+    - openshift_metrics_infra.results.results[0] is defined
+    - openshift_metrics_infra.results.results[0].metadata is defined
+    - openshift_metrics_infra.results.results[0].metadata.annotations is defined
+    - openshift_metrics_infra.results.results[0].metadata.annotations['openshift.io/sa.scc.mcs'] is defined
+    - openshift_metrics_infra.results.results[0].metadata.annotations['openshift.io/sa.scc.supplemental-groups'] is defined
+    - openshift_metrics_infra.results.results[0].metadata.annotations['openshift.io/sa.scc.uid-range'] is defined
+
+- name: Generate template for project {{ openshift_metrics_project }} creation
+  template:
+    src: create_project.j2
+    dest: "{{ mktemp.stdout }}/templates/hawkular-metrics-projects.yaml"
+  vars:
+    project_name: "{{ openshift_metrics_project }}"
+    annotations: "{{ openshift_metrics_old_project_annotations | default(false) }}"
+
+- name: Create {{ openshift_metrics_project }} namespace
+  oc_obj:
+    state: present
+    name: "{{ openshift_metrics_project }}"
+    kind: Project
+    files:
+      - "{{ mktemp.stdout }}/templates/hawkular-metrics-projects.yaml"
+    delete_after: true
+
 - name: Install Metrics
   include_tasks: "install_{{ include_file }}.yaml"
   with_items:

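The annotation handling above exists so the new openshift-metrics project inherits the same SCC-related MCS/UID ranges as openshift-infra. A manual way to inspect the annotations being copied (sketch, assuming the admin kubeconfig used elsewhere in this role):

```
# Show the openshift.io/sa.scc.* annotations that get carried over to the new project
oc --config=/etc/origin/master/admin.kubeconfig get namespace openshift-infra -o yaml \
  | grep 'openshift.io/sa.scc'
```
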
+ 2 - 0
roles/openshift_metrics/tasks/main.yaml

@@ -49,3 +49,5 @@
   changed_when: False
   check_mode: no
   become: false
+
+- include_tasks: clean_old_namespace.yaml

+ 99 - 0
roles/openshift_metrics/tasks/migrate_cassandra_pvcs.yaml

@@ -0,0 +1,99 @@
+---
+- name: Check to see if PVC exists in an old namespace
+  oc_obj:
+    state: list
+    kind: pvc
+    name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
+    namespace: "{{openshift_metrics_old_project}}"
+  register: openshift_metrics_cassandra_old_pvc
+
+- name: Set PVC vars
+  set_fact:
+    pvc_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
+    pvc_exists: "{{ openshift_metrics_cassandra_old_pvc.results.results[0] | length > 0 }}"
+
+- name: Set fact if we need to move to another namespace
+  set_fact:
+    openshift_metrics_migrate_namespace: "{{ pvc_exists and openshift_metrics_cassandra_old_pvc.results.results[0]['metadata']['name'] == pvc_name }}"
+
+- name: Migrate cassandra PV and PVC to another namespace
+  when: openshift_metrics_migrate_namespace
+  block:
+    - name: Get PV name attached to cassandra PVC
+      set_fact:
+        openshift_metrics_pv_name: "{{ openshift_metrics_cassandra_old_pvc['results']['results'][0]['spec']['volumeName'] }}"
+
+    - name: Get PV object attached to cassandra PVC.
+      oc_obj:
+        state: list
+        kind: pv
+        name: "{{ openshift_metrics_pv_name }}"
+        namespace: "{{ openshift_metrics_old_project }}"
+      register: metrics_pv
+
+    - name: Get PV persistent policy attached to cassandra PVC.
+      set_fact:
+        openshift_metrics_cassandra_pvc_persistent_policy: "{{ metrics_pv['results']['results'][0]['spec']['persistentVolumeReclaimPolicy'] }}"
+
+    - name: Set PV persistent volume reclaim policy to Retain
+      command: >
+       {{ openshift_client_binary }} -n {{ openshift_metrics_old_project }}
+        --config={{ mktemp.stdout }}/admin.kubeconfig
+        patch pv {{ openshift_metrics_pv_name }} -p '{"spec": {"persistentVolumeReclaimPolicy": "Retain"}}'
+
+    - name: Generate persistent volume claim templates for namespace {{ openshift_metrics_project }}
+      template:
+        src: pvc.j2
+        dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ metrics_pvc_index }}.yaml"
+      vars:
+        obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
+        labels:
+          metrics-infra: hawkular-cassandra
+        access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
+        size: "{{ openshift_metrics_cassandra_pvc_size }}"
+        pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+        storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
+        volume_name: "{{ openshift_metrics_pv_name }}"
+
+    - name: Create PVC persistent volume claim {{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }} for {{ openshift_metrics_project }}
+      oc_obj:
+        state: present
+        name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
+        namespace: "{{ openshift_metrics_project }}"
+        kind: persistentvolumeclaim
+        files:
+          - "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ metrics_pvc_index }}.yaml"
+        delete_after: true
+
+    - name: Wait for Cassandra persistent volume claim to be created on {{ openshift_metrics_project }}
+      oc_obj:
+        state: list
+        kind: pvc
+        name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
+        namespace: "{{openshift_metrics_project}}"
+      register: openshift_metrics_new_pvc
+      until: openshift_metrics_new_pvc.results.results[0] | length > 0
+
+    - set_fact:
+        openshift_metrics_pvc_uuid: "{{openshift_metrics_new_pvc['results']['results'][0]['metadata']['uid']}}"
+
+    - name: Attach PV to the new PVC
+      command: >
+       {{ openshift_client_binary }} -n {{ openshift_metrics_project }}
+        --config={{ mktemp.stdout }}/admin.kubeconfig
+        patch pv {{ openshift_metrics_pv_name }} -p '{"spec": {"claimRef": { "namespace": "{{ openshift_metrics_project }}", "name": "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}", "uid": "{{ openshift_metrics_pvc_uuid }}" }}}'
+
+    - name: Wait until the PV is attached to new PVC
+      oc_obj:
+        state: list
+        kind: pvc
+        name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}"
+        namespace: "{{openshift_metrics_project}}"
+      register: openshift_metrics_new_pvc
+      until: openshift_metrics_new_pvc.results.results[0].spec.volumeName == openshift_metrics_pv_name
+
+    - name: Restore persistent volume reclaim policy
+      command: >
+       {{ openshift_client_binary }} -n {{ openshift_metrics_old_project }}
+        --config={{ mktemp.stdout }}/admin.kubeconfig
+        patch pv {{ openshift_metrics_pv_name }} -p '{"spec": {"persistentVolumeReclaimPolicy": "{{openshift_metrics_cassandra_pvc_persistent_policy}}"}}'

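Stripped of the Ansible plumbing, the migration above is: protect the PV, re-bind it to a PVC in the new namespace, then restore the reclaim policy. A rough manual sketch; `<pv-name>`, the PVC name, and `<new-pvc-uid>` are placeholders the playbook fills in from facts:

```
# 1. Make sure the PV survives deletion of its old PVC
oc patch pv <pv-name> -p '{"spec": {"persistentVolumeReclaimPolicy": "Retain"}}'

# 2. Create the PVC in openshift-metrics with spec.volumeName=<pv-name> (rendered
#    from pvc.j2), then point the PV's claimRef at the new claim
oc patch pv <pv-name> -p '{"spec": {"claimRef": {"namespace": "openshift-metrics", "name": "metrics-cassandra-1", "uid": "<new-pvc-uid>"}}}'

# 3. Once the new PVC reports the PV as bound, restore the original reclaim policy
oc patch pv <pv-name> -p '{"spec": {"persistentVolumeReclaimPolicy": "<original-policy>"}}'
```
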
+ 36 - 0
roles/openshift_metrics/tasks/oc_apply.yaml

@@ -9,6 +9,18 @@
   register: generation_init
   failed_when: false
   changed_when: no
+  when: namespace is defined
+
+- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
+  command: >
+    {{ openshift_client_binary }}
+    --config={{ kubeconfig }}
+    get {{file_content.kind}} {{file_content.metadata.name}}
+    -o jsonpath='{.metadata.resourceVersion}'
+  register: generation_init_no_ns
+  failed_when: false
+  changed_when: no
+  when: namespace is not defined
 
 - name: Applying {{file_name}}
   command: >
@@ -18,6 +30,16 @@
   register: generation_apply
   failed_when: "'error' in generation_apply.stderr or (generation_apply.rc | int != 0)"
   changed_when: no
+  when: namespace is defined
+
+- name: Applying {{file_name}}
+  command: >
+    {{ openshift_client_binary }} --config={{ kubeconfig }}
+    apply -f {{ file_name }}
+  register: generation_apply
+  failed_when: "'error' in generation_apply.stderr or (generation_apply.rc | int != 0)"
+  changed_when: no
+  when: namespace is not defined
 
 - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
   command: >
@@ -30,3 +52,17 @@
     init_version: "{{ (generation_init is defined) | ternary(generation_init.stdout, '0') }}"
   failed_when: "'error' in version_changed.stderr or version_changed.rc | int != 0"
   changed_when: version_changed.stdout | int  > init_version | int
+  when: namespace is defined
+
+
+- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
+  command: >
+    {{ openshift_client_binary }} --config={{ kubeconfig }}
+    get {{file_content.kind}} {{file_content.metadata.name}}
+    -o jsonpath='{.metadata.resourceVersion}'
+  register: version_changed
+  vars:
+    init_version: "{{ (generation_init_no_ns is defined) | ternary(generation_init_no_ns.stdout, '0') }}"
+  failed_when: "'error' in version_changed.stderr or version_changed.rc | int != 0"
+  changed_when: version_changed.stdout | int  > init_version | int
+  when: namespace is not defined

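Both branches above implement the same change detection: capture `.metadata.resourceVersion` before the apply and compare it afterwards, counting only a bumped version as "changed". A minimal shell sketch of the idea, with a hypothetical object name:

```
# Change detection as in oc_apply.yaml: resourceVersion increases only if apply changed the object
before=$(oc get configmap example-config -o jsonpath='{.metadata.resourceVersion}')
before="${before:-0}"
oc apply -f example-config.yaml
after=$(oc get configmap example-config -o jsonpath='{.metadata.resourceVersion}')
if [ "${after}" -gt "${before}" ]; then echo changed; else echo unchanged; fi
```
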
+ 10 - 0
roles/openshift_metrics/templates/create_project.j2

@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: Project
+metadata:
+{% if annotations is mapping %}
+  annotations:
+{% for key, value in annotations.items() %}
+    {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+  name: {{ project_name }}

+ 3 - 0
roles/openshift_metrics/templates/pvc.j2

@@ -18,6 +18,9 @@ metadata:
 {% endfor %}
 {% endif %}
 spec:
+{% if volume_name is defined %}
+  volumeName: {{volume_name}}
+{% endif %}
 {% if pv_selector is defined and pv_selector is mapping %}
   selector:
     matchLabels:

+ 24 - 0
roles/openshift_node/files/clean-up-crio-pods.sh

@@ -0,0 +1,24 @@
+#!/bin/bash
+for c in $(runc list -q); do
+        output=$(runc state $c | grep io.kubernetes.cri-o.ContainerType)
+        if [[ "$output" =~ "container" ]]; then
+                runc delete -f $c
+        fi
+        for m in $(mount | grep $c | awk '{print $3}'); do
+                umount -R $m
+        done
+done
+for c in $(runc list -q); do
+        output=$(runc state $c | grep io.kubernetes.cri-o.ContainerType)
+        if [[ "$output" =~ "sandbox" ]]; then
+                runc delete -f $c
+        fi
+        for m in $(mount | grep $c | awk '{print $3}'); do
+                umount -R $m
+        done
+done
+mount | grep overlay | awk '{print $3}' | xargs umount | true
+umount -R /var/lib/containers/storage/overlay
+umount -R /var/lib/containers/storage
+rm -rf /var/run/containers/storage/*
+rm -rf /var/lib/containers/storage/*

+ 13 - 8
roles/openshift_node/tasks/upgrade.yml

@@ -22,19 +22,24 @@
 
 - name: Ensure cri-o is updated
   package:
-    name: cri-o
+    name: "{{ crio_pkgs | join (',') }}"
     state: latest
   when:
   - openshift_use_crio | default(False)
   register: crio_update
+  vars:
+    crio_pkgs:
+    - "cri-o"
+    - "cri-tools"
 
-- name: Restart cri-o
-  systemd:
-    name: cri-o
-    state: restarted
-  when:
-  - openshift_use_crio | default(False)
-  - crio_update is changed
+- name: Remove CRI-O default configuration files
+  file:
+    path: "{{ item }}"
+    state: absent
+  with_items:
+  - "/etc/cni/net.d/200-loopback.conf"
+  - "/etc/cni/net.d/100-crio-bridge.conf"
+  when: crio_update | changed
 
 - name: install pre-pulled rpms.
   import_tasks: upgrade/rpm_upgrade_install.yml

+ 7 - 1
roles/openshift_node/tasks/upgrade/restart.yml

@@ -15,7 +15,7 @@
 - name: Restart support services
   service:
     name: "{{ item }}"
-    state: restarted
+    state: started
     enabled: True
   with_items:
     - NetworkManager
@@ -30,6 +30,12 @@
   retries: 3
   delay: 30
 
+- name: Restart cri-o
+  service:
+    name: cri-o
+    state: started
+  when: openshift_use_crio | default(False)
+
 - name: Start node service
   service:
     name: "{{ openshift_service_type }}-node"

+ 13 - 8
roles/openshift_node/tasks/upgrade/stop_services.yml

@@ -8,14 +8,6 @@
   - "{{ openshift_service_type }}-node"
   failed_when: false
 
-- name: Ensure static containerized services stopped before Docker restart
-  command: /usr/local/bin/master-restart "{{ item }}"
-  with_items:
-  - api
-  - controllers
-  - etcd
-  failed_when: false
-
 - service:
     name: docker
     state: stopped
@@ -26,3 +18,16 @@
   when:
   - l_docker_upgrade is defined
   - l_docker_upgrade | bool
+
+- name: Stop crio
+  service:
+    name: cri-o
+    state: stopped
+  when: openshift_use_crio | default(False)
+
+# TODO: Need to determine if this is needed long term or just 3.9 to 3.10
+# Upgrading cri-o, at least from 1.9 to 1.10, requires that all
+# pods be stopped
+- name: Clean up cri-o pods
+  script: clean-up-crio-pods.sh
+  when: openshift_use_crio | default(False)

+ 5 - 1
roles/openshift_node/tasks/upgrade_pre.yml

@@ -35,11 +35,15 @@
   - l_docker_upgrade | bool
 
 - name: Stage cri-o updates
-  command: "{{ ansible_pkg_mgr }} install -y --downloadonly cri-o"
+  command: "{{ ansible_pkg_mgr }} install -y --downloadonly {{ crio_pkgs | join(' ') }}"
   register: result
   until: result is succeeded
   when:
   - openshift_use_crio | default(False)
+  vars:
+    crio_pkgs:
+    - "cri-o"
+    - "cri-tools"
 
 - import_tasks: upgrade/rpm_upgrade.yml
   when: not openshift_is_atomic | bool