Browse Source

Merge pull request #8212 from mtnbikenc/add-failed_when-check

Add templating check in failed_when conditions
Scott Dodson 7 years ago
parent
commit
05b5b60cac

+ 0 - 20
playbooks/byo/openshift-cluster/upgrades/v3_9/README.md

@@ -1,20 +0,0 @@
-# v3.9 Major and Minor Upgrade Playbook
-
-## Overview
-This playbook currently performs the following steps.
-
- * Upgrade and restart master services
- * Unschedule node
- * Upgrade and restart docker
- * Upgrade and restart node services
- * Modifies the subset of the configuration necessary
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-## Usage
-
-```
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml
-```

+ 0 - 5
playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade.yml

@@ -1,5 +0,0 @@
----
-#
-# Full Control Plane + Nodes Upgrade
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade.yml

+ 0 - 16
playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml

@@ -1,16 +0,0 @@
----
-#
-# Control Plane Upgrade Playbook
-#
-# Upgrades masters and Docker (only on standalone etcd hosts)
-#
-# This upgrade does not include:
-# - node service running on masters
-# - docker running on masters
-# - node service running on dedicated nodes
-#
-# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
-
-- import_playbook: ../../../../openshift-master/private/restart.yml

+ 0 - 7
playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml

@@ -1,7 +0,0 @@
----
-#
-# Node Upgrade Playbook
-#
-# Upgrades nodes only, but requires the control plane to have already been upgraded.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/v3_9/upgrade_nodes.yml

+ 0 - 7
playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml

@@ -1,7 +0,0 @@
----
-#
-# Node Scale Group Upgrade Playbook
-#
-# Upgrades scale group nodes only.
-#
-- import_playbook: ../../../../common/openshift-cluster/upgrades/upgrade_scale_group.yml

+ 0 - 66
playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml

@@ -1,66 +0,0 @@
----
-- name: create new scale group
-  hosts: localhost
-  tasks:
-  - name: build upgrade scale groups
-    import_role:
-      name: openshift_aws
-      tasks_from: upgrade_node_group.yml
-
-  - fail:
-      msg: "Ensure that new scale groups were provisioned before proceeding to update."
-    when:
-    - "'oo_sg_new_nodes' not in groups or groups.oo_sg_new_nodes|length == 0"
-    - "'oo_sg_current_nodes' not in groups or groups.oo_sg_current_nodes|length == 0"
-    - groups.oo_sg_current_nodes == groups.oo_sg_new_nodes
-
-- name: initialize upgrade bits
-  import_playbook: init.yml
-
-- name: unschedule nodes
-  hosts: oo_sg_current_nodes
-  tasks:
-  - name: Load lib_openshift modules
-    import_role:
-      name: ../roles/lib_openshift
-
-  - name: Mark node unschedulable
-    oc_adm_manage_node:
-      node: "{{ openshift.node.nodename | lower }}"
-      schedulable: False
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    retries: 10
-    delay: 5
-    register: node_unschedulable
-    until: node_unschedulable is succeeded
-
-- name: Drain nodes
-  hosts: oo_sg_current_nodes
-  # This var must be set with -e on invocation, as it is not a per-host inventory var
-  # and is evaluated early. Values such as "20%" can also be used.
-  serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
-  max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
-  tasks:
-  - name: Drain Node for Kubelet upgrade
-    command: >
-      {{ hostvars[groups.oo_first_master.0]['first_master_client_binary'] }} adm drain {{ openshift.node.nodename | lower }}
-      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      --force --delete-local-data --ignore-daemonsets
-      --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    register: l_upgrade_nodes_drain_result
-    until: not (l_upgrade_nodes_drain_result is failed)
-    retries: "{{ 1 if ( openshift_upgrade_nodes_drain_timeout | default(0) | int ) == 0 else 0 }}"
-    delay: 5
-    failed_when:
-    - l_upgrade_nodes_drain_result is failed
-    - openshift_upgrade_nodes_drain_timeout | default(0) | int == 0
-
-# Alright, let's clean up!
-- name: clean up the old scale group
-  hosts: localhost
-  tasks:
-  - name: clean up scale group
-    import_role:
-      name: openshift_aws
-      tasks_from: remove_scale_group.yml

+ 3 - 0
setup.py

@@ -275,12 +275,21 @@ class OpenShiftAnsibleSyntaxCheck(Command):
         failed_items = []
 
         search_results = recursive_search(yaml_contents, 'when')
+        # Also scan failed_when values for inline Jinja templating.
+        # extend(), not append(): append() would nest the failed_when result
+        # list as a single element, so any failed_when value that is itself a
+        # list of conditions would be membership-tested ('{{' in <list>)
+        # instead of substring-tested and silently escape the check.
+        search_results.extend(recursive_search(yaml_contents, 'failed_when'))
         for item in search_results:
             if isinstance(item, str):
                 if '{{' in item or '{%' in item:
                     failed_items.append(item)
-            else:
+            # Bare booleans (e.g. `failed_when: false`) are legitimate and
+            # not iterable; only descend into list-valued conditions.
+            elif not isinstance(item, bool):
                 for sub_item in item:
+                    # Booleans inside a condition list are likewise valid.
+                    if isinstance(sub_item, bool):
+                        continue
                     if '{{' in sub_item or '{%' in sub_item:
                         failed_items.append(sub_item)