浏览代码

Deprecate using Ansible tests as filters

Russell Teague 7 年之前
父节点
当前提交
c113074f5b
共有 89 个文件被更改,包括 232 次插入和 189 次删除
  1. 7 7
      playbooks/adhoc/uninstall.yml
  2. 4 4
      playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml
  3. 1 1
      playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml
  4. 2 2
      playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml
  5. 1 1
      playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
  6. 9 3
      playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
  7. 2 2
      playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
  8. 9 9
      playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
  9. 4 4
      playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
  10. 3 3
      playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml
  11. 1 1
      playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
  12. 3 3
      playbooks/init/facts.yml
  13. 1 1
      playbooks/openshift-etcd/private/upgrade_image_members.yml
  14. 1 1
      playbooks/openshift-etcd/private/upgrade_rpm_members.yml
  15. 1 1
      playbooks/openshift-node/private/network_manager.yml
  16. 1 1
      playbooks/openshift-node/private/restart.yml
  17. 1 1
      roles/calico/handlers/main.yml
  18. 1 1
      roles/cockpit/tasks/main.yml
  19. 1 1
      roles/container_runtime/handlers/main.yml
  20. 3 3
      roles/container_runtime/tasks/common/syscontainer_packages.yml
  21. 19 4
      roles/container_runtime/tasks/docker_sanity.yml
  22. 12 5
      roles/container_runtime/tasks/docker_upgrade_check.yml
  23. 8 5
      roles/container_runtime/tasks/package_docker.yml
  24. 4 4
      roles/container_runtime/tasks/systemcontainer_docker.yml
  25. 1 1
      roles/contiv/tasks/download_bins.yml
  26. 3 3
      roles/contiv/tasks/netplugin.yml
  27. 3 3
      roles/contiv/tasks/pkgMgrInstallers/centos-install.yml
  28. 1 1
      roles/contiv_facts/tasks/fedora-install.yml
  29. 1 1
      roles/etcd/tasks/auxiliary/drop_etcdctl.yml
  30. 1 1
      roles/etcd/tasks/backup/backup.yml
  31. 1 1
      roles/etcd/tasks/certificates/deploy_ca.yml
  32. 1 1
      roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
  33. 5 3
      roles/etcd/tasks/main.yml
  34. 6 2
      roles/etcd/tasks/system_container.yml
  35. 1 1
      roles/etcd/tasks/upgrade/upgrade_image.yml
  36. 1 1
      roles/etcd/tasks/upgrade/upgrade_rpm.yml
  37. 2 2
      roles/flannel/handlers/main.yml
  38. 1 1
      roles/flannel/tasks/main.yml
  39. 1 1
      roles/nickhammond.logrotate/tasks/main.yml
  40. 1 1
      roles/nuage_ca/tasks/main.yaml
  41. 2 2
      roles/openshift_ca/tasks/main.yml
  42. 2 2
      roles/openshift_cli/tasks/main.yml
  43. 1 1
      roles/openshift_clock/tasks/main.yaml
  44. 4 4
      roles/openshift_excluder/tasks/install.yml
  45. 1 1
      roles/openshift_excluder/tasks/verify_excluder.yml
  46. 1 1
      roles/openshift_expand_partition/tasks/main.yml
  47. 2 2
      roles/openshift_loadbalancer/tasks/main.yml
  48. 1 1
      roles/openshift_manage_node/tasks/main.yml
  49. 1 1
      roles/openshift_master/tasks/journald.yml
  50. 12 12
      roles/openshift_master/tasks/main.yml
  51. 2 2
      roles/openshift_master/tasks/set_loopback_context.yml
  52. 1 1
      roles/openshift_master/tasks/systemd_units.yml
  53. 1 1
      roles/openshift_master/tasks/upgrade/rpm_upgrade.yml
  54. 3 3
      roles/openshift_metrics/tasks/install_hawkular.yaml
  55. 1 1
      roles/openshift_nfs/tasks/create_export.yml
  56. 2 2
      roles/openshift_nfs/tasks/setup.yml
  57. 2 2
      roles/openshift_node/handlers/main.yml
  58. 1 1
      roles/openshift_node/tasks/bootstrap.yml
  59. 6 6
      roles/openshift_node/tasks/config.yml
  60. 1 1
      roles/openshift_node/tasks/dnsmasq/no-network-manager.yml
  61. 1 1
      roles/openshift_node/tasks/dnsmasq_install.yml
  62. 3 3
      roles/openshift_node/tasks/install.yml
  63. 3 1
      roles/openshift_node/tasks/main.yml
  64. 1 1
      roles/openshift_node/tasks/storage_plugins/ceph.yml
  65. 3 3
      roles/openshift_node/tasks/storage_plugins/glusterfs.yml
  66. 1 1
      roles/openshift_node/tasks/storage_plugins/iscsi.yml
  67. 3 3
      roles/openshift_node/tasks/storage_plugins/nfs.yml
  68. 1 1
      roles/openshift_node/tasks/upgrade.yml
  69. 1 1
      roles/openshift_node/tasks/upgrade/config_changes.yml
  70. 1 1
      roles/openshift_node/tasks/upgrade/restart.yml
  71. 2 2
      roles/openshift_node/tasks/upgrade/rpm_upgrade.yml
  72. 1 1
      roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml
  73. 1 1
      roles/openshift_node/tasks/upgrade/stop_services.yml
  74. 2 2
      roles/openshift_node/tasks/upgrade_pre.yml
  75. 1 1
      roles/openshift_node_certificates/handlers/main.yml
  76. 2 2
      roles/openshift_openstack/tasks/container-storage-setup.yml
  77. 2 2
      roles/openshift_openstack/tasks/node-packages.yml
  78. 1 1
      roles/openshift_openstack/tasks/populate-dns.yml
  79. 1 1
      roles/openshift_repos/tasks/main.yaml
  80. 1 1
      roles/openshift_sanitize_inventory/tasks/main.yml
  81. 1 1
      roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
  82. 1 1
      roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
  83. 3 3
      roles/openshift_storage_nfs/tasks/main.yml
  84. 1 1
      roles/openshift_storage_nfs_lvm/tasks/nfs.yml
  85. 2 2
      roles/openshift_version/tasks/main.yml
  86. 7 5
      roles/os_firewall/tasks/firewalld.yml
  87. 6 4
      roles/os_firewall/tasks/iptables.yml
  88. 1 1
      roles/os_update_latest/tasks/main.yml
  89. 2 2
      roles/rhel_subscribe/tasks/main.yml

+ 7 - 7
playbooks/adhoc/uninstall.yml

@@ -126,13 +126,13 @@
         - tuned-profiles-atomic-openshift-node
         - tuned-profiles-origin-node
         register: result
-        until: result | success
+        until: result is succeeded
 
       - name: Remove flannel package
         package: name=flannel state=absent
         when: openshift_use_flannel | default(false) | bool
         register: result
-        until: result | success
+        until: result is succeeded
       when: not is_atomic | bool
 
     - shell: systemctl reset-failed
@@ -286,9 +286,9 @@
   - name: restart docker
     service: name=docker state=stopped enabled=no
     failed_when: false
-    when: not (container_engine | changed)
+    when: not (container_engine is changed)
     register: l_docker_restart_docker_in_pb_result
-    until: not l_docker_restart_docker_in_pb_result | failed
+    until: not (l_docker_restart_docker_in_pb_result is failed)
     retries: 3
     delay: 30
 
@@ -384,7 +384,7 @@
     - origin-docker-excluder
     - origin-master
     register: result
-    until: result | success
+    until: result is succeeded
 
   - shell: systemctl reset-failed
     changed_when: False
@@ -499,7 +499,7 @@
     - etcd
     - etcd3
     register: result
-    until: result | success
+    until: result is succeeded
 
   - shell: systemctl reset-failed
     changed_when: False
@@ -558,7 +558,7 @@
     with_items:
     - haproxy
     register: result
-    until: result | success
+    until: result is succeeded
 
   - shell: systemctl reset-failed
     changed_when: False

+ 4 - 4
playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -46,7 +46,7 @@
     retries: 10
     delay: 5
     register: node_unschedulable
-    until: node_unschedulable|succeeded
+    until: node_unschedulable is succeeded
     when:
     - l_docker_upgrade is defined
     - l_docker_upgrade | bool
@@ -58,7 +58,7 @@
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
     register: l_docker_upgrade_drain_result
-    until: not l_docker_upgrade_drain_result | failed
+    until: not (l_docker_upgrade_drain_result is failed)
     retries: 60
     delay: 60
 
@@ -73,5 +73,5 @@
     retries: 10
     delay: 5
     register: node_schedulable
-    until: node_schedulable|succeeded
-    when: node_unschedulable|changed
+    until: node_schedulable is succeeded
+    when: node_unschedulable is changed

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml

@@ -2,7 +2,7 @@
 - name: Restart docker
   service: name=docker state=restarted
   register: l_docker_restart_docker_in_upgrade_result
-  until: not l_docker_restart_docker_in_upgrade_result | failed
+  until: not (l_docker_restart_docker_in_upgrade_result is failed)
   retries: 3
   delay: 30
 

+ 2 - 2
playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml

@@ -35,14 +35,14 @@
     name: docker
     state: stopped
   register: l_pb_docker_upgrade_stop_result
-  until: not l_pb_docker_upgrade_stop_result | failed
+  until: not (l_pb_docker_upgrade_stop_result is failed)
   retries: 3
   delay: 30
 
 - name: Upgrade Docker
   package: name=docker{{ '-' + docker_version }} state=present
   register: result
-  until: result | success
+  until: result is succeeded
 
 - include_tasks: restart.yml
   when: not skip_docker_restart | default(False) | bool

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/post_control_plane.yml

@@ -126,7 +126,7 @@
     debug:
       msg: "WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
     when:
-    - not grep_plugin_order_override | skipped
+    - not (grep_plugin_order_override is skipped)
     - grep_plugin_order_override.rc == 0
 
   - name: Warn if shared-resource-viewer could not be updated

+ 9 - 3
playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml

@@ -20,13 +20,17 @@
       msg: >
         openshift_pkg_version is {{ openshift_pkg_version }} which is not a
         valid version for a {{ openshift_upgrade_target }} upgrade
-    when: openshift_pkg_version is defined and openshift_pkg_version.split('-',1).1 | version_compare(openshift_upgrade_target ,'<')
+    when:
+    - openshift_pkg_version is defined
+    - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
 
   - fail:
       msg: >
         openshift_image_tag is {{ openshift_image_tag }} which is not a
         valid version for a {{ openshift_upgrade_target }} upgrade
-    when: openshift_image_tag is defined and openshift_image_tag.split('v',1).1 | version_compare(openshift_upgrade_target ,'<')
+    when:
+    - openshift_image_tag is defined
+    - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
 
   - set_fact:
       openshift_release: "{{ openshift_release[1:] }}"
@@ -36,7 +40,9 @@
       msg: >
         openshift_release is {{ openshift_release }} which is not a
         valid release for a {{ openshift_upgrade_target }} upgrade
-    when: openshift_release is defined and not openshift_release | version_compare(openshift_upgrade_target ,'=')
+    when:
+    - openshift_release is defined
+    - not (openshift_release is version_compare(openshift_upgrade_target ,'='))
 
 - name: Verify master processes
   hosts: oo_masters_to_config

+ 2 - 2
playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml

@@ -43,11 +43,11 @@
     fail:
       msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
     when:
-    - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<')
+    - (openshift_pkg_version | default('-0.0', True)).split('-')[1] is version_compare(openshift_release, '<')
 
 - name: Fail when openshift version does not meet minium requirement for Origin upgrade
   fail:
     msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
   when:
   - deployment_type == 'origin'
-  - openshift.common.version | version_compare(openshift_upgrade_min,'<')
+  - openshift.common.version is version_compare(openshift_upgrade_min,'<')

+ 9 - 9
playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml

@@ -91,7 +91,7 @@
     register: l_pb_upgrade_control_plane_post_upgrade_storage
     when:
     - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
-    - openshift_version | version_compare('3.7','<')
+    - openshift_version is version_compare('3.7','<')
     failed_when:
     - openshift_upgrade_post_storage_migration_enabled | default(true) | bool
     - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0
@@ -136,7 +136,7 @@
       {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-roles --additive-only=true --confirm -o name
     register: reconcile_cluster_role_result
-    when: openshift_version | version_compare('3.7','<')
+    when: openshift_version is version_compare('3.7','<')
     changed_when:
     - reconcile_cluster_role_result.stdout != ''
     - reconcile_cluster_role_result.rc == 0
@@ -151,7 +151,7 @@
       --exclude-groups=system:unauthenticated
       --exclude-users=system:anonymous
       --additive-only=true --confirm -o name
-    when: openshift_version | version_compare('3.7','<')
+    when: openshift_version is version_compare('3.7','<')
     register: reconcile_bindings_result
     changed_when:
     - reconcile_bindings_result.stdout != ''
@@ -167,9 +167,9 @@
     - reconcile_jenkins_role_binding_result.stdout != ''
     - reconcile_jenkins_role_binding_result.rc == 0
     when:
-    - openshift_version | version_compare('3.7','<')
+    - openshift_version is version_compare('3.7','<')
 
-  - when: openshift_upgrade_target | version_compare('3.7','<')
+  - when: openshift_upgrade_target is version_compare('3.7','<')
     block:
     - name: Retrieve shared-resource-viewer
       oc_obj:
@@ -287,14 +287,14 @@
     retries: 10
     delay: 5
     register: node_unschedulable
-    until: node_unschedulable|succeeded
+    until: node_unschedulable is succeeded
 
   - name: Drain Node for Kubelet upgrade
     command: >
       {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_control_plane_drain_result
-    until: not l_upgrade_control_plane_drain_result | failed
+    until: not (l_upgrade_control_plane_drain_result is failed)
     retries: 60
     delay: 60
 
@@ -314,5 +314,5 @@
     retries: 10
     delay: 5
     register: node_schedulable
-    until: node_schedulable|succeeded
-    when: node_unschedulable|changed
+    until: node_schedulable is succeeded
+    when: node_unschedulable is changed

+ 4 - 4
playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

@@ -31,14 +31,14 @@
     retries: 10
     delay: 5
     register: node_unschedulable
-    until: node_unschedulable|succeeded
+    until: node_unschedulable is succeeded
 
   - name: Drain Node for Kubelet upgrade
     command: >
       {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
-    until: not l_upgrade_nodes_drain_result | failed
+    until: not (l_upgrade_nodes_drain_result is failed)
     retries: 60
     delay: 60
 
@@ -56,8 +56,8 @@
     retries: 10
     delay: 5
     register: node_schedulable
-    until: node_schedulable|succeeded
-    when: node_unschedulable|changed
+    until: node_schedulable is succeeded
+    when: node_unschedulable is changed
 
 - name: Re-enable excluders
   hosts: oo_nodes_to_upgrade:!oo_masters_to_config

+ 3 - 3
playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml

@@ -32,7 +32,7 @@
     retries: 10
     delay: 5
     register: node_unschedulable
-    until: node_unschedulable|succeeded
+    until: node_unschedulable is succeeded
 
 - name: Drain nodes
   hosts: oo_sg_current_nodes
@@ -49,11 +49,11 @@
       --timeout={{ openshift_upgrade_nodes_drain_timeout | default(0) }}s
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
-    until: not l_upgrade_nodes_drain_result | failed
+    until: not (l_upgrade_nodes_drain_result is failed)
     retries: "{{ 1 if openshift_upgrade_nodes_drain_timeout | default(0) == '0' else 0  | int }}"
     delay: 5
     failed_when:
-    - l_upgrade_nodes_drain_result | failed
+    - l_upgrade_nodes_drain_result is failed
     - openshift_upgrade_nodes_drain_timeout | default(0) == '0'
 
 # Alright, let's clean up!

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml

@@ -16,7 +16,7 @@
     command: >
       {{ openshift.common.client_binary }} adm migrate authorization
     when:
-    - openshift_currently_installed_version | version_compare('3.7','<')
+    - openshift_currently_installed_version is version_compare('3.7','<')
     - openshift_upgrade_pre_authorization_migration_enabled | default(true) | bool
     changed_when: false
     register: l_oc_result

+ 3 - 3
playbooks/init/facts.yml

@@ -69,7 +69,7 @@
     - name: assert atomic host docker version is 1.12 or later
       assert:
         that:
-        - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=')
+        - l_atomic_docker_version.stdout | replace('"', '') is version_compare('1.12','>=')
         msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host.
 
   - when:
@@ -85,7 +85,7 @@
       - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
       - yum-utils
       register: result
-      until: result | success
+      until: result is succeeded
 
     - name: Ensure various deps for running system containers are installed
       package:
@@ -103,7 +103,7 @@
         or (openshift_use_node_system_container | default(False)) | bool
         or (openshift_use_master_system_container | default(False)) | bool
       register: result
-      until: result | success
+      until: result is succeeded
 
   - name: Gather Cluster facts and set is_containerized if needed
     openshift_facts:

+ 1 - 1
playbooks/openshift-etcd/private/upgrade_image_members.yml

@@ -13,5 +13,5 @@
       r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
       etcd_peer: "{{ openshift.common.hostname }}"
     when:
-    - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<')
+    - etcd_container_version | default('99') is version_compare(etcd_upgrade_version,'<')
     - openshift.common.is_containerized | bool

+ 1 - 1
playbooks/openshift-etcd/private/upgrade_rpm_members.yml

@@ -13,6 +13,6 @@
       r_etcd_upgrade_version: "{{ etcd_upgrade_version }}"
       etcd_peer: "{{ openshift.common.hostname }}"
     when:
-    - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<')
+    - etcd_rpm_version.stdout | default('99') is version_compare(etcd_upgrade_version, '<')
     - ansible_distribution == 'RedHat'
     - not openshift.common.is_containerized | bool

+ 1 - 1
playbooks/openshift-node/private/network_manager.yml

@@ -8,7 +8,7 @@
       name: 'NetworkManager'
       state: present
     register: result
-    until: result | success
+    until: result is succeeded
 
   - name: configure NetworkManager
     lineinfile:

+ 1 - 1
playbooks/openshift-node/private/restart.yml

@@ -12,7 +12,7 @@
       name: docker
       state: restarted
     register: l_docker_restart_docker_in_node_result
-    until: not l_docker_restart_docker_in_node_result | failed
+    until: not (l_docker_restart_docker_in_node_result is failed)
     retries: 3
     delay: 30
 

+ 1 - 1
roles/calico/handlers/main.yml

@@ -9,6 +9,6 @@
     name: "{{ openshift_docker_service_name }}"
     state: restarted
   register: l_docker_restart_docker_in_calico_result
-  until: not l_docker_restart_docker_in_calico_result | failed
+  until: not (l_docker_restart_docker_in_calico_result is failed)
   retries: 3
   delay: 30

+ 1 - 1
roles/cockpit/tasks/main.yml

@@ -12,7 +12,7 @@
     - "{{ cockpit_plugins }}"
   when: not openshift.common.is_containerized | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Enable cockpit-ws
   systemd:

+ 1 - 1
roles/container_runtime/handlers/main.yml

@@ -6,7 +6,7 @@
     state: restarted
     daemon_reload: yes
   register: r_docker_restart_docker_result
-  until: not r_docker_restart_docker_result | failed
+  until: not (r_docker_restart_docker_result is failed)
   retries: 3
   delay: 30
   when: not docker_service_status_changed | default(false) | bool

+ 3 - 3
roles/container_runtime/tasks/common/syscontainer_packages.yml

@@ -6,7 +6,7 @@
     state: present
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 # Used to pull and install the system container
 - name: Ensure atomic is installed
@@ -15,7 +15,7 @@
     state: present
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 # At the time of writing the atomic command requires runc for it's own use. This
 # task is here in the even that the atomic package ever removes the dependency.
@@ -25,4 +25,4 @@
     state: present
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded

+ 19 - 4
roles/container_runtime/tasks/docker_sanity.yml

@@ -5,23 +5,38 @@
 - name: Error out if Docker pre-installed but too old
   fail:
     msg: "Docker {{ curr_docker_version.stdout }} is installed, but >= 1.9.1 is required."
-  when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.9.1', '<') and not docker_version is defined
+  when:
+  - not (curr_docker_version is skipped)
+  - curr_docker_version.stdout != ''
+  - curr_docker_version.stdout is version_compare('1.9.1', '<')
+  - not (docker_version is defined)
 
 - name: Error out if requested Docker is too old
   fail:
     msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
-  when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
+  when:
+  - docker_version is defined
+  - docker_version is version_compare('1.9.1', '<')
 
 # If a docker_version was requested, sanity check that we can install or upgrade to it, and
 # no downgrade is required.
 - name: Fail if Docker version requested but downgrade is required
   fail:
     msg: "Docker {{ curr_docker_version.stdout }} is installed, but version {{ docker_version }} was requested."
-  when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and docker_version is defined and curr_docker_version.stdout | version_compare(docker_version, '>')
+  when:
+  - not (curr_docker_version is skipped)
+  - curr_docker_version.stdout != ''
+  - docker_version is defined
+  - curr_docker_version.stdout is version_compare(docker_version, '>')
 
 # This involves an extremely slow migration process, users should instead run the
 # Docker 1.10 upgrade playbook to accomplish this.
 - name: Error out if attempting to upgrade Docker across the 1.10 boundary
   fail:
     msg: "Cannot upgrade Docker to >= 1.10, please upgrade or remove Docker manually, or use the Docker upgrade playbook if OpenShift is already installed."
-  when: not curr_docker_version | skipped and curr_docker_version.stdout != '' and curr_docker_version.stdout | version_compare('1.10', '<') and docker_version is defined and docker_version | version_compare('1.10', '>=')
+  when:
+  - not (curr_docker_version is skipped)
+  - curr_docker_version.stdout != ''
+  - curr_docker_version.stdout is version_compare('1.10', '<')
+  - docker_version is defined
+  - docker_version is version_compare('1.10', '>=')

+ 12 - 5
roles/container_runtime/tasks/docker_upgrade_check.yml

@@ -19,7 +19,7 @@
   command: "{{ repoquery_installed }} --qf '%{version}' docker"
   register: curr_docker_version
   retries: 4
-  until: curr_docker_version | succeeded
+  until: curr_docker_version is succeeded
   changed_when: false
 
 - name: Get latest available version of Docker
@@ -27,7 +27,7 @@
     {{ repoquery_cmd }} --qf '%{version}' "docker"
   register: avail_docker_version
   retries: 4
-  until: avail_docker_version | succeeded
+  until: avail_docker_version is succeeded
   # Don't expect docker rpm to be available on hosts that don't already have it installed:
   when: pkg_check.rc == 0
   failed_when: false
@@ -36,7 +36,10 @@
 - fail:
     msg: This playbook requires access to Docker 1.12 or later
   # Disable the 1.12 requirement if the user set a specific Docker version
-  when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
+  when:
+  - docker_version is not defined
+  - docker_upgrade is not defined or docker_upgrade | bool == True
+  - (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout is version_compare('1.12','<')))
 
 # Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
 - set_fact:
@@ -50,7 +53,9 @@
 - name: Flag for Docker upgrade if necessary
   set_fact:
     l_docker_upgrade: True
-  when: pkg_check.rc == 0 and curr_docker_version.stdout | version_compare(docker_version,'<')
+  when:
+  - pkg_check.rc == 0
+  - curr_docker_version.stdout is version_compare(docker_version,'<')
 
 # Additional checks for Atomic hosts:
 - name: Determine available Docker
@@ -64,4 +69,6 @@
 
 - fail:
     msg: This playbook requires access to Docker 1.12 or later
-  when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
+  when:
+  - openshift.common.is_atomic | bool
+  - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<')

+ 8 - 5
roles/container_runtime/tasks/package_docker.yml

@@ -6,7 +6,7 @@
   when: not openshift.common.is_atomic | bool
   register: curr_docker_version
   retries: 4
-  until: curr_docker_version | succeeded
+  until: curr_docker_version is succeeded
   changed_when: false
 
 # Some basic checks to ensure the role will complete
@@ -19,9 +19,12 @@
   package:
     name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"
     state: present
-  when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != ''
+  when:
+  - not (openshift.common.is_atomic | bool)
+  - not (curr_docker_version is skipped)
+  - not (curr_docker_version.stdout != '')
   register: result
-  until: result | success
+  until: result is succeeded
 
 - block:
   # Extend the default Docker service unit file when using iptables-services
@@ -137,11 +140,11 @@
     state: started
     daemon_reload: yes
   register: r_docker_package_docker_start_result
-  until: not r_docker_package_docker_start_result | failed
+  until: not (r_docker_package_docker_start_result is failed)
   retries: 3
   delay: 30
 
 - set_fact:
-    docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
+    docker_service_status_changed: "{{ (r_docker_package_docker_start_result is changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
 
 - include_tasks: common/post.yml

+ 4 - 4
roles/container_runtime/tasks/systemcontainer_docker.yml

@@ -20,7 +20,7 @@
   package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 # Make sure docker is disabled. Errors are ignored.
 - name: Disable Docker
@@ -31,7 +31,7 @@
     daemon_reload: yes
   ignore_errors: True
   register: r_docker_systemcontainer_docker_stop_result
-  until: not r_docker_systemcontainer_docker_stop_result | failed
+  until: not (r_docker_systemcontainer_docker_stop_result is failed)
   retries: 3
   delay: 30
 
@@ -87,12 +87,12 @@
     state: started
     daemon_reload: yes
   register: r_docker_systemcontainer_docker_start_result
-  until: not r_docker_systemcontainer_docker_start_result | failed
+  until: not (r_docker_systemcontainer_docker_start_result is failed)
   retries: 3
   delay: 30
 
 - set_fact:
-    docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
+    docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result is changed }}"
 
 # Since docker is running as a system container, docker login will fail to create
 # credentials.  Use alternate method if requiring authenticated registries.

+ 1 - 1
roles/contiv/tasks/download_bins.yml

@@ -9,7 +9,7 @@
     name: bzip2
     state: installed
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Download Bins | Download Contiv tar file
   get_url:

+ 3 - 3
roles/contiv/tasks/netplugin.yml

@@ -101,15 +101,15 @@
 
 - name: systemd reload
   command: systemctl daemon-reload
-  when: docker_updated|changed
+  when: docker_updated is changed
 
 - name: Docker | Restart docker
   service:
     name: "{{ openshift_docker_service_name }}"
     state: restarted
-  when: docker_updated|changed
+  when: docker_updated is changed
   register: l_docker_restart_docker_in_contiv_result
-  until: not l_docker_restart_docker_in_contiv_result | failed
+  until: not (l_docker_restart_docker_in_contiv_result is failed)
   retries: 3
   delay: 30
 

+ 3 - 3
roles/contiv/tasks/pkgMgrInstallers/centos-install.yml

@@ -4,7 +4,7 @@
     pkg=net-tools
     state=latest
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: PkgMgr RHEL/CentOS | Get openstack ocata rpm
   get_url:
@@ -23,7 +23,7 @@
   tags:
     - ovs_install
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: PkgMgr RHEL/CentOS | Install ovs
   yum:
@@ -36,4 +36,4 @@
   tags:
     - ovs_install
   register: result
-  until: result | success
+  until: result is succeeded

+ 1 - 1
roles/contiv_facts/tasks/fedora-install.yml

@@ -4,7 +4,7 @@
     name: dnf
     state: installed
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Update repo cache
   command: dnf update -y

+ 1 - 1
roles/etcd/tasks/auxiliary/drop_etcdctl.yml

@@ -3,7 +3,7 @@
   package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Configure etcd profile.d aliases
   template:

+ 1 - 1
roles/etcd/tasks/backup/backup.yml

@@ -44,7 +44,7 @@
   - r_etcd_common_embedded_etcd | bool
   - not l_ostree_booted.stat.exists | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Check selinux label of '{{ etcd_data_dir }}'
   command: >

+ 1 - 1
roles/etcd/tasks/certificates/deploy_ca.yml

@@ -7,7 +7,7 @@
   delegate_to: "{{ etcd_ca_host }}"
   run_once: true
   register: result
-  until: result | success
+  until: result is succeeded
 
 - file:
     path: "{{ item }}"

+ 1 - 1
roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml

@@ -5,7 +5,7 @@
     state: present
   when: not etcd_is_containerized | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Check status of etcd certificates
   stat:

+ 5 - 3
roles/etcd/tasks/main.yml

@@ -13,7 +13,7 @@
   package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
   when: not etcd_is_containerized | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - include_tasks: drop_etcdctl.yml
   when:
@@ -93,7 +93,9 @@
       daemon_reload: yes
     when: not l_is_etcd_system_container | bool
     register: task_result
-    failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+    failed_when:
+    - task_result is failed
+    - ('could not' not in task_result.msg|lower)
 
   - name: Install etcd container service file
     template:
@@ -131,4 +133,4 @@
 
 - name: Set fact etcd_service_status_changed
   set_fact:
-    etcd_service_status_changed: "{{ start_result | changed }}"
+    etcd_service_status_changed: "{{ start_result is changed }}"

+ 6 - 2
roles/etcd/tasks/system_container.yml

@@ -29,7 +29,9 @@
     masked: no
     daemon_reload: yes
   register: task_result
-  failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+  failed_when:
+    - task_result is failed
+    - ('could not' not in task_result.msg|lower)
   when: "'etcd' not in etcd_result.stdout"
 
 - name: Disable etcd_container
@@ -39,7 +41,9 @@
     enabled: no
     daemon_reload: yes
   register: task_result
-  failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+  failed_when:
+    - task_result is failed
+    - ('could not' not in task_result.msg|lower)
 
 - name: Remove etcd_container.service
   file:

+ 1 - 1
roles/etcd/tasks/upgrade/upgrade_image.yml

@@ -45,7 +45,7 @@
     state: latest
   when: not l_ostree_booted.stat.exists | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Verify cluster is healthy
   command: "{{ etcdctlv2 }} cluster-health"

+ 1 - 1
roles/etcd/tasks/upgrade/upgrade_rpm.yml

@@ -19,7 +19,7 @@
     name: "{{ l_etcd_target_package }}"
     state: latest
   register: result
-  until: result | success
+  until: result is succeeded
 
 - lineinfile:
     destfile: "{{ etcd_conf_file }}"

+ 2 - 2
roles/flannel/handlers/main.yml

@@ -9,7 +9,7 @@
     name: "{{ openshift_docker_service_name }}"
     state: restarted
   register: l_docker_restart_docker_in_flannel_result
-  until: not l_docker_restart_docker_in_flannel_result | failed
+  until: not (l_docker_restart_docker_in_flannel_result is failed)
   retries: 3
   delay: 30
 
@@ -18,6 +18,6 @@
     name: "{{ openshift_service_type }}-node"
     state: restarted
   register: l_restart_node_result
-  until: not l_restart_node_result | failed
+  until: not (l_restart_node_result is failed)
   retries: 3
   delay: 30

+ 1 - 1
roles/flannel/tasks/main.yml

@@ -4,7 +4,7 @@
   package: name=flannel state=present
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Set flannel etcd options
   become: yes

+ 1 - 1
roles/nickhammond.logrotate/tasks/main.yml

@@ -3,7 +3,7 @@
   package: name=logrotate state=present
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: nickhammond.logrotate | Setup logrotate.d scripts
   template:

+ 1 - 1
roles/nuage_ca/tasks/main.yaml

@@ -3,7 +3,7 @@
   package: name=openssl state=present
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Create CA directory
   file: path="{{ nuage_ca_dir }}" state=directory

+ 2 - 2
roles/openshift_ca/tasks/main.yml

@@ -13,13 +13,13 @@
     state: present
   when: not openshift.common.is_containerized | bool
   register: install_result
-  until: install_result | success
+  until: install_result is succeeded
   delegate_to: "{{ openshift_ca_host }}"
   run_once: true
 
 - name: Reload generated facts
   openshift_facts:
-  when: hostvars[openshift_ca_host].install_result | changed
+  when: hostvars[openshift_ca_host].install_result is changed
 
 - name: Create openshift_ca_config_dir if it does not exist
   file:

+ 2 - 2
roles/openshift_cli/tasks/main.yml

@@ -3,7 +3,7 @@
   package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present
   when: not openshift.common.is_containerized | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - block:
   - name: Pull CLI Image
@@ -44,4 +44,4 @@
   package: name=bash-completion state=present
   when: not openshift.common.is_containerized | bool
   register: result
-  until: result | success
+  until: result is succeeded

+ 1 - 1
roles/openshift_clock/tasks/main.yaml

@@ -10,7 +10,7 @@
     - openshift_clock_enabled | bool
     - chrony_installed.rc != 0
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Start and enable ntpd/chronyd
   command: timedatectl set-ntp true

+ 4 - 4
roles/openshift_excluder/tasks/install.yml

@@ -14,7 +14,7 @@
     - r_openshift_excluder_enable_docker_excluder | bool
     - ansible_pkg_mgr == "yum"
     register: result
-    until: result | success
+    until: result is succeeded
 
 
   # For DNF we do not need the "*" and if we add it, it causes an error because
@@ -29,7 +29,7 @@
     - r_openshift_excluder_enable_docker_excluder | bool
     - ansible_pkg_mgr == "dnf"
     register: result
-    until: result | success
+    until: result is succeeded
 
   - name: Install openshift excluder - yum
     package:
@@ -39,7 +39,7 @@
     - r_openshift_excluder_enable_openshift_excluder | bool
     - ansible_pkg_mgr == "yum"
     register: result
-    until: result | success
+    until: result is succeeded
 
   # For DNF we do not need the "*" and if we add it, it causes an error because
   # it's not a valid pkg_spec
@@ -53,7 +53,7 @@
     - r_openshift_excluder_enable_openshift_excluder | bool
     - ansible_pkg_mgr == "dnf"
     register: result
-    until: result | success
+    until: result is succeeded
 
   - set_fact:
       r_openshift_excluder_install_ran: True

+ 1 - 1
roles/openshift_excluder/tasks/verify_excluder.yml

@@ -29,4 +29,4 @@
     msg: "Available {{ excluder }} version {{ excluder_version }} is higher than the upgrade target version"
   when:
   - excluder_version != ''
-  - excluder_version.split('.')[0:2] | join('.') | version_compare(r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True)
+  - excluder_version.split('.')[0:2] | join('.') is version_compare(r_openshift_excluder_upgrade_target.split('.')[0:2] | join('.'), '>', strict=True)

+ 1 - 1
roles/openshift_expand_partition/tasks/main.yml

@@ -3,7 +3,7 @@
   package: name=cloud-utils-growpart state=present
   when: not openshift.common.is_containerized | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Determine if growpart is installed
   command: "rpm -q cloud-utils-growpart"

+ 2 - 2
roles/openshift_loadbalancer/tasks/main.yml

@@ -6,7 +6,7 @@
   package: name=haproxy state=present
   when: not openshift.common.is_containerized | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Pull haproxy image
   command: >
@@ -70,4 +70,4 @@
   register: start_result
 
 - set_fact:
-    haproxy_start_result_changed: "{{ start_result | changed }}"
+    haproxy_start_result_changed: "{{ start_result is changed }}"

+ 1 - 1
roles/openshift_manage_node/tasks/main.yml

@@ -41,7 +41,7 @@
   retries: 10
   delay: 5
   register: node_schedulable
-  until: node_schedulable|succeeded
+  until: node_schedulable is succeeded
   when: "'nodename' in openshift.node"
   delegate_to: "{{ openshift_master_host }}"
 

+ 1 - 1
roles/openshift_master/tasks/journald.yml

@@ -26,4 +26,4 @@
   delay: 5
   register: result
   until: result.rc == 0
-  when: journald_update | changed
+  when: journald_update is changed

+ 12 - 12
roles/openshift_master/tasks/main.yml

@@ -21,7 +21,7 @@
   when:
   - not openshift.common.is_containerized | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Create r_openshift_master_data_dir
   file:
@@ -72,7 +72,7 @@
   - not openshift.common.is_atomic | bool
   with_items: "{{ openshift.master.identity_providers }}"
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Ensure htpasswd directory exists
   file:
@@ -147,7 +147,7 @@
     register: l_already_set
 
   - set_fact:
-      openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
+      openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout is match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
 
 - name: Set fact of all etcd host IPs
   openshift_facts:
@@ -209,17 +209,17 @@
   when:
   - inventory_hostname == openshift_master_hosts[0]
   register: l_start_result
-  until: not l_start_result | failed
+  until: not (l_start_result is failed)
   retries: 1
   delay: 60
 
 - name: Dump logs from master-api if it failed
   command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
   when:
-  - l_start_result | failed
+  - l_start_result is failed
 
 - set_fact:
-    master_api_service_status_changed: "{{ l_start_result | changed }}"
+    master_api_service_status_changed: "{{ l_start_result is changed }}"
   when:
   - inventory_hostname == openshift_master_hosts[0]
 
@@ -236,17 +236,17 @@
   when:
   - inventory_hostname != openshift_master_hosts[0]
   register: l_start_result
-  until: not l_start_result | failed
+  until: not (l_start_result is failed)
   retries: 1
   delay: 60
 
 - name: Dump logs from master-api if it failed
   command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-api
   when:
-  - l_start_result | failed
+  - l_start_result is failed
 
 - set_fact:
-    master_api_service_status_changed: "{{ l_start_result | changed }}"
+    master_api_service_status_changed: "{{ l_start_result is changed }}"
   when:
   - inventory_hostname != openshift_master_hosts[0]
 
@@ -262,18 +262,18 @@
     enabled: yes
     state: started
   register: l_start_result
-  until: not l_start_result | failed
+  until: not (l_start_result is failed)
   retries: 1
   delay: 60
 
 - name: Dump logs from master-controllers if it failed
   command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-master-controllers
   when:
-  - l_start_result | failed
+  - l_start_result is failed
 
 - name: Set fact master_controllers_service_status_changed
   set_fact:
-    master_controllers_service_status_changed: "{{ l_start_result | changed }}"
+    master_controllers_service_status_changed: "{{ l_start_result is changed }}"
 
 - name: node bootstrap settings
   include_tasks: bootstrap.yml

+ 2 - 2
roles/openshift_master/tasks/set_loopback_context.yml

@@ -23,12 +23,12 @@
     {{ openshift.master.loopback_context_name }}
     --config={{ openshift_master_loopback_config }}
   when:
-  - set_loopback_cluster | changed
+  - set_loopback_cluster is changed
   register: l_set_loopback_context
 
 - command: >
     {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }}
     --config={{ openshift_master_loopback_config }}
   when:
-  - l_set_loopback_context | changed
+  - l_set_loopback_context is changed
   register: set_current_context

+ 1 - 1
roles/openshift_master/tasks/systemd_units.yml

@@ -50,7 +50,7 @@
 
 - command: systemctl daemon-reload
   when:
-  - l_create_ha_unit_files | changed
+  - l_create_ha_unit_files is changed
 # end workaround for missing systemd unit files
 
 - name: enable master services

+ 1 - 1
roles/openshift_master/tasks/upgrade/rpm_upgrade.yml

@@ -19,4 +19,4 @@
       - "{{ openshift_service_type }}-clients{{ openshift_pkg_version }}"
       - "tuned-profiles-{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
   register: result
-  until: result | success
+  until: result is succeeded

+ 3 - 3
roles/openshift_metrics/tasks/install_hawkular.yaml

@@ -23,15 +23,15 @@
 
 - block:
   - set_fact: hawkular_key={{ lookup('file', openshift_metrics_hawkular_key) }}
-    when: openshift_metrics_hawkular_key | exists
+    when: openshift_metrics_hawkular_key is exists
     changed_when: false
 
   - set_fact: hawkular_cert={{ lookup('file', openshift_metrics_hawkular_cert) }}
-    when: openshift_metrics_hawkular_cert | exists
+    when: openshift_metrics_hawkular_cert is exists
     changed_when: false
 
   - set_fact: hawkular_ca={{ lookup('file', openshift_metrics_hawkular_ca) }}
-    when: openshift_metrics_hawkular_ca | exists
+    when: openshift_metrics_hawkular_ca is exists
     changed_when: false
 
   - name: generate the hawkular-metrics route

+ 1 - 1
roles/openshift_nfs/tasks/create_export.yml

@@ -31,4 +31,4 @@
 - name: Re-export NFS filesystems
   command: exportfs -ar
   when:
-    - created_export | changed
+    - created_export is changed

+ 2 - 2
roles/openshift_nfs/tasks/setup.yml

@@ -5,7 +5,7 @@
 - name: Install nfs-utils
   package: name=nfs-utils state=present
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Configure NFS
   lineinfile:
@@ -16,7 +16,7 @@
 
 - name: Restart nfs-config
   systemd: name=nfs-config state=restarted
-  when: nfs_config | changed
+  when: nfs_config is changed
 
 - name: Ensure exports directory exists
   file:

+ 2 - 2
roles/openshift_node/handlers/main.yml

@@ -24,7 +24,7 @@
   - openshift_node_use_openshift_sdn | bool
   - not openshift_node_bootstrap
   register: l_openshift_node_stop_openvswitch_result
-  until: not l_openshift_node_stop_openvswitch_result | failed
+  until: not (l_openshift_node_stop_openvswitch_result is failed)
   retries: 3
   delay: 30
   notify:
@@ -41,7 +41,7 @@
     name: "{{ openshift_service_type }}-node"
     state: restarted
   register: l_openshift_node_restart_node_result
-  until: not l_openshift_node_restart_node_result | failed
+  until: not (l_openshift_node_restart_node_result is failed)
   retries: 3
   delay: 30
   when:

+ 1 - 1
roles/openshift_node/tasks/bootstrap.yml

@@ -5,7 +5,7 @@
     state: present
   with_items: "{{ r_openshift_node_image_prep_packages }}"
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: create the directory for node
   file:

+ 6 - 6
roles/openshift_node/tasks/config.yml

@@ -16,12 +16,12 @@
     - openshift.common.is_containerized | bool
     - openshift_node_use_openshift_sdn | default(true) | bool
   register: ovs_start_result
-  until: not ovs_start_result | failed
+  until: not (ovs_start_result is failed)
   retries: 3
   delay: 30
 
 - set_fact:
-    ovs_service_status_changed: "{{ ovs_start_result | changed }}"
+    ovs_service_status_changed: "{{ ovs_start_result is changed }}"
 
 - file:
     dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}"
@@ -93,19 +93,19 @@
         state: started
         daemon_reload: yes
       register: node_start_result
-      until: not node_start_result | failed
+      until: not (node_start_result is failed)
       retries: 1
       delay: 30
       ignore_errors: true
 
     - name: Dump logs from node service if it failed
       command: journalctl --no-pager -n 100 -u {{ openshift_service_type }}-node
-      when: node_start_result | failed
+      when: node_start_result is failed
 
     - name: Abort if node failed to start
       fail:
         msg: Node failed to start please inspect the logs and try again
-      when: node_start_result | failed
+      when: node_start_result is failed
 
     - set_fact:
-        node_service_status_changed: "{{ node_start_result | changed }}"
+        node_service_status_changed: "{{ node_start_result is changed }}"

+ 1 - 1
roles/openshift_node/tasks/dnsmasq/no-network-manager.yml

@@ -8,6 +8,6 @@
     state: present
   notify: restart NetworkManager
   register: result
-  until: result | success
+  until: result is succeeded
 
 - include_tasks: network-manager.yml

+ 1 - 1
roles/openshift_node/tasks/dnsmasq_install.yml

@@ -14,7 +14,7 @@
   package: name=dnsmasq state=installed
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: ensure origin/node directory exists
   file:

+ 3 - 3
roles/openshift_node/tasks/install.yml

@@ -6,7 +6,7 @@
       name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
       state: present
     register: result
-    until: result | success
+    until: result is succeeded
 
   - name: Install sdn-ovs package
     package:
@@ -15,14 +15,14 @@
     when:
     - openshift_node_use_openshift_sdn | bool
     register: result
-    until: result | success
+    until: result is succeeded
 
   - name: Install conntrack-tools package
     package:
       name: "conntrack-tools"
       state: present
     register: result
-    until: result | success
+    until: result is succeeded
 
 - when:
   - openshift.common.is_containerized | bool

+ 3 - 1
roles/openshift_node/tasks/main.yml

@@ -52,7 +52,9 @@
     state: restarted
   when: openshift_use_crio
   register: task_result
-  failed_when: task_result|failed and 'could not find the requested service' not in task_result.msg|lower
+  failed_when:
+    - task_result is failed
+    - ('could not find the requested service' not in task_result.msg|lower)
 
 - name: restart NetworkManager to ensure resolv.conf is present
   systemd:

+ 1 - 1
roles/openshift_node/tasks/storage_plugins/ceph.yml

@@ -3,4 +3,4 @@
   package: name=ceph-common state=present
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded

+ 3 - 3
roles/openshift_node/tasks/storage_plugins/glusterfs.yml

@@ -3,7 +3,7 @@
   package: name=glusterfs-fuse state=present
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Check for existence of fusefs sebooleans
   command: getsebool {{ item }}
@@ -31,7 +31,7 @@
   # since getsebool prints the resolved name.  (At some point Ansible's seboolean module
   # should learn to deal with aliases)
   - item.item in item.stdout  # Boolean does not have an alias.
-  - ansible_python_version | version_compare('3', '<')
+  - ansible_python_version is version_compare('3', '<')
   with_items: "{{ fusefs_getsebool_status.results }}"
 
 # Workaround for https://github.com/openshift/openshift-ansible/issues/4438
@@ -52,5 +52,5 @@
   # should learn to deal with aliases)
   - item.item in item.stdout  # Boolean does not have an alias.
   - ('--> off' in item.stdout)  # Boolean is currently off.
-  - ansible_python_version | version_compare('3', '>=')
+  - ansible_python_version is version_compare('3', '>=')
   with_items: "{{ fusefs_getsebool_status.results }}"

+ 1 - 1
roles/openshift_node/tasks/storage_plugins/iscsi.yml

@@ -3,4 +3,4 @@
   package: name=iscsi-initiator-utils state=present
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded

+ 3 - 3
roles/openshift_node/tasks/storage_plugins/nfs.yml

@@ -3,7 +3,7 @@
   package: name=nfs-utils state=present
   when: not openshift.common.is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Check for existence of nfs sebooleans
   command: getsebool {{ item }}
@@ -31,7 +31,7 @@
   # since getsebool prints the resolved name.  (At some point Ansible's seboolean module
   # should learn to deal with aliases)
   - item.item in item.stdout  # Boolean does not have an alias.
-  - ansible_python_version | version_compare('3', '<')
+  - ansible_python_version is version_compare('3', '<')
   with_items: "{{ nfs_getsebool_status.results }}"
 
 # Workaround for https://github.com/openshift/openshift-ansible/issues/4438
@@ -52,5 +52,5 @@
   # should learn to deal with aliases)
   - item.item in item.stdout  # Boolean does not have an alias.
   - ('--> off' in item.stdout)  # Boolean is currently off.
-  - ansible_python_version | version_compare('3', '>=')
+  - ansible_python_version is version_compare('3', '>=')
   with_items: "{{ nfs_getsebool_status.results }}"

+ 1 - 1
roles/openshift_node/tasks/upgrade.yml

@@ -17,7 +17,7 @@
 - name: download docker upgrade rpm
   command: "{{ ansible_pkg_mgr }} install -C -y docker{{ '-' + docker_version }}"
   register: result
-  until: result | success
+  until: result is succeeded
   when:
   - l_docker_upgrade is defined
   - l_docker_upgrade | bool

+ 1 - 1
roles/openshift_node/tasks/upgrade/config_changes.yml

@@ -74,4 +74,4 @@
 #       require a service to be part of the call.
 - name: Reload systemd units
   command: systemctl daemon-reload
-  when: l_node_unit | changed
+  when: l_node_unit is changed

+ 1 - 1
roles/openshift_node/tasks/upgrade/restart.yml

@@ -27,7 +27,7 @@
     name: "{{ openshift_docker_service_name }}"
     state: started
   register: docker_start_result
-  until: not docker_start_result | failed
+  until: not (docker_start_result is failed)
   retries: 3
   delay: 30
 

+ 2 - 2
roles/openshift_node/tasks/upgrade/rpm_upgrade.yml

@@ -9,7 +9,7 @@
 - name: download new node packages
   command: "{{ ansible_pkg_mgr }} install -y --downloadonly {{ openshift_node_upgrade_rpm_list | join(' ')}}"
   register: result
-  until: result | success
+  until: result is succeeded
   vars:
     openshift_node_upgrade_rpm_list:
       - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"
@@ -21,4 +21,4 @@
 - name: download openvswitch upgrade rpm
   command: "{{ ansible_pkg_mgr }} update -y --downloadonly openvswitch"
   register: result
-  until: result | success
+  until: result is succeeded

+ 1 - 1
roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml

@@ -11,7 +11,7 @@
 - name: download new node packages
   command: "{{ ansible_pkg_mgr }} install -C -y {{ openshift_node_upgrade_rpm_list | join(' ')}}"
   register: result
-  until: result | success
+  until: result is succeeded
   vars:
     openshift_node_upgrade_rpm_list:
       - "{{ openshift_service_type }}-node{{ openshift_pkg_version }}"

+ 1 - 1
roles/openshift_node/tasks/upgrade/stop_services.yml

@@ -25,7 +25,7 @@
     name: docker
     state: stopped
   register: l_openshift_node_upgrade_docker_stop_result
-  until: not l_openshift_node_upgrade_docker_stop_result | failed
+  until: not (l_openshift_node_upgrade_docker_stop_result is failed)
   retries: 3
   delay: 30
   when:

+ 2 - 2
roles/openshift_node/tasks/upgrade_pre.yml

@@ -10,7 +10,7 @@
 - name: update package meta data to speed install later.
   command: "{{ ansible_pkg_mgr }} makecache"
   register: result
-  until: result | success
+  until: result is succeeded
   when: not openshift.common.is_containerized | bool
 
 - name: Check Docker image count
@@ -32,7 +32,7 @@
 - name: download docker upgrade rpm
   command: "{{ ansible_pkg_mgr }} install -y --downloadonly docker{{ '-' + docker_version }}"
   register: result
-  until: result | success
+  until: result is succeeded
   when:
   - l_docker_upgrade is defined
   - l_docker_upgrade | bool

+ 1 - 1
roles/openshift_node_certificates/handlers/main.yml

@@ -22,6 +22,6 @@
     state: restarted
   when: not openshift_certificates_redeploy | default(false) | bool
   register: l_docker_restart_docker_in_cert_result
-  until: not l_docker_restart_docker_in_cert_result | failed
+  until: not (l_docker_restart_docker_in_cert_result is failed)
   retries: 3
   delay: 30

+ 2 - 2
roles/openshift_openstack/tasks/container-storage-setup.yml

@@ -8,7 +8,7 @@
         group: root
         mode: 0644
   when:
-    - ansible_distribution_version | version_compare('7.4', '>=')
+    - ansible_distribution_version is version_compare('7.4', '>=')
     - ansible_distribution == "RedHat"
 
 - block:
@@ -20,7 +20,7 @@
         group: root
         mode: 0644
   when:
-    - ansible_distribution_version | version_compare('7.4', '<')
+    - ansible_distribution_version is version_compare('7.4', '<')
     - ansible_distribution == "RedHat"
 
 - block:

+ 2 - 2
roles/openshift_openstack/tasks/node-packages.yml

@@ -7,7 +7,7 @@
     state: latest
   with_items: "{{ openshift_openstack_required_packages }}"
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Install debug packages (optional)
   yum:
@@ -16,4 +16,4 @@
   with_items: "{{ openshift_openstack_debug_packages }}"
   when: openshift_openstack_install_debug_packages|bool
   register: result
-  until: result | success
+  until: result is succeeded

+ 1 - 1
roles/openshift_openstack/tasks/populate-dns.yml

@@ -116,6 +116,6 @@
     - "{{ openshift_openstack_dns_records_add | default([]) }}"
     - entries
   register: nsupdate_add_result
-  until: nsupdate_add_result|succeeded
+  until: nsupdate_add_result is succeeded
   retries: 10
   delay: 1

+ 1 - 1
roles/openshift_repos/tasks/main.yaml

@@ -10,7 +10,7 @@
   - name: Ensure libselinux-python is installed
     package: name=libselinux-python state=present
     register: result
-    until: result | success
+    until: result is succeeded
 
   - name: Remove openshift_additional.repo file
     file:

+ 1 - 1
roles/openshift_sanitize_inventory/tasks/main.yml

@@ -47,7 +47,7 @@
 - name: Abort when openshift_release is invalid
   when:
     - openshift_release is defined
-    - not openshift_release | match('^\d+(\.\d+){1,3}$')
+    - not (openshift_release is match('^\d+(\.\d+){1,3}$'))
   fail:
     msg: |-
       openshift_release is "{{ openshift_release }}" which is not a valid version string.

+ 1 - 1
roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml

@@ -5,7 +5,7 @@
   - not openshift.common.is_atomic | bool
   - not glusterfs_heketi_is_native | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Verify heketi-cli is installed
   shell: "command -v {{ glusterfs_heketi_cli }} >/dev/null 2>&1 || { echo >&2 'ERROR: Make sure heketi-cli is available, then re-run the installer'; exit 1; }"

+ 1 - 1
roles/openshift_storage_glusterfs/tasks/kernel_modules.yml

@@ -9,4 +9,4 @@
   systemd:
     name: systemd-modules-load.service
     state: restarted
-  when: km | changed
+  when: km is changed

+ 3 - 3
roles/openshift_storage_nfs/tasks/main.yml

@@ -5,7 +5,7 @@
 - name: Install nfs-utils
   package: name=nfs-utils state=present
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Configure NFS
   lineinfile:
@@ -16,7 +16,7 @@
 
 - name: Restart nfs-config
   systemd: name=nfs-config state=restarted
-  when: nfs_config | changed
+  when: nfs_config is changed
 
 - name: Ensure exports directory exists
   file:
@@ -70,4 +70,4 @@
   register: start_result
 
 - set_fact:
-    nfs_service_status_changed: "{{ start_result | changed }}"
+    nfs_service_status_changed: "{{ start_result is changed }}"

+ 1 - 1
roles/openshift_storage_nfs_lvm/tasks/nfs.yml

@@ -3,7 +3,7 @@
   package: name=nfs-utils state=present
   when: not openshift.common.is_containerized | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Start rpcbind
   systemd:

+ 2 - 2
roles/openshift_version/tasks/main.yml

@@ -49,7 +49,7 @@
     when: openshift.common.deployment_type == 'origin'
     assert:
       that:
-      - "{{ openshift_image_tag|match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
+      - "{{ openshift_image_tag is match('(^v?\\d+\\.\\d+\\.\\d+(-[\\w\\-\\.]*)?$)') }}"
       msg: |-
         openshift_image_tag must be in the format v#.#.#[-optional.#]. Examples: v1.2.3, v3.5.1-alpha.1
         You specified openshift_image_tag={{ openshift_image_tag }}
@@ -66,7 +66,7 @@
     when: openshift.common.deployment_type == 'openshift-enterprise'
     assert:
       that:
-      - "{{ openshift_image_tag|match('(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)') }}"
+      - "{{ openshift_image_tag is match('(^v\\d+\\.\\d+(\\.\\d+)*(-\\d+(\\.\\d+)*)?$)') }}"
       msg: |-
         openshift_image_tag must be in the format v#.#[.#[.#]]. Examples: v1.2, v3.4.1, v3.5.1.3,
         v3.5.1.3.4, v1.2-1, v1.2.3-4, v1.2.3-4.5, v1.2.3-4.5.6

+ 7 - 5
roles/os_firewall/tasks/firewalld.yml

@@ -9,7 +9,7 @@
     name: firewalld
     state: present
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Ensure iptables services are not enabled
   systemd:
@@ -21,12 +21,14 @@
     - iptables
     - ip6tables
   register: task_result
-  failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+  failed_when:
+    - task_result is failed
+    - ('could not' not in task_result.msg|lower)
 
 - name: Wait 10 seconds after disabling iptables
   pause:
     seconds: 10
-  when: task_result | changed
+  when: task_result is changed
 
 - name: Start and enable firewalld service
   systemd:
@@ -40,13 +42,13 @@
 - name: need to pause here, otherwise the firewalld service starting can sometimes cause ssh to fail
   pause:
     seconds: 10
-  when: result | changed
+  when: result is changed
 
 - name: Restart polkitd
   systemd:
     name: polkit
     state: restarted
-  when: result | changed
+  when: result is changed
 
 # Fix suspected race between firewalld and polkit BZ1436964
 - name: Wait for polkit action to have been created

+ 6 - 4
roles/os_firewall/tasks/iptables.yml

@@ -7,12 +7,14 @@
     enabled: no
     masked: yes
   register: task_result
-  failed_when: task_result|failed and 'could not' not in task_result.msg|lower
+  failed_when:
+    - task_result is failed
+    - ('could not' not in task_result.msg|lower)
 
 - name: Wait 10 seconds after disabling firewalld
   pause:
     seconds: 10
-  when: task_result | changed
+  when: task_result is changed
 
 - name: Install iptables packages
   package:
@@ -23,7 +25,7 @@
     - iptables-services
   when: not r_os_firewall_is_atomic | bool
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Start and enable iptables service
   systemd:
@@ -40,4 +42,4 @@
 - name: need to pause here, otherwise the iptables service starting can sometimes cause ssh to fail
   pause:
     seconds: 10
-  when: result | changed
+  when: result is changed

+ 1 - 1
roles/os_update_latest/tasks/main.yml

@@ -2,4 +2,4 @@
 - name: Update all packages
   package: name=* state=latest
   register: result
-  until: result | success
+  until: result is succeeded

+ 2 - 2
roles/rhel_subscribe/tasks/main.yml

@@ -22,7 +22,7 @@
     name: subscription-manager
     state: present
   register: result
-  until: result | success
+  until: result is succeeded
 
 - name: Is host already registered?
   command: bash -c "subscription-manager version"
@@ -35,7 +35,7 @@
     username: "{{ rhel_subscription_user }}"
     password: "{{ rhel_subscription_pass }}"
   register: rh_subscription
-  until: rh_subscription | succeeded
+  until: rh_subscription is succeeded
   when:
     - "'not registered' in rh_subscribed.stdout"
     - rhel_subscription_user is defined