
Don't double quote when conditions

Scott Dodson 8 years ago
commit 717c36fde2
40 changed files with 80 additions and 80 deletions
  1. docs/best_practices_guide.adoc (+2 -2)
  2. playbooks/adhoc/create_pv/create_pv.yaml (+1 -1)
  3. playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml (+1 -1)
  4. playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml (+1 -1)
  5. playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml (+1 -1)
  6. playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml (+1 -1)
  7. playbooks/adhoc/uninstall.yml (+4 -4)
  8. playbooks/common/openshift-cluster/evaluate_groups.yml (+10 -10)
  9. playbooks/common/openshift-cluster/initialize_openshift_version.yml (+1 -1)
  10. playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml (+8 -8)
  11. playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml (+1 -1)
  12. roles/etcd/tasks/main.yml (+1 -1)
  13. roles/lib_openshift/src/test/integration/oc_label.yml (+1 -1)
  14. roles/lib_openshift/src/test/integration/oc_user.yml (+1 -1)
  15. roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py (+1 -1)
  16. roles/openshift_certificate_expiry/tasks/main.yml (+3 -3)
  17. roles/openshift_cloud_provider/tasks/openstack.yml (+1 -1)
  18. roles/openshift_expand_partition/tasks/main.yml (+1 -1)
  19. roles/openshift_hosted_logging/tasks/deploy_logging.yaml (+6 -6)
  20. roles/openshift_hosted_metrics/tasks/install.yml (+1 -1)
  21. roles/openshift_logging/tasks/generate_routes.yaml (+3 -3)
  22. roles/openshift_logging/tasks/install_elasticsearch.yaml (+2 -2)
  23. roles/openshift_logging/tasks/install_fluentd.yaml (+2 -2)
  24. roles/openshift_logging/tasks/install_mux.yaml (+2 -2)
  25. roles/openshift_logging/tasks/main.yaml (+1 -1)
  26. roles/openshift_master/tasks/main.yml (+1 -1)
  27. roles/openshift_master_facts/tasks/main.yml (+2 -2)
  28. roles/openshift_metrics/tasks/install_cassandra.yaml (+1 -1)
  29. roles/openshift_metrics/tasks/install_heapster.yaml (+1 -1)
  30. roles/openshift_metrics/tasks/install_metrics.yaml (+3 -3)
  31. roles/openshift_metrics/tasks/main.yaml (+1 -1)
  32. roles/openshift_metrics/tasks/start_metrics.yaml (+2 -2)
  33. roles/openshift_metrics/tasks/stop_metrics.yaml (+2 -2)
  34. roles/openshift_metrics/tasks/uninstall_metrics.yaml (+2 -2)
  35. roles/openshift_node/tasks/main.yml (+1 -1)
  36. roles/openshift_provisioners/tasks/install_efs.yaml (+1 -1)
  37. roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml (+2 -2)
  38. roles/openshift_storage_glusterfs/tasks/main.yml (+2 -2)
  39. roles/os_firewall/tasks/firewall/firewalld.yml (+1 -1)
  40. roles/os_firewall/tasks/firewall/iptables.yml (+1 -1)
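
Background for the diffs below: Ansible evaluates when:, failed_when:, and changed_when: values as raw Jinja2 expressions, so wrapping the whole expression in an extra layer of double quotes is redundant; the quotes only make YAML parse the value as a string, which Ansible then evaluates exactly as it would the bare form. Every hunk in this commit makes that same mechanical change. A minimal sketch of the two styles (the task and variable names here are illustrative, not taken from the commit):

  # Quoted style being removed: the outer quotes add nothing.
  - name: Example task with a quoted condition
    debug: msg=hello
    when: "my_flag | default(false) | bool"

  # Bare style being adopted: evaluates identically, and avoids nesting
  # quotes around string literals inside the expression.
  - name: Example task with a bare condition
    debug: msg=hello
    when: my_flag | default(false) | bool
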

+ 2 - 2
docs/best_practices_guide.adoc

@@ -493,12 +493,12 @@ The Ansible `package` module calls the associated package manager for the underl
 # tasks.yml
 - name: Install etcd (for etcdctl)
   yum: name=etcd state=latest
-  when: "ansible_pkg_mgr == yum"
+  when: ansible_pkg_mgr == yum
   register: install_result
 
 - name: Install etcd (for etcdctl)
   dnf: name=etcd state=latest
-  when: "ansible_pkg_mgr == dnf"
+  when: ansible_pkg_mgr == dnf
   register: install_result
 ----
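
One caveat with the unquoted style: string literals inside the expression still need their own quotes, because a bare word in a Jinja2 expression is a variable reference rather than a string. A short sketch of the distinction (ansible_pkg_mgr is a real Ansible fact; the comparison target is the point here):

  when: ansible_pkg_mgr == "yum"   # compares the fact against the string "yum"
  when: ansible_pkg_mgr == yum     # compares the fact against an undefined variable named yum
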
 

+ 1 - 1
playbooks/adhoc/create_pv/create_pv.yaml

@@ -20,7 +20,7 @@
   pre_tasks:
   - fail:
       msg: "This playbook requires {{item}} to be set."
-    when: "item is not defined or item == ''"
+    when: item is not defined or item == ''
     with_items:
     - cli_volume_size
     - cli_device_name

+ 1 - 1
playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml

@@ -33,7 +33,7 @@
   pre_tasks:
   - fail:
       msg: "This playbook requires {{item}} to be set."
-    when: "item is not defined or item == ''"
+    when: item is not defined or item == ''
     with_items:
     - cli_tag_name
     - cli_volume_size

+ 1 - 1
playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml

@@ -24,7 +24,7 @@
   pre_tasks:
   - fail:
       msg: "This playbook requires {{item}} to be set."
-    when: "item is not defined or item == ''"
+    when: item is not defined or item == ''
     with_items:
     - cli_docker_device
 

+ 1 - 1
playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml

@@ -25,7 +25,7 @@
 
   - fail:
       msg: "This playbook requires {{item}} to be set."
-    when: "item is not defined or item == ''"
+    when: item is not defined or item == ''
     with_items:
     - cli_tag_name
 

+ 1 - 1
playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml

@@ -42,7 +42,7 @@
   pre_tasks:
   - fail:
       msg: "This playbook requires {{item}} to be set."
-    when: "item is not defined or item == ''"
+    when: item is not defined or item == ''
     with_items:
     - cli_tag_name
     - cli_volume_size

+ 4 - 4
playbooks/adhoc/uninstall.yml

@@ -125,7 +125,7 @@
       - name: Remove flannel package
         package: name=flannel state=absent
         when: openshift_use_flannel | default(false) | bool
-      when: "not is_atomic | bool"
+      when: not is_atomic | bool
 
     - shell: systemctl reset-failed
       changed_when: False
@@ -146,7 +146,7 @@
       - lbr0
       - vlinuxbr
       - vovsbr
-    when: "openshift_remove_all | default(true) | bool"
+    when: openshift_remove_all | default(true) | bool
 
   - shell: atomic uninstall "{{ item }}"-master-api
     changed_when: False
@@ -239,7 +239,7 @@
         changed_when: False
         failed_when: False
         with_items: "{{ images_to_delete.results }}"
-      when: "openshift_uninstall_images | default(True) | bool"
+      when: openshift_uninstall_images | default(True) | bool
 
     - name: remove sdn drop files
       file:
@@ -252,7 +252,7 @@
       - /etc/sysconfig/openshift-node
       - /etc/sysconfig/openvswitch
       - /run/openshift-sdn
-    when: "openshift_remove_all | default(True) | bool"
+    when: openshift_remove_all | default(True) | bool
 
   - find: path={{ item }} file_type=file
     register: files

+ 10 - 10
playbooks/common/openshift-cluster/evaluate_groups.yml

@@ -7,31 +7,31 @@
   tasks:
   - fail:
       msg: This playbook requires g_etcd_hosts to be set
-    when: "g_etcd_hosts is not defined"
+    when: g_etcd_hosts is not defined
 
   - fail:
       msg: This playbook requires g_master_hosts or g_new_master_hosts to be set
-    when: "g_master_hosts is not defined and g_new_master_hosts is not defined"
+    when: g_master_hosts is not defined and g_new_master_hosts is not defined
 
   - fail:
       msg: This playbook requires g_node_hosts or g_new_node_hosts to be set
-    when: "g_node_hosts is not defined and g_new_node_hosts is not defined"
+    when: g_node_hosts is not defined and g_new_node_hosts is not defined
 
   - fail:
       msg: This playbook requires g_lb_hosts to be set
-    when: "g_lb_hosts is not defined"
+    when: g_lb_hosts is not defined
 
   - fail:
       msg: This playbook requires g_nfs_hosts to be set
-    when: "g_nfs_hosts is not defined"
+    when: g_nfs_hosts is not defined
 
   - fail:
       msg: The nfs group must be limited to one host
-    when: "(groups[g_nfs_hosts] | default([])) | length > 1"
+    when: (groups[g_nfs_hosts] | default([])) | length > 1
 
   - fail:
       msg: This playbook requires g_glusterfs_hosts to be set
-    when: "g_glusterfs_hosts is not defined"
+    when: g_glusterfs_hosts is not defined
 
   - name: Evaluate oo_all_hosts
     add_host:
@@ -86,7 +86,7 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ g_master_hosts | default([]) }}"
-    when: "g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool"
+    when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool
     changed_when: no
 
   - name: Evaluate oo_first_etcd
@@ -94,7 +94,7 @@
       name: "{{ g_etcd_hosts[0] }}"
       groups: oo_first_etcd
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-    when: "g_etcd_hosts|length > 0"
+    when: g_etcd_hosts|length > 0
     changed_when: no
 
   - name: Evaluate oo_first_master
@@ -103,7 +103,7 @@
       groups: oo_first_master
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
-    when: "g_master_hosts|length > 0"
+    when: g_master_hosts|length > 0
     changed_when: no
 
   - name: Evaluate oo_lb_to_config

+ 1 - 1
playbooks/common/openshift-cluster/initialize_openshift_version.yml

@@ -16,7 +16,7 @@
     when: not openshift.common.is_atomic | bool
   - fail:
       msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils.
-    when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout"
+    when: not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' in yum_ver_test.stdout
 
 - name: Determine openshift_version to configure on first master
   hosts: oo_first_master

+ 8 - 8
playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml

@@ -63,12 +63,12 @@
   - block:
     - debug:
         msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}"
-      when: "openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]"
+      when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region]
 
     - debug:
         msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}"
-      when: "openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates"
-    when: "openshift_master_scheduler_predicates | default(none) is not none"
+      when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates
+    when: openshift_master_scheduler_predicates | default(none) is not none
 
   # Handle cases where openshift_master_predicates is not defined
   - block:
@@ -87,7 +87,7 @@
       when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and
                 openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}"
 
-    when: "openshift_master_scheduler_predicates | default(none) is none"
+    when: openshift_master_scheduler_predicates | default(none) is none
 
 
 # Upgrade priorities
@@ -120,12 +120,12 @@
   - block:
     - debug:
         msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}"
-      when: "openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]"
+      when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone]
 
     - debug:
         msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}"
-      when: "openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities"
-    when: "openshift_master_scheduler_priorities | default(none) is not none"
+      when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities
+    when: openshift_master_scheduler_priorities | default(none) is not none
 
   # Handle cases where openshift_master_priorities is not defined
   - block:
@@ -144,7 +144,7 @@
       when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and
                 openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}"
 
-    when: "openshift_master_scheduler_priorities | default(none) is none"
+    when: openshift_master_scheduler_priorities | default(none) is none
 
 
 # Update scheduler
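
The two multi-line conditions left quoted in this file span more than one line, which a bare plain scalar cannot do. One unquoted alternative, sketched here with the variable names from the hunk above, is a YAML folded scalar, which Ansible templates the same way:

  when: >
    openshift_master_scheduler_current_predicates != default_predicates_no_region and
    openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region]
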

+ 1 - 1
playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml

@@ -14,7 +14,7 @@
     url: '{{ image_url }}'
     sha256sum: '{{ image_sha256 }}'
     dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}'
-  when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}'
+  when: ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"]
   register: downloaded_image
 
 - name: Uncompress xz compressed base cloud image
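
Beyond dropping the outer quotes, this hunk also removes the {{ }} delimiters around the expression: when: already evaluates its value as Jinja2, and Ansible warns that conditional statements should not include jinja2 templating delimiters for the wrapped form. A minimal sketch, with skip_image_download standing in for the oo_option lookup:

  # Warned form: the condition is templated to a string, then evaluated again.
  when: '{{ skip_image_download | lower in ["false", "no"] }}'

  # Preferred form: the bare expression.
  when: skip_image_download | lower in ["false", "no"]
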

+ 1 - 1
roles/etcd/tasks/main.yml

@@ -84,7 +84,7 @@
       daemon_reload: yes
     when: not openshift.common.is_etcd_system_container | bool
     register: task_result
-    failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+    failed_when: task_result|failed and 'could not' not in task_result.msg|lower
 
   - name: Install etcd container service file
     template:

+ 1 - 1
roles/lib_openshift/src/test/integration/oc_label.yml

@@ -15,7 +15,7 @@
   - name: ensure needed vars are defined
     fail:
       msg: "{{ item }} not defined"
-    when: "item is not defined"
+    when: item is not defined
     with_items:
     - cli_master_test  # ansible inventory instance to run playbook against
 

+ 1 - 1
roles/lib_openshift/src/test/integration/oc_user.yml

@@ -14,7 +14,7 @@
   - name: ensure needed vars are defined
     fail:
       msg: "{{ item }} no defined"
-    when: "item is not defined"
+    when: item is not defined
     with_items:
     - cli_master_test  # ansible inventory instance to run playbook against
 

+ 1 - 1
roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py

@@ -35,7 +35,7 @@ Example playbook usage:
     become: no
     run_once: yes
     delegate_to: localhost
-    when: "openshift_certificate_expiry_save_json_results|bool"
+    when: openshift_certificate_expiry_save_json_results|bool
     copy:
       content: "{{ hostvars|oo_cert_expiry_results_to_json() }}"
       dest: "{{ openshift_certificate_expiry_json_results_path }}"

+ 3 - 3
roles/openshift_certificate_expiry/tasks/main.yml

@@ -13,12 +13,12 @@
     src: cert-expiry-table.html.j2
     dest: "{{ openshift_certificate_expiry_html_report_path }}"
   delegate_to: localhost
-  when: "openshift_certificate_expiry_generate_html_report|bool"
+  when: openshift_certificate_expiry_generate_html_report|bool
 
 - name: Generate the result JSON string
   run_once: yes
   set_fact: json_result_string="{{ hostvars|oo_cert_expiry_results_to_json(play_hosts) }}"
-  when: "openshift_certificate_expiry_save_json_results|bool"
+  when: openshift_certificate_expiry_save_json_results|bool
 
 - name: Generate results JSON file
   become: no
@@ -27,4 +27,4 @@
     src: save_json_results.j2
     dest: "{{ openshift_certificate_expiry_json_results_path }}"
   delegate_to: localhost
-  when: "openshift_certificate_expiry_save_json_results|bool"
+  when: openshift_certificate_expiry_save_json_results|bool

+ 1 - 1
roles/openshift_cloud_provider/tasks/openstack.yml

@@ -7,4 +7,4 @@
   template:
     dest: "{{ openshift.common.config_base }}/cloudprovider/openstack.conf"
     src: openstack.conf.j2
-  when: "openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)"
+  when: openshift_cloudprovider_openstack_auth_url is defined and openshift_cloudprovider_openstack_username is defined and openshift_cloudprovider_openstack_password is defined and (openshift_cloudprovider_openstack_tenant_id is defined or openshift_cloudprovider_openstack_tenant_name is defined)

+ 1 - 1
roles/openshift_expand_partition/tasks/main.yml

@@ -6,7 +6,7 @@
 - name: Determine if growpart is installed
   command: "rpm -q cloud-utils-growpart"
   register: has_growpart
-  failed_when: "has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout"
+  failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout
   changed_when: false
   when: openshift.common.is_containerized | bool
 

+ 6 - 6
roles/openshift_hosted_logging/tasks/deploy_logging.yaml

@@ -36,7 +36,7 @@
   command: >
     {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
   register: secret_output
-  failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
+  failed_when: secret_output.rc == 1 and 'exists' not in secret_output.stderr
 
 - name: "Create templates for logging accounts and the deployer"
   command: >
@@ -60,21 +60,21 @@
     {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
     policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
   register: permiss_output
-  failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
+  failed_when: permiss_output.rc == 1 and 'exists' not in permiss_output.stderr
 
 - name: "Set permissions for fluentd"
   command: >
     {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
     policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
   register: fluentd_output
-  failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+  failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
 
 - name: "Set additional permissions for fluentd"
   command: >
     {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
     add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
   register: fluentd2_output
-  failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+  failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
 
 - name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
   command: >
@@ -82,13 +82,13 @@
     policy add-cluster-role-to-user rolebinding-reader \
     system:serviceaccount:logging:aggregated-logging-elasticsearch
   register: rolebinding_reader_output
-  failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"
+  failed_when: rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr
 
 - name: "Create ConfigMap for deployer parameters"
   command: >
     {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
   register: deployer_configmap_output
-  failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
+  failed_when: deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr
 
 - name: "Process the deployer template"
   shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"

+ 1 - 1
roles/openshift_hosted_metrics/tasks/install.yml

@@ -81,7 +81,7 @@
     secrets new metrics-deployer nothing=/dev/null
   register: metrics_deployer_secret
   changed_when: metrics_deployer_secret.rc == 0
-  failed_when: "metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr"
+  failed_when: metrics_deployer_secret.rc == 1 and 'already exists' not in metrics_deployer_secret.stderr
 
 # TODO: extend this to allow user passed in certs or generating cert with
 # OpenShift CA

+ 3 - 3
roles/openshift_logging/tasks/generate_routes.yaml

@@ -1,14 +1,14 @@
 ---
 - set_fact: kibana_key={{ lookup('file', openshift_logging_kibana_key) | b64encode }}
-  when: "openshift_logging_kibana_key | trim | length > 0"
+  when: openshift_logging_kibana_key | trim | length > 0
   changed_when: false
 
 - set_fact: kibana_cert={{ lookup('file', openshift_logging_kibana_cert)| b64encode  }}
-  when: "openshift_logging_kibana_cert | trim | length > 0"
+  when: openshift_logging_kibana_cert | trim | length > 0
   changed_when: false
 
 - set_fact: kibana_ca={{ lookup('file', openshift_logging_kibana_ca)| b64encode  }}
-  when: "openshift_logging_kibana_ca | trim | length > 0"
+  when: openshift_logging_kibana_ca | trim | length > 0
   changed_when: false
 
 - set_fact: kibana_ca={{key_pairs | entry_from_named_pair('ca_file') }}

+ 2 - 2
roles/openshift_logging/tasks/install_elasticsearch.yaml

@@ -3,7 +3,7 @@
   set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}
 
 - set_fact: openshift_logging_es_pvc_prefix="logging-es"
-  when: "not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''"
+  when: not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''
 
 ### evaluate if the PVC attached to the dc currently matches the provided vars
 ## if it does then we reuse that pvc in the DC
@@ -65,7 +65,7 @@
   check_mode: no
 
 - set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
-  when: "not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''"
+  when: not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''
 
 - include: set_es_storage.yaml
   vars:

+ 2 - 2
roles/openshift_logging/tasks/install_fluentd.yaml

@@ -32,7 +32,7 @@
     {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
     add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
   register: fluentd_output
-  failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+  failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
   check_mode: no
   when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
 
@@ -49,6 +49,6 @@
     {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
     add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
   register: fluentd2_output
-  failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+  failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
   check_mode: no
   when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1

+ 2 - 2
roles/openshift_logging/tasks/install_mux.yaml

@@ -45,7 +45,7 @@
     {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
     add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
   register: mux_output
-  failed_when: "mux_output.rc == 1 and 'exists' not in mux_output.stderr"
+  failed_when: mux_output.rc == 1 and 'exists' not in mux_output.stderr
   check_mode: no
   when: mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
 
@@ -62,6 +62,6 @@
     {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
     add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
   register: mux2_output
-  failed_when: "mux2_output.rc == 1 and 'exists' not in mux2_output.stderr"
+  failed_when: mux2_output.rc == 1 and 'exists' not in mux2_output.stderr
   check_mode: no
   when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1

+ 1 - 1
roles/openshift_logging/tasks/main.yaml

@@ -1,7 +1,7 @@
 ---
 - fail:
     msg: Only one Fluentd nodeselector key pair should be provided
-  when: "openshift_logging_fluentd_nodeselector.keys() | count > 1"
+  when: openshift_logging_fluentd_nodeselector.keys() | count > 1
 
 - name: Set default image variables based on deployment_type
   include_vars: "{{ item }}"

+ 1 - 1
roles/openshift_master/tasks/main.yml

@@ -194,7 +194,7 @@
     state: stopped
   when: openshift_master_ha | bool
   register: task_result
-  failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+  failed_when: task_result|failed and 'could not' not in task_result.msg|lower
 
 - set_fact:
     master_service_status_changed: "{{ start_result | changed }}"

+ 2 - 2
roles/openshift_master_facts/tasks/main.yml

@@ -128,10 +128,10 @@
   - name: Test if scheduler config is readable
     fail:
       msg: "Unknown scheduler config apiVersion {{ openshift_master_scheduler_config.apiVersion }}"
-    when: "openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1'"
+    when: openshift_master_scheduler_current_config.apiVersion | default(None) != 'v1'
 
   - name: Set current scheduler predicates and priorities
     set_fact:
       openshift_master_scheduler_current_predicates: "{{ openshift_master_scheduler_current_config.predicates }}"
       openshift_master_scheduler_current_priorities: "{{ openshift_master_scheduler_current_config.priorities }}"
-  when: "scheduler_config_stat.stat.exists"
+  when: scheduler_config_stat.stat.exists

+ 1 - 1
roles/openshift_metrics/tasks/install_cassandra.yaml

@@ -23,7 +23,7 @@
   changed_when: false
 
 - set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"
-  when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''"
+  when: not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''
 
 - name: generate hawkular-cassandra persistent volume claims
   template:

+ 1 - 1
roles/openshift_metrics/tasks/install_heapster.yaml

@@ -22,7 +22,7 @@
   with_items:
     - hawkular-metrics-certs
     - hawkular-metrics-account
-  when: "not openshift_metrics_heapster_standalone | bool"
+  when: not openshift_metrics_heapster_standalone | bool
 
 - name: Generating serviceaccount for heapster
   template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/metrics-{{obj_name}}-sa.yaml

+ 3 - 3
roles/openshift_metrics/tasks/install_metrics.yaml

@@ -10,11 +10,11 @@
     - cassandra
   loop_control:
     loop_var: include_file
-  when: "not openshift_metrics_heapster_standalone | bool"
+  when: not openshift_metrics_heapster_standalone | bool
 
 - name: Install Heapster Standalone
   include: install_heapster.yaml
-  when: "openshift_metrics_heapster_standalone | bool"
+  when: openshift_metrics_heapster_standalone | bool
 
 - find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
   register: object_def_files
@@ -48,7 +48,7 @@
 
 - name: Scaling down cluster to recognize changes
   include: stop_metrics.yaml
-  when: "existing_metrics_rc.stdout_lines | length > 0"
+  when: existing_metrics_rc.stdout_lines | length > 0
 
 - name: Scaling up cluster
   include: start_metrics.yaml

+ 1 - 1
roles/openshift_metrics/tasks/main.yaml

@@ -19,7 +19,7 @@
 - name: Create temp directory for all our templates
   file: path={{mktemp.stdout}}/templates state=directory mode=0755
   changed_when: False
-  when: "openshift_metrics_install_metrics | bool"
+  when: openshift_metrics_install_metrics | bool
 
 - name: Create temp directory local on control node
   local_action: command mktemp -d

+ 2 - 2
roles/openshift_metrics/tasks/start_metrics.yaml

@@ -20,7 +20,7 @@
   loop_control:
     loop_var: object
   when: metrics_cassandra_rc is defined
-  changed_when: "metrics_cassandra_rc | length > 0"
+  changed_when: metrics_cassandra_rc | length > 0
 
 - command: >
     {{openshift.common.client_binary}}
@@ -42,7 +42,7 @@
   with_items: "{{metrics_metrics_rc.stdout_lines}}"
   loop_control:
     loop_var: object
-  changed_when: "metrics_metrics_rc | length > 0"
+  changed_when: metrics_metrics_rc | length > 0
 
 - command: >
     {{openshift.common.client_binary}}

+ 2 - 2
roles/openshift_metrics/tasks/stop_metrics.yaml

@@ -41,7 +41,7 @@
   with_items: "{{metrics_hawkular_rc.stdout_lines}}"
   loop_control:
     loop_var: object
-  changed_when: "metrics_hawkular_rc | length > 0"
+  changed_when: metrics_hawkular_rc | length > 0
 
 - command: >
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
@@ -63,4 +63,4 @@
   loop_control:
     loop_var: object
   when: metrics_cassandra_rc is defined
-  changed_when: "metrics_cassandra_rc | length > 0"
+  changed_when: metrics_cassandra_rc | length > 0

+ 2 - 2
roles/openshift_metrics/tasks/uninstall_metrics.yaml

@@ -8,7 +8,7 @@
     delete --ignore-not-found --selector=metrics-infra
     all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
   register: delete_metrics
-  changed_when: "delete_metrics.stdout != 'No resources found'"
+  changed_when: delete_metrics.stdout != 'No resources found'
 
 - name: remove rolebindings
   command: >
@@ -16,4 +16,4 @@
     delete --ignore-not-found
     rolebinding/hawkular-view
     clusterrolebinding/heapster-cluster-reader
-  changed_when: "delete_metrics.stdout != 'No resources found'"
+  changed_when: delete_metrics.stdout != 'No resources found'

+ 1 - 1
roles/openshift_node/tasks/main.yml

@@ -147,7 +147,7 @@
     - regex: '^AWS_SECRET_ACCESS_KEY='
       line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
   no_log: True
-  when: "openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined"
+  when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined
   notify:
     - restart node
 

+ 1 - 1
roles/openshift_provisioners/tasks/install_efs.yaml

@@ -65,6 +65,6 @@
     {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
     add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
   register: efs_output
-  failed_when: "efs_output.rc == 1 and 'exists' not in efs_output.stderr"
+  failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
   check_mode: no
   when: efs_anyuid.stdout.find("system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs") == -1

+ 2 - 2
roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml

@@ -14,7 +14,7 @@
 # Need `command` here because heketi-storage.json contains multiple objects.
 - name: Copy heketi DB to GlusterFS volume
   command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ openshift_storage_glusterfs_namespace }}"
-  when: "setup_storage.rc == 0"
+  when: setup_storage.rc == 0
 
 - name: Wait for copy job to finish
   oc_obj:
@@ -34,7 +34,7 @@
   - "heketi_job.results.results | count > 0"
   # Fail when pod's 'Failed' status is True
   - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"
-  when: "setup_storage.rc == 0"
+  when: setup_storage.rc == 0
 
 - name: Delete deploy resources
   oc_obj:

+ 2 - 2
roles/openshift_storage_glusterfs/tasks/main.yml

@@ -163,7 +163,7 @@
 - name: Load heketi topology
   command: "heketi-cli -s http://{{ openshift_storage_glusterfs_heketi_url }} --user admin --secret '{{ openshift_storage_glusterfs_heketi_admin_key }}' topology load --json={{ mktemp.stdout }}/topology.json 2>&1"
   register: topology_load
-  failed_when: "topology_load.rc != 0 or 'Unable' in topology_load.stdout"
+  failed_when: topology_load.rc != 0 or 'Unable' in topology_load.stdout
   when:
   - openshift_storage_glusterfs_is_native
   - openshift_storage_glusterfs_heketi_topology_load
@@ -172,7 +172,7 @@
   when: openshift_storage_glusterfs_heketi_is_native and openshift_storage_glusterfs_heketi_is_missing
 
 - include: glusterfs_registry.yml
-  when: "openshift.hosted.registry.storage.kind == 'glusterfs'"
+  when: openshift.hosted.registry.storage.kind == 'glusterfs'
 
 - name: Delete temp directory
   file:

+ 1 - 1
roles/os_firewall/tasks/firewall/firewalld.yml

@@ -14,7 +14,7 @@
     - iptables
     - ip6tables
   register: task_result
-  failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+  failed_when: task_result|failed and 'could not' not in task_result.msg|lower
 
 - name: Wait 10 seconds after disabling iptables
   pause:

+ 1 - 1
roles/os_firewall/tasks/firewall/iptables.yml

@@ -7,7 +7,7 @@
     enabled: no
     masked: yes
   register: task_result
-  failed_when: "task_result|failed and 'could not' not in task_result.msg|lower"
+  failed_when: task_result|failed and 'could not' not in task_result.msg|lower
 
 - name: Wait 10 seconds after disabling firewalld
   pause: