Ver Fonte

polish openshift-master role

Jan Chaloupka há 7 anos
pai
commit
13c0075279

+ 16 - 7
roles/openshift_master/handlers/main.yml

@@ -1,12 +1,21 @@
 ---
 - name: restart master api
-  systemd: name={{ openshift.common.service_type }}-master-api state=restarted
-  when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-  notify: Verify API Server
+  systemd:
+    name: "{{ openshift.common.service_type }}-master-api"
+    state: restarted
+  when:
+  - not (master_api_service_status_changed | default(false) | bool)
+  - openshift.master.cluster_method == 'native'
+  notify:
+  - Verify API Server
 
 - name: restart master controllers
-  systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
-  when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
+  systemd:
+    name: "{{ openshift.common.service_type }}-master-controllers"
+    state: restarted
+  when:
+  - not (master_controllers_service_status_changed | default(false) | bool)
+  - openshift.master.cluster_method == 'native'
 
 - name: Verify API Server
   # Using curl here since the uri module requires python-httplib2 and
@@ -23,8 +32,8 @@
     # Disables the following warning:
     # Consider using get_url or uri module rather than running curl
     warn: no
-  register: api_available_output
-  until: api_available_output.stdout == 'ok'
+  register: l_api_available_output
+  until: l_api_available_output.stdout == 'ok'
   retries: 120
   delay: 1
   changed_when: false

+ 5 - 1
roles/openshift_master/tasks/clean_systemd_units.yml

@@ -1,5 +1,9 @@
 ---
 
 - name: Disable master service
-  systemd: name={{ openshift.common.service_type }}-master state=stopped enabled=no masked=yes
+  systemd:
+    name: "{{ openshift.common.service_type }}-master"
+    state: stopped
+    enabled: no
+    masked: yes
   ignore_errors: true

+ 8 - 4
roles/openshift_master/tasks/firewall.yml

@@ -7,7 +7,8 @@
       action: add
       protocol: "{{ item.port.split('/')[1] }}"
       port: "{{ item.port.split('/')[0] }}"
-    when: item.cond | default(True)
+    when:
+    - item.cond | default(True)
     with_items: "{{ r_openshift_master_os_firewall_allow }}"
 
   - name: Remove iptables rules
@@ -16,7 +17,8 @@
       action: remove
       protocol: "{{ item.port.split('/')[1] }}"
       port: "{{ item.port.split('/')[0] }}"
-    when: item.cond | default(True)
+    when:
+    - item.cond | default(True)
     with_items: "{{ r_openshift_master_os_firewall_deny }}"
 
 - when: r_openshift_master_firewall_enabled | bool and r_openshift_master_use_firewalld | bool
@@ -27,7 +29,8 @@
       permanent: true
       immediate: true
       state: enabled
-    when: item.cond | default(True)
+    when:
+    - item.cond | default(True)
     with_items: "{{ r_openshift_master_os_firewall_allow }}"
 
   - name: Remove firewalld allow rules
@@ -36,5 +39,6 @@
       permanent: true
       immediate: true
       state: disabled
-    when: item.cond | default(True)
+    when:
+    - item.cond | default(True)
     with_items: "{{ r_openshift_master_os_firewall_deny }}"

+ 125 - 66
roles/openshift_master/tasks/main.yml

@@ -7,21 +7,34 @@
 - fail:
     msg: >
       Invalid OAuth grant method: {{ openshift_master_oauth_grant_method }}
-  when: openshift_master_oauth_grant_method is defined and openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
+  when:
+  - openshift_master_oauth_grant_method is defined
+  - openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
 
 # HA Variable Validation
 - fail:
     msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
-  when: openshift.master.ha | bool and ((openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"]))
+  when:
+  - openshift.master.ha | bool
+  - (openshift.master.cluster_method is not defined) or (openshift.master.cluster_method is defined and openshift.master.cluster_method not in ["native", "pacemaker"])
 - fail:
     msg: "'native' high availability is not supported for the requested OpenShift version"
-  when: openshift.master.ha | bool and openshift.master.cluster_method == "native" and not openshift.common.version_gte_3_1_or_1_1 | bool
+  when:
+  - openshift.master.ha | bool
+  - openshift.master.cluster_method == "native"
+  - not openshift.common.version_gte_3_1_or_1_1 | bool
 - fail:
     msg: "openshift_master_cluster_password must be set for multi-master installations"
-  when: openshift.master.ha | bool and openshift.master.cluster_method == "pacemaker" and (openshift_master_cluster_password is not defined or not openshift_master_cluster_password)
+  when:
+  - openshift.master.ha | bool
+  - openshift.master.cluster_method == "pacemaker"
+  - openshift_master_cluster_password is not defined or not openshift_master_cluster_password
 - fail:
     msg: "Pacemaker based HA is not supported at this time when used with containerized installs"
-  when: openshift.master.ha | bool and openshift.master.cluster_method == "pacemaker" and openshift.common.is_containerized | bool
+  when:
+  - openshift.master.ha | bool
+  - openshift.master.cluster_method == "pacemaker"
+  - openshift.common.is_containerized | bool
 
 - name: Open up firewall ports
   include: firewall.yml
@@ -31,7 +44,8 @@
   package:
     name: "{{ openshift.common.service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
     state: present
-  when: not openshift.common.is_containerized | bool
+  when:
+  - not openshift.common.is_containerized | bool
 
 - name: Create openshift.common.data_dir
   file:
@@ -40,11 +54,13 @@
     mode: 0755
     owner: root
     group: root
-  when: openshift.common.is_containerized | bool
+  when:
+  - openshift.common.is_containerized | bool
 
 - name: Reload systemd units
   command: systemctl daemon-reload
-  when: openshift.common.is_containerized | bool and install_result | changed
+  when:
+  - openshift.common.is_containerized | bool
 
 - name: Re-gather package dependent master facts
   openshift_facts:
@@ -61,8 +77,8 @@
   args:
     creates: "{{ openshift_master_policy }}"
   notify:
-    - restart master api
-    - restart master controllers
+  - restart master api
+  - restart master controllers
 
 - name: Create the scheduler config
   copy:
@@ -70,20 +86,22 @@
     dest: "{{ openshift_master_scheduler_conf }}"
     backup: true
   notify:
-    - restart master api
-    - restart master controllers
+  - restart master api
+  - restart master controllers
 
 - name: Install httpd-tools if needed
   package: name=httpd-tools state=present
-  when: (item.kind == 'HTPasswdPasswordIdentityProvider') and
-        not openshift.common.is_atomic | bool
+  when:
+  - item.kind == 'HTPasswdPasswordIdentityProvider'
+  - not openshift.common.is_atomic | bool
   with_items: "{{ openshift.master.identity_providers }}"
 
 - name: Ensure htpasswd directory exists
   file:
     path: "{{ item.filename | dirname }}"
     state: directory
-  when: item.kind == 'HTPasswdPasswordIdentityProvider'
+  when:
+  - item.kind == 'HTPasswdPasswordIdentityProvider'
   with_items: "{{ openshift.master.identity_providers }}"
 
 - name: Create the htpasswd file if needed
@@ -91,7 +109,9 @@
     dest: "{{ item.filename }}"
     src: htpasswd.j2
     backup: yes
-  when: item.kind == 'HTPasswdPasswordIdentityProvider' and openshift.master.manage_htpasswd | bool
+  when:
+  - item.kind == 'HTPasswdPasswordIdentityProvider'
+  - openshift.master.manage_htpasswd | bool
   with_items: "{{ openshift.master.identity_providers }}"
 
 - name: Ensure htpasswd file exists
@@ -100,7 +120,8 @@
     force: no
     content: ""
     mode: 0600
-  when: item.kind == 'HTPasswdPasswordIdentityProvider'
+  when:
+  - item.kind == 'HTPasswdPasswordIdentityProvider'
   with_items: "{{ openshift.master.identity_providers }}"
 
 - name: Create the ldap ca file if needed
@@ -109,7 +130,9 @@
     content: "{{ openshift.master.ldap_ca }}"
     mode: 0600
     backup: yes
-  when: openshift.master.ldap_ca is defined and item.kind == 'LDAPPasswordIdentityProvider'
+  when:
+  - openshift.master.ldap_ca is defined
+  - item.kind == 'LDAPPasswordIdentityProvider'
   with_items: "{{ openshift.master.identity_providers }}"
 
 - name: Create the openid ca file if needed
@@ -118,7 +141,10 @@
     content: "{{ openshift.master.openid_ca }}"
     mode: 0600
     backup: yes
-  when: openshift.master.openid_ca is defined and item.kind == 'OpenIDIdentityProvider' and item.ca | default('') != ''
+  when:
+  - openshift.master.openid_ca is defined
+  - item.kind == 'OpenIDIdentityProvider'
+  - item.ca | default('') != ''
   with_items: "{{ openshift.master.identity_providers }}"
 
 - name: Create the request header ca file if needed
@@ -127,20 +153,23 @@
     content: "{{ openshift.master.request_header_ca }}"
     mode: 0600
     backup: yes
-  when: openshift.master.request_header_ca is defined and item.kind == 'RequestHeaderIdentityProvider' and item.clientCA | default('') != ''
+  when:
+  - openshift.master.request_header_ca is defined
+  - item.kind == 'RequestHeaderIdentityProvider'
+  - item.clientCA | default('') != ''
   with_items: "{{ openshift.master.identity_providers }}"
 
 # This is an ugly hack to verify settings are in a file without modifying them with lineinfile.
 # The template file will stomp any other settings made.
 - block:
-    - name: check whether our docker-registry setting exists in the env file
-      command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master"
-      failed_when: false
-      changed_when: false
-      register: already_set
+  - name: check whether our docker-registry setting exists in the env file
+    command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift.common.service_type }}-master"
+    failed_when: false
+    changed_when: false
+    register: l_already_set
 
-    - set_fact:
-        openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (already_set.stdout is defined and already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
+  - set_fact:
+      openshift_push_via_dns: "{{ (openshift_use_dnsmasq | default(true) and openshift.common.version_gte_3_6) or (l_already_set.stdout is defined and l_already_set.stdout | match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}"
 
 - name: Set fact of all etcd host IPs
   openshift_facts:
@@ -156,7 +185,9 @@
 
 - name: Install Master system container
   include: system_container.yml
-  when: openshift.common.is_containerized | bool and openshift.common.is_master_system_container | bool
+  when:
+  - openshift.common.is_containerized | bool
+  - openshift.common.is_master_system_container | bool
 
 - name: Create session secrets file
   template:
@@ -165,9 +196,11 @@
     owner: root
     group: root
     mode: 0600
-  when: openshift.master.session_auth_secrets is defined and openshift.master.session_encryption_secrets is defined
+  when:
+  - openshift.master.session_auth_secrets is defined
+  - openshift.master.session_encryption_secrets is defined
   notify:
-    - restart master api
+  - restart master api
 
 - set_fact:
     translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1', openshift.common.version, openshift.common.deployment_type) }}"
@@ -182,53 +215,66 @@
     group: root
     mode: 0600
   notify:
-    - restart master api
-    - restart master controllers
+  - restart master api
+  - restart master controllers
 
 - include: set_loopback_context.yml
-  when: openshift.common.version_gte_3_2_or_1_2
+  when:
+  - openshift.common.version_gte_3_2_or_1_2
 
 - name: Start and enable master api on first master
   systemd:
     name: "{{ openshift.common.service_type }}-master-api"
     enabled: yes
     state: started
-  when: openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0]
-  register: start_result
-  until: not start_result | failed
+  when:
+  - openshift.master.cluster_method == 'native'
+  - inventory_hostname == openshift_master_hosts[0]
+  register: l_start_result
+  until: not l_start_result | failed
   retries: 1
   delay: 60
 
 - name: Dump logs from master-api if it failed
   command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
-  when: start_result | failed
+  when:
+  - l_start_result | failed
 
 - set_fact:
-    master_api_service_status_changed: "{{ start_result | changed }}"
-  when: openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0]
+    master_api_service_status_changed: "{{ l_start_result | changed }}"
+  when:
+  - openshift.master.cluster_method == 'native'
+  - inventory_hostname == openshift_master_hosts[0]
 
 - pause:
     seconds: 15
-  when: openshift.master.ha | bool and openshift.master.cluster_method == 'native'
+  when:
+  - openshift.master.ha | bool
+  - openshift.master.cluster_method == 'native'
 
 - name: Start and enable master api all masters
   systemd:
     name: "{{ openshift.common.service_type }}-master-api"
     enabled: yes
     state: started
-  when: openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0]
-  register: start_result
-  until: not start_result | failed
+  when:
+  - openshift.master.cluster_method == 'native'
+  - inventory_hostname != openshift_master_hosts[0]
+  register: l_start_result
+  until: not l_start_result | failed
   retries: 1
   delay: 60
 
 - name: Dump logs from master-api if it failed
   command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-api
-  when: start_result | failed
+  when:
+  - l_start_result | failed
 
 - set_fact:
-    master_api_service_status_changed: "{{ start_result | changed }}"
-  when: openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0]
+    master_api_service_status_changed: "{{ l_start_result | changed }}"
+  when:
+  - openshift.master.cluster_method == 'native'
+  - inventory_hostname != openshift_master_hosts[0]
 
 # A separate wait is required here for native HA since notifies will
 # be resolved after all tasks in the role.
@@ -243,67 +289,80 @@
     --cacert {{ openshift.common.config_base }}/master/ca.crt
     {% endif %}
     {{ openshift.master.api_url }}/healthz/ready
-  register: api_available_output
-  until: api_available_output.stdout == 'ok'
+  register: l_api_available_output
+  until: l_api_available_output.stdout == 'ok'
   retries: 120
   delay: 1
   run_once: true
   changed_when: false
-  when: openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool
+  when:
+  - openshift.master.cluster_method == 'native'
+  - master_api_service_status_changed | bool
 
 - name: Start and enable master controller on first master
   systemd:
     name: "{{ openshift.common.service_type }}-master-controllers"
     enabled: yes
     state: started
-  when: openshift.master.cluster_method == 'native' and inventory_hostname == openshift_master_hosts[0]
-  register: start_result
-  until: not start_result | failed
+  when:
+  - openshift.master.cluster_method == 'native'
+  - inventory_hostname == openshift_master_hosts[0]
+  register: l_start_result
+  until: not l_start_result | failed
   retries: 1
   delay: 60
 
 - name: Dump logs from master-controllers if it failed
   command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
-  when: start_result | failed
+  when:
+  - l_start_result | failed
 
 - name: Wait for master controller service to start on first master
   pause:
     seconds: 15
-  when: openshift.master.cluster_method == 'native'
+  when:
+  - openshift.master.cluster_method == 'native'
 
 - name: Start and enable master controller on all masters
   systemd:
     name: "{{ openshift.common.service_type }}-master-controllers"
     enabled: yes
     state: started
-  when: openshift.master.cluster_method == 'native' and inventory_hostname != openshift_master_hosts[0]
-  register: start_result
-  until: not start_result | failed
+  when:
+  - openshift.master.cluster_method == 'native'
+  - inventory_hostname != openshift_master_hosts[0]
+  register: l_start_result
+  until: not l_start_result | failed
   retries: 1
   delay: 60
 
 - name: Dump logs from master-controllers if it failed
   command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
-  when: start_result | failed
+  when:
+  - l_start_result | failed
 
 - set_fact:
-    master_controllers_service_status_changed: "{{ start_result | changed }}"
-  when: openshift.master.cluster_method == 'native'
+    master_controllers_service_status_changed: "{{ l_start_result | changed }}"
+  when:
+  - openshift.master.cluster_method == 'native'
 
 - name: Install cluster packages
   package: name=pcs state=present
-  when: openshift.master.cluster_method == 'pacemaker'
-    and not openshift.common.is_containerized | bool
-  register: install_result
+  when:
+  - openshift.master.cluster_method == 'pacemaker'
+  - not openshift.common.is_containerized | bool
+  register: l_install_result
 
 - name: Start and enable cluster service
   systemd:
     name: pcsd
     enabled: yes
     state: started
-  when: openshift.master.cluster_method == 'pacemaker'
-    and not openshift.common.is_containerized | bool
+  when:
+  - openshift.master.cluster_method == 'pacemaker'
+  - not openshift.common.is_containerized | bool
 
 - name: Set the cluster user password
   shell: echo {{ openshift_master_cluster_password | quote }} | passwd --stdin hacluster
-  when: install_result | changed
+  when:
+  - l_install_result | changed

+ 8 - 5
roles/openshift_master/tasks/set_loopback_context.yml

@@ -4,7 +4,7 @@
     {{ openshift.common.client_binary }} config view
     --config={{ openshift_master_loopback_config }}
   changed_when: false
-  register: loopback_config
+  register: l_loopback_config
 
 - command: >
     {{ openshift.common.client_binary }} config set-cluster
@@ -12,7 +12,8 @@
     --embed-certs=true --server={{ openshift.master.loopback_api_url }}
     {{ openshift.master.loopback_cluster_name }}
     --config={{ openshift_master_loopback_config }}
-  when: loopback_context_string not in loopback_config.stdout
+  when:
+  - loopback_context_string not in l_loopback_config.stdout
   register: set_loopback_cluster
 
 - command: >
@@ -21,11 +22,13 @@
     --namespace=default --user={{ openshift.master.loopback_user }}
     {{ openshift.master.loopback_context_name }}
     --config={{ openshift_master_loopback_config }}
-  when: set_loopback_cluster | changed
-  register: set_loopback_context
+  when:
+  - set_loopback_cluster | changed
+  register: l_set_loopback_context
 
 - command: >
     {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }}
     --config={{ openshift_master_loopback_config }}
-  when: set_loopback_context | changed
+  when:
+  - l_set_loopback_context | changed
   register: set_current_context

+ 4 - 5
roles/openshift_master/tasks/system_container.yml

@@ -2,13 +2,12 @@
 - name: Pre-pull master system container image
   command: >
     atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
-  register: pull_result
-  changed_when: "'Pulling layer' in pull_result.stdout"
+  register: l_pull_result
+  changed_when: "'Pulling layer' in l_pull_result.stdout"
 
 - name: Check Master system container package
   command: >
     atomic containers list --no-trunc -a -f container={{ openshift.common.service_type }}-master
-  register: result
 
 # HA
 - name: Install or Update HA api master system container
@@ -17,7 +16,7 @@
     image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
     state: latest
     values:
-      - COMMAND=api
+    - COMMAND=api
 
 - name: Install or Update HA controller master system container
   oc_atomic_container:
@@ -25,4 +24,4 @@
     image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
     state: latest
     values:
-      - COMMAND=controllers
+    - COMMAND=controllers

+ 42 - 24
roles/openshift_master/tasks/systemd_units.yml

@@ -12,15 +12,18 @@
   set_fact:
     containerized_svc_dir: "/etc/systemd/system"
     ha_svc_template_path: "docker-cluster"
-  when: openshift.common.is_containerized | bool
+  when:
+  - openshift.common.is_containerized | bool
 
 # This is the image used for both HA and non-HA clusters:
 - name: Pre-pull master image
   command: >
     docker pull {{ openshift.master.master_image }}:{{ openshift_image_tag }}
-  register: pull_result
-  changed_when: "'Downloaded newer image' in pull_result.stdout"
-  when: openshift.common.is_containerized | bool and not openshift.common.is_master_system_container | bool
+  register: l_pull_result
+  changed_when: "'Downloaded newer image' in l_pull_result.stdout"
+  when:
+  - openshift.common.is_containerized | bool
+  - not openshift.common.is_master_system_container | bool
 
 - name: Create the ha systemd unit files
   template:
@@ -32,23 +35,26 @@
   with_items:
   - api
   - controllers
-  register: create_ha_unit_files
+  register: l_create_ha_unit_files
 
 - command: systemctl daemon-reload
-  when: create_ha_unit_files | changed
+  when:
+  - l_create_ha_unit_files | changed
 # end workaround for missing systemd unit files
 
 - name: Preserve Master API Proxy Config options
   command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-api
-  register: master_api_proxy
-  when: openshift.master.cluster_method == "native"
+  register: l_master_api_proxy
+  when:
+  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false
 
 - name: Preserve Master API AWS options
   command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-api
   register: master_api_aws
-  when: openshift.master.cluster_method == "native"
+  when:
+  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false
 
@@ -57,22 +63,27 @@
     src: "{{ ha_svc_template_path }}/atomic-openshift-master-api.j2"
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
     backup: true
-  when: openshift.master.cluster_method == "native"
+  when:
+  - openshift.master.cluster_method == "native"
   notify:
   - restart master api
 
 - name: Restore Master API Proxy Config Options
-  when: openshift.master.cluster_method == "native"
-      and master_api_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common
+  when:
+  - openshift.master.cluster_method == "native"
+  - l_master_api_proxy.rc == 0
+  - "'http_proxy' not in openshift.common"
+  - "'https_proxy' not in openshift.common"
   lineinfile:
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
     line: "{{ item }}"
-  with_items: "{{ master_api_proxy.stdout_lines | default([]) }}"
+  with_items: "{{ l_master_api_proxy.stdout_lines | default([]) }}"
 
 - name: Restore Master API AWS Options
-  when: openshift.master.cluster_method == "native"
-      and master_api_aws.rc == 0 and
-      not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
+  when:
+  - openshift.master.cluster_method == "native"
+  - master_api_aws.rc == 0
+  - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
   lineinfile:
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-api
     line: "{{ item }}"
@@ -82,14 +93,16 @@
 - name: Preserve Master Controllers Proxy Config options
   command: grep PROXY /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
   register: master_controllers_proxy
-  when: openshift.master.cluster_method == "native"
+  when:
+  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false
 
 - name: Preserve Master Controllers AWS options
   command: grep AWS_ /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
   register: master_controllers_aws
-  when: openshift.master.cluster_method == "native"
+  when:
+  - openshift.master.cluster_method == "native"
   failed_when: false
   changed_when: false
 
@@ -98,7 +111,8 @@
     src: "{{ ha_svc_template_path }}/atomic-openshift-master-controllers.j2"
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
     backup: true
-  when: openshift.master.cluster_method == "native"
+  when:
+  - openshift.master.cluster_method == "native"
   notify:
   - restart master controllers
 
@@ -107,14 +121,18 @@
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
     line: "{{ item }}"
   with_items: "{{ master_controllers_proxy.stdout_lines | default([]) }}"
-  when: openshift.master.cluster_method == "native"
-        and master_controllers_proxy.rc == 0 and 'http_proxy' not in openshift.common and 'https_proxy' not in openshift.common
+  when:
+  - openshift.master.cluster_method == "native"
+  - master_controllers_proxy.rc == 0
+  - "'http_proxy' not in openshift.common"
+  - "'https_proxy' not in openshift.common"
 
 - name: Restore Master Controllers AWS Options
   lineinfile:
     dest: /etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
     line: "{{ item }}"
   with_items: "{{ master_controllers_aws.stdout_lines | default([]) }}"
-  when: openshift.master.cluster_method == "native"
-      and master_controllers_aws.rc == 0 and
-      not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)
+  when:
+  - openshift.master.cluster_method == "native"
+  - master_controllers_aws.rc == 0
+  - not (openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined)