
YAML Linting

* Added checks to make ci for yaml linting
* Modified y(a)ml files to pass lint checks
Russell Teague 8 years ago
parent
commit
be97433dd5
96 changed files with 658 additions and 577 deletions
  1. .travis.yml (+1 -0)
  2. git/.yamllint (+67 -0)
  3. playbooks/adhoc/atomic_openshift_tutorial_reset.yml (+1 -1)
  4. playbooks/adhoc/bootstrap-fedora.yml (+1 -0)
  5. playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml (+1 -2)
  6. playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml (+1 -1)
  7. playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml (+4 -4)
  8. playbooks/adhoc/noc/create_host.yml (+9 -10)
  9. playbooks/adhoc/noc/create_maintenance.yml (+3 -4)
  10. playbooks/adhoc/openshift_hosted_logging_efk.yaml (+1 -2)
  11. playbooks/adhoc/s3_registry/s3_registry.yml (+1 -1)
  12. playbooks/adhoc/sdn_restart/oo-sdn-restart.yml (+1 -1)
  13. playbooks/adhoc/uninstall.yml (+1 -1)
  14. playbooks/adhoc/zabbix_setup/clean_zabbix.yml (+1 -1)
  15. playbooks/adhoc/zabbix_setup/oo-config-zaio.yml (+1 -1)
  16. playbooks/aws/openshift-cluster/cluster_hosts.yml (+8 -8)
  17. playbooks/aws/openshift-cluster/config.yml (+2 -2)
  18. playbooks/aws/openshift-cluster/tasks/launch_instances.yml (+15 -15)
  19. playbooks/aws/openshift-cluster/terminate.yml (+42 -42)
  20. playbooks/byo/openshift-cluster/cluster_hosts.yml (+8 -8)
  21. playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml (+1 -2)
  22. playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml (+1 -0)
  23. playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml (+0 -1)
  24. playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml (+0 -1)
  25. playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml (+0 -1)
  26. playbooks/byo/openshift-node/network_manager.yml (+19 -19)
  27. playbooks/byo/rhel_subscribe.yml (+3 -3)
  28. playbooks/common/openshift-cluster/additional_config.yml (+1 -0)
  29. playbooks/common/openshift-cluster/enable_dnsmasq.yml (+1 -1)
  30. playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml (+1 -2)
  31. playbooks/common/openshift-cluster/upgrades/etcd/backup.yml (+1 -0)
  32. playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml (+8 -10)
  33. playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml (+0 -1)
  34. playbooks/common/openshift-cluster/validate_hostnames.yml (+2 -2)
  35. playbooks/common/openshift-etcd/service.yml (+1 -1)
  36. playbooks/common/openshift-loadbalancer/service.yml (+1 -1)
  37. playbooks/common/openshift-master/config.yml (+2 -2)
  38. playbooks/common/openshift-master/restart.yml (+6 -7)
  39. playbooks/common/openshift-master/restart_hosts.yml (+1 -0)
  40. playbooks/common/openshift-master/restart_services.yml (+1 -0)
  41. playbooks/common/openshift-master/service.yml (+1 -1)
  42. playbooks/common/openshift-nfs/service.yml (+1 -1)
  43. playbooks/common/openshift-node/service.yml (+1 -1)
  44. playbooks/gce/openshift-cluster/cluster_hosts.yml (+8 -8)
  45. playbooks/gce/openshift-cluster/tasks/launch_instances.yml (+1 -1)
  46. playbooks/gce/openshift-cluster/terminate.yml (+11 -12)
  47. playbooks/libvirt/openshift-cluster/cluster_hosts.yml (+8 -8)
  48. playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml (+2 -2)
  49. playbooks/libvirt/openshift-cluster/terminate.yml (+0 -1)
  50. playbooks/libvirt/openshift-cluster/vars.yml (+10 -10)
  51. playbooks/openstack/openshift-cluster/cluster_hosts.yml (+8 -8)
  52. playbooks/openstack/openshift-cluster/launch.yml (+18 -18)
  53. playbooks/openstack/openshift-cluster/terminate.yml (+1 -0)
  54. playbooks/openstack/openshift-cluster/vars.yml (+1 -0)
  55. roles/docker/meta/main.yml (+2 -2)
  56. roles/docker/tasks/main.yml (+9 -9)
  57. roles/flannel_register/defaults/main.yaml (+0 -1)
  58. roles/kube_nfs_volumes/meta/main.yml (+1 -1)
  59. roles/nuage_ca/meta/main.yml (+1 -1)
  60. roles/nuage_common/defaults/main.yaml (+1 -0)
  61. roles/nuage_master/defaults/main.yaml (+1 -1)
  62. roles/nuage_master/meta/main.yml (+7 -7)
  63. roles/nuage_master/tasks/certificates.yml (+4 -4)
  64. roles/nuage_master/tasks/main.yaml (+7 -7)
  65. roles/nuage_master/vars/main.yaml (+9 -8)
  66. roles/nuage_node/meta/main.yml (+8 -8)
  67. roles/nuage_node/tasks/certificates.yml (+3 -3)
  68. roles/nuage_node/tasks/iptables.yml (+1 -1)
  69. roles/nuage_node/tasks/main.yaml (+11 -11)
  70. roles/nuage_node/vars/main.yaml (+2 -2)
  71. roles/openshift_builddefaults/tasks/main.yml (+1 -2)
  72. roles/openshift_cloud_provider/tasks/aws.yml (+1 -0)
  73. roles/openshift_cloud_provider/tasks/gce.yml (+1 -0)
  74. roles/openshift_common/tasks/main.yml (+2 -3)
  75. roles/openshift_docker_facts/tasks/main.yml (+1 -1)
  76. roles/openshift_examples/defaults/main.yml (+2 -2)
  77. roles/openshift_expand_partition/meta/main.yml (+2 -2)
  78. roles/openshift_hosted/tasks/registry/storage/object_storage.yml (+1 -0)
  79. roles/openshift_hosted_logging/tasks/cleanup_logging.yaml (+43 -43)
  80. roles/openshift_hosted_logging/tasks/deploy_logging.yaml (+174 -174)
  81. roles/openshift_hosted_logging/vars/main.yaml (+1 -0)
  82. roles/openshift_manageiq/vars/main.yml (+27 -26)
  83. roles/openshift_master/tasks/systemd_units.yml (+1 -0)
  84. roles/openshift_master_facts/tasks/main.yml (+2 -2)
  85. roles/openshift_master_facts/vars/main.yml (+0 -1)
  86. roles/openshift_metrics/tasks/main.yaml (+3 -3)
  87. roles/openshift_metrics/vars/main.yaml (+4 -3)
  88. roles/openshift_node/tasks/systemd_units.yml (+13 -12)
  89. roles/openshift_node_dnsmasq/tasks/no-network-manager.yml (+1 -1)
  90. roles/openshift_repos/vars/main.yml (+1 -1)
  91. roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml (+2 -1)
  92. roles/openshift_serviceaccounts/tasks/main.yml (+1 -0)
  93. roles/openshift_storage_nfs_lvm/meta/main.yml (+1 -1)
  94. roles/rhel_subscribe/meta/main.yml (+2 -1)
  95. utils/Makefile (+13 -3)
  96. utils/test-requirements.txt (+1 -0)

+ 1 - 0
.travis.yml

@@ -1,3 +1,4 @@
+---
 sudo: false
 
 language: python

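The only change shown for .travis.yml is the added document-start marker; per the file list, the lint invocation itself lands in utils/Makefile (the "make ci" target the commit message mentions) and utils/test-requirements.txt, neither of which is reproduced in this view. As a hedged sketch only, assuming CI were to call yamllint directly against the new shared config, the wiring could look like:

---
sudo: false

language: python

# --- hypothetical additions, for illustration only; not the commit's actual contents ---
install:
  - pip install yamllint           # presumably what the one-line addition to utils/test-requirements.txt pulls in
script:
  - yamllint -c git/.yamllint .    # lint all YAML files against the shared config
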
+ 67 - 0
git/.yamllint

@@ -0,0 +1,67 @@
+# -*- mode: yaml -*-
+# vim:ts=2:sw=2:ai:si:syntax=yaml
+#
+# yamllint configuration directives
+# Project Homepage: https://github.com/adrienverge/yamllint
+#
+# Overriding rules in files:
+# http://yamllint.readthedocs.io/en/latest/disable_with_comments.html
+---
+extends: default
+
+# Rules documentation: http://yamllint.readthedocs.io/en/latest/rules.html
+rules:
+
+  braces:
+    # Defaults
+    # min-spaces-inside: 0
+    # max-spaces-inside: 0
+
+    # Keeping 0 min-spaces to not error on empty collection definitions
+    min-spaces-inside: 0
+    # Allowing one space inside braces to improve code readability
+    max-spaces-inside: 1
+
+  brackets:
+    # Defaults
+    # min-spaces-inside: 0
+    # max-spaces-inside: 0
+
+    # Keeping 0 min-spaces to not error on empty collection definitions
+    min-spaces-inside: 0
+    # Allowing one space inside braces to improve code readability
+    max-spaces-inside: 1
+
+  comments:
+    # Defaults
+    # level: warning
+    # require-starting-space: true
+    # min-spaces-from-content: 2
+
+    # Disabling to allow for code comment blocks and #!/usr/bin/ansible-playbook
+    require-starting-space: false
+
+  indentation:
+    # Defaults
+    # spaces: consistent
+    # indent-sequences: true
+    # check-multi-line-strings: false
+
+    # Requiring 2 space indentation
+    spaces: 2
+    # Requiring consistent indentation within a file, either indented or not
+    indent-sequences: consistent
+
+  # Disabling due to copious amounts of long lines in the code which would
+  # require a code style change to resolve
+  line-length: disable
+    # Defaults
+    # max: 80
+    # allow-non-breakable-words: true
+    # allow-non-breakable-inline-mappings: false
+
+  # Disabling due to copious amounts of truthy warnings in the code which would
+  # require a code style change to resolve
+  truthy: disable
+    # Defaults
+    # level: warning

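The comment header above points at yamllint's disable-with-comments mechanism, and the commit itself uses it once: a whole-file "# yamllint disable rule:colons" in playbooks/openstack/openshift-cluster/vars.yml, shown further down. A minimal sketch of the in-file override forms, with placeholder keys:

# Silence one rule for a single line:
aligned_key:      "value"  # yamllint disable-line rule:colons

# Silence a rule for a region, then restore it:
# yamllint disable rule:colons
short_key:        "value"
much_longer_key:  "value"
# yamllint enable rule:colons

# Placed before any content, the same "disable" directive covers the rest of
# the file, which is what this commit does for the openstack vars.yml.
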
+ 1 - 1
playbooks/adhoc/atomic_openshift_tutorial_reset.yml

@@ -19,7 +19,7 @@
       changed_when: False
       failed_when: False
 
-    - shell:  docker images -q |xargs docker rmi
+    - shell: docker images -q |xargs docker rmi
       changed_when: False
       failed_when: False
 

+ 1 - 0
playbooks/adhoc/bootstrap-fedora.yml

@@ -1,3 +1,4 @@
+---
 - hosts: OSEv3
   gather_facts: false
   tasks:

+ 1 - 2
playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml

@@ -56,7 +56,7 @@
 
   - name: fail if we don't detect loopback
     fail:
-      msg:  loopback not detected! Please investigate manually.
+      msg: loopback not detected! Please investigate manually.
     when: loop_device_check.rc == 1
 
   - name: stop zagg client monitoring container
@@ -139,4 +139,3 @@
     register: dockerstart
 
   - debug: var=dockerstart
-

+ 1 - 1
playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml

@@ -43,7 +43,7 @@
 
   - name: fail if we don't detect loopback
     fail:
-      msg:  loopback not detected! Please investigate manually.
+      msg: loopback not detected! Please investigate manually.
     when: loop_device_check.rc == 1
 
   - name: stop zagg client monitoring container

+ 4 - 4
playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml

@@ -37,7 +37,7 @@
   vars:
     cli_volume_type: gp2
     cli_volume_size: 200
-#    cli_volume_iops: "{{ 30 * cli_volume_size }}"
+    #cli_volume_iops: "{{ 30 * cli_volume_size }}"
 
   pre_tasks:
   - fail:
@@ -65,7 +65,7 @@
 
   - name: fail if we don't detect devicemapper
     fail:
-      msg:  The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
+      msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
     when: device_mapper_check.rc == 1
 
   # docker-storage-setup creates a docker-pool as the lvm.  I am using docker-pool lvm to test
@@ -80,7 +80,7 @@
 
   - name: fail if we don't find a docker volume group
     fail:
-      msg:  Unable to find docker volume group. Please investigate manually.
+      msg: Unable to find docker volume group. Please investigate manually.
     when: docker_vg_name.stdout_lines|length != 1
 
   # docker-storage-setup creates a docker-pool as the lvm.  I am using docker-pool lvm to test
@@ -95,7 +95,7 @@
 
   - name: fail if we don't find a docker physical volume
     fail:
-      msg:  Unable to find docker physical volume. Please investigate manually.
+      msg: Unable to find docker physical volume. Please investigate manually.
     when: docker_pv_name.stdout_lines|length != 1
 
 

+ 9 - 10
playbooks/adhoc/noc/create_host.yml

@@ -16,7 +16,7 @@
           host: ctr_test_kwoodson
           filter:
             host:
-            -  ctr_kwoodson_test_tmpl
+              - ctr_kwoodson_test_tmpl
 
       register: tmpl_results
 
@@ -39,21 +39,20 @@
         params:
           host: ctr_test_kwoodson
           interfaces:
-          - type: 1
-            main: 1
-            useip: 1
-            ip: 127.0.0.1
-            dns: ""
-            port: 10050
+            - type: 1
+              main: 1
+              useip: 1
+              ip: 127.0.0.1
+              dns: ""
+              port: 10050
           groups:
-          - groupid: 1
+            - groupid: 1
           templates: "{{ tmpl_results.results | oo_collect('templateid') | oo_build_zabbix_list_dict('templateid') }}"
           output: extend
           filter:
             host:
-            -  ctr_test_kwoodson
+              - ctr_test_kwoodson
 
       register: host_results
 
     - debug: var=host_results
-

+ 3 - 4
playbooks/adhoc/noc/create_maintenance.yml

@@ -26,13 +26,12 @@
           maintenance_type: "0"
           output: extend
           hostids: "{{ oo_hostids.split(',') | default([]) }}"
-#groupids: "{{ oo_groupids.split(',') | default([]) }}"
+          #groupids: "{{ oo_groupids.split(',') | default([]) }}"
           timeperiods:
-          - start_time: "{{ oo_start }}"
-            period: "{{ oo_stop }}"
+            - start_time: "{{ oo_start }}"
+              period: "{{ oo_stop }}"
           selectTimeperiods: extend
 
       register: maintenance
 
     - debug: var=maintenance
-

+ 1 - 2
playbooks/adhoc/openshift_hosted_logging_efk.yaml

@@ -2,5 +2,4 @@
 - hosts: masters[0]
   roles:
   - role: openshift_hosted_logging
-    openshift_hosted_logging_cleanup: no 
-
+    openshift_hosted_logging_cleanup: no

+ 1 - 1
playbooks/adhoc/s3_registry/s3_registry.yml

@@ -22,7 +22,7 @@
   tasks:
 
   - name: Check for AWS creds
-    fail: 
+    fail:
       msg: "Couldn't find {{ item }} creds in ENV"
     when: "{{ item }} == ''"
     with_items:

+ 1 - 1
playbooks/adhoc/sdn_restart/oo-sdn-restart.yml

@@ -7,7 +7,7 @@
 - name: Check vars
   hosts: localhost
   gather_facts: false
- 
+
   pre_tasks:
   - fail:
       msg: "Playbook requires host to be set"

+ 1 - 1
playbooks/adhoc/uninstall.yml

@@ -184,7 +184,7 @@
     - docker.io/openshift
     when: openshift_uninstall_images | default(True) | bool
 
-  - shell:  "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+  - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
     changed_when: False
     failed_when: False
     with_items: "{{ images_to_delete.results }}"

+ 1 - 1
playbooks/adhoc/zabbix_setup/clean_zabbix.yml

@@ -57,4 +57,4 @@
       name: "{{ item }}"
       state: absent
     with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
-    when:  templ_heartbeat.results | length == 0
+    when: templ_heartbeat.results | length == 0

+ 1 - 1
playbooks/adhoc/zabbix_setup/oo-config-zaio.yml

@@ -15,5 +15,5 @@
     ozb_server: "{{ g_server }}"
     ozb_user: "{{ g_user }}"
     ozb_password: "{{ g_password }}"
-    ozb_scriptrunner_user:  "{{ g_zbx_scriptrunner_user }}"
+    ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}"
     ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}"

+ 8 - 8
playbooks/aws/openshift-cluster/cluster_hosts.yml

@@ -1,21 +1,21 @@
 ---
-g_all_hosts:     "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
-                    | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([])
+                 | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}"
 
-g_etcd_hosts:    "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}"
 
-g_lb_hosts:      "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}"
 
-g_nfs_hosts:     "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}"
 
-g_master_hosts:  "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}"
 
 g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}"
 
-g_node_hosts:    "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}"
 
 g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}"
 
-g_infra_hosts:   "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}"
 
 g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}"

+ 2 - 2
playbooks/aws/openshift-cluster/config.yml

@@ -17,8 +17,8 @@
 
 - include: ../../common/openshift-cluster/config.yml
   vars:
-    g_ssh_user:     "{{ deployment_vars[deployment_type].ssh_user }}"
-    g_sudo:         "{{ deployment_vars[deployment_type].become }}"
+    g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+    g_sudo: "{{ deployment_vars[deployment_type].become }}"
     g_nodeonmaster: true
     openshift_cluster_id: "{{ cluster_id }}"
     openshift_debug_level: "{{ debug_level }}"

+ 15 - 15
playbooks/aws/openshift-cluster/tasks/launch_instances.yml

@@ -125,21 +125,21 @@
 
 - set_fact:
     logrotate:
-        - name: syslog
-          path: |
-            /var/log/cron
-            /var/log/maillog
-            /var/log/messages
-            /var/log/secure
-            /var/log/spooler"
-          options:
-            - daily
-            - rotate 7
-            - compress
-            - sharedscripts
-            - missingok
-          scripts:
-            postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
+    - name: syslog
+      path: |
+        /var/log/cron
+        /var/log/maillog
+        /var/log/messages
+        /var/log/secure
+        /var/log/spooler"
+      options:
+      - daily
+      - rotate 7
+      - compress
+      - sharedscripts
+      - missingok
+      scripts:
+        postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
 
 - name: Add new instances groups and variables
   add_host:

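The reindented logrotate fact above, like many task lists later in the commit, follows from the "indent-sequences: consistent" setting: yamllint does not mandate one sequence style, it only requires that a file stick to whichever style it uses first. Both of the sketches below (placeholder tasks) pass; many of the reworked playbooks settle on the flush style.

# File A -- sequences indented one level under their key:
tasks:
  - name: indented sequence style
    command: echo hello

# File B -- sequences flush with their key:
tasks:
- name: flush sequence style
  command: echo hello
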
+ 42 - 42
playbooks/aws/openshift-cluster/terminate.yml

@@ -29,49 +29,49 @@
   become: no
   gather_facts: no
   tasks:
-    - name: Remove tags from instances
-      ec2_tag:
-        resource: "{{ hostvars[item]['ec2_id'] }}"
-        region: "{{ hostvars[item]['ec2_region'] }}"
-        state: absent
-        tags:
-          environment:   "{{ hostvars[item]['ec2_tag_environment'] }}"
-          clusterid:     "{{ hostvars[item]['ec2_tag_clusterid'] }}"
-          host-type:     "{{ hostvars[item]['ec2_tag_host-type'] }}"
-          sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
-      with_items: "{{ groups.oo_hosts_to_terminate }}"
-      when: "'oo_hosts_to_terminate' in groups"
+  - name: Remove tags from instances
+    ec2_tag:
+      resource: "{{ hostvars[item]['ec2_id'] }}"
+      region: "{{ hostvars[item]['ec2_region'] }}"
+      state: absent
+      tags:
+        environment: "{{ hostvars[item]['ec2_tag_environment'] }}"
+        clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}"
+        host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}"
+        sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}"
+    with_items: "{{ groups.oo_hosts_to_terminate }}"
+    when: "'oo_hosts_to_terminate' in groups"
 
-    - name: Terminate instances
-      ec2:
-        state: absent
-        instance_ids: ["{{ hostvars[item].ec2_id }}"]
-        region: "{{ hostvars[item].ec2_region }}"
-      ignore_errors: yes
-      register: ec2_term
-      with_items: "{{ groups.oo_hosts_to_terminate }}"
-      when: "'oo_hosts_to_terminate' in groups"
+  - name: Terminate instances
+    ec2:
+      state: absent
+      instance_ids: ["{{ hostvars[item].ec2_id }}"]
+      region: "{{ hostvars[item].ec2_region }}"
+    ignore_errors: yes
+    register: ec2_term
+    with_items: "{{ groups.oo_hosts_to_terminate }}"
+    when: "'oo_hosts_to_terminate' in groups"
 
-    # Fail if any of the instances failed to terminate with an error other
-    # than 403 Forbidden
-    - fail:
-        msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
-      when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
-      with_items: "{{ ec2_term.results }}"
+  # Fail if any of the instances failed to terminate with an error other
+  # than 403 Forbidden
+  - fail:
+      msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"
+    when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
+    with_items: "{{ ec2_term.results }}"
 
-    - name: Stop instance if termination failed
-      ec2:
-        state: stopped
-        instance_ids: ["{{ item.item.ec2_id }}"]
-        region: "{{ item.item.ec2_region }}"
-      register: ec2_stop
-      when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
-      with_items: "{{ ec2_term.results }}"
+  - name: Stop instance if termination failed
+    ec2:
+      state: stopped
+      instance_ids: ["{{ item.item.ec2_id }}"]
+      region: "{{ item.item.ec2_region }}"
+    register: ec2_stop
+    when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed"
+    with_items: "{{ ec2_term.results }}"
 
-    - name: Rename stopped instances
-      ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
-      args:
-        tags:
-          Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
-      with_items: "{{ ec2_stop.results }}"
-      when: ec2_stop | changed
+  - name: Rename stopped instances
+    ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present
+    args:
+      tags:
+        Name: "{{ item.item.item.ec2_tag_Name }}-terminate"
+    with_items: "{{ ec2_stop.results }}"
+    when: ec2_stop | changed

+ 8 - 8
playbooks/byo/openshift-cluster/cluster_hosts.yml

@@ -1,19 +1,19 @@
 ---
-g_etcd_hosts:   "{{ groups.etcd | default([]) }}"
+g_etcd_hosts: "{{ groups.etcd | default([]) }}"
 
-g_lb_hosts:     "{{ groups.lb | default([]) }}"
+g_lb_hosts: "{{ groups.lb | default([]) }}"
 
 g_master_hosts: "{{ groups.masters | default([]) }}"
 
 g_new_master_hosts: "{{ groups.new_masters | default([]) }}"
 
-g_node_hosts:   "{{ groups.nodes | default([]) }}"
+g_node_hosts: "{{ groups.nodes | default([]) }}"
 
 g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
 
-g_nfs_hosts:   "{{ groups.nfs | default([]) }}"
+g_nfs_hosts: "{{ groups.nfs | default([]) }}"
 
-g_all_hosts:    "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
-                    | union(g_lb_hosts) | union(g_nfs_hosts)
-                    | union(g_new_node_hosts)| union(g_new_master_hosts)
-                    | default([]) }}"
+g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
+                 | union(g_lb_hosts) | union(g_nfs_hosts)
+                 | union(g_new_node_hosts)| union(g_new_master_hosts)
+                 | default([]) }}"

+ 1 - 2
playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -1,4 +1,4 @@
-
+---
 - name: Check for appropriate Docker versions
   hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
   roles:
@@ -43,4 +43,3 @@
       {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool
-

+ 1 - 0
playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml

@@ -1,3 +1,4 @@
+---
 # Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
 #
 # Currently only supports upgrading 1.9.x to >= 1.10.x.

+ 0 - 1
playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml

@@ -97,4 +97,3 @@
     node_config_hook: "v3_3/node_config_upgrade.yml"
 
 - include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-

+ 0 - 1
playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml

@@ -98,4 +98,3 @@
     master_config_hook: "v3_3/master_config_upgrade.yml"
 
 - include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-

+ 0 - 1
playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml

@@ -93,4 +93,3 @@
 - include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
 
 - include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-

+ 19 - 19
playbooks/byo/openshift-node/network_manager.yml

@@ -13,24 +13,24 @@
 - hosts: l_oo_all_hosts
   become: yes
   tasks:
-    - name: install NetworkManager
-      package:
-        name: 'NetworkManager'
-        state: present
+  - name: install NetworkManager
+    package:
+      name: 'NetworkManager'
+      state: present
 
-    - name: configure NetworkManager
-      lineinfile:
-        dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
-        regexp: '^{{ item }}='
-        line: '{{ item }}=yes'
-        state: present
-        create: yes
-      with_items:
-        - 'USE_PEERDNS'
-        - 'NM_CONTROLLED'
+  - name: configure NetworkManager
+    lineinfile:
+      dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
+      regexp: '^{{ item }}='
+      line: '{{ item }}=yes'
+      state: present
+      create: yes
+    with_items:
+    - 'USE_PEERDNS'
+    - 'NM_CONTROLLED'
 
-    - name: enable and start NetworkManager
-      service:
-        name: 'NetworkManager'
-        state: started
-        enabled: yes
+  - name: enable and start NetworkManager
+    service:
+      name: 'NetworkManager'
+      state: started
+      enabled: yes

+ 3 - 3
playbooks/byo/rhel_subscribe.yml

@@ -14,9 +14,9 @@
   gather_facts: no
   tasks:
   - include_vars: openshift-cluster/cluster_hosts.yml
-  
-- include: ../common/openshift-cluster/evaluate_groups.yml 
-  
+
+- include: ../common/openshift-cluster/evaluate_groups.yml
+
 - hosts: l_oo_all_hosts
   vars:
     openshift_deployment_type: "{{ deployment_type }}"

+ 1 - 0
playbooks/common/openshift-cluster/additional_config.yml

@@ -1,3 +1,4 @@
+---
 - name: Additional master configuration
   hosts: oo_first_master
   vars:

+ 1 - 1
playbooks/common/openshift-cluster/enable_dnsmasq.yml

@@ -59,7 +59,7 @@
   vars:
     openshift_deployment_type: "{{ deployment_type }}"
   roles:
-    - openshift_node_dnsmasq
+  - openshift_node_dnsmasq
   post_tasks:
   - modify_yaml:
       dest: "{{ openshift.common.config_base }}/node/node-config.yaml"

+ 1 - 2
playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml

@@ -50,6 +50,5 @@
 
 - name: Flag to delete all images prior to upgrade if crossing Docker 1.10 boundary
   set_fact:
-      docker_upgrade_nuke_images: True
+    docker_upgrade_nuke_images: True
   when: l_docker_upgrade | bool and docker_upgrade_nuke_images is not defined and curr_docker_version.stdout | version_compare('1.10','<') and docker_version | version_compare('1.10','>=')
-

+ 1 - 0
playbooks/common/openshift-cluster/upgrades/etcd/backup.yml

@@ -1,3 +1,4 @@
+---
 - name: Backup etcd
   hosts: etcd_hosts_to_backup
   vars:

+ 8 - 10
playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

@@ -58,8 +58,8 @@
 
   - include: rpm_upgrade.yml
     vars:
-       component: "node"
-       openshift_version: "{{ openshift_pkg_version | default('') }}"
+      component: "node"
+      openshift_version: "{{ openshift_pkg_version | default('') }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade and not openshift.common.is_containerized | bool
 
   - name: Remove obsolete docker-sdn-ovs.conf
@@ -72,12 +72,12 @@
   - name: Ensure containerized services stopped before Docker restart
     service: name={{ item }} state=stopped
     with_items:
-      - etcd_container
-      - openvswitch
-      - "{{ openshift.common.service_type }}-master"
-      - "{{ openshift.common.service_type }}-master-api"
-      - "{{ openshift.common.service_type }}-master-controllers"
-      - "{{ openshift.common.service_type }}-node"
+    - etcd_container
+    - openvswitch
+    - "{{ openshift.common.service_type }}-master"
+    - "{{ openshift.common.service_type }}-master-api"
+    - "{{ openshift.common.service_type }}-master-controllers"
+    - "{{ openshift.common.service_type }}-node"
     failed_when: false
     when: openshift.common.is_containerized | bool
 
@@ -96,5 +96,3 @@
     until: node_sched.rc == 0
     retries: 3
     delay: 1
-
-

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml

@@ -18,4 +18,3 @@
     dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
     yaml_key: 'masterClientConnectionOverrides.qps'
     yaml_value: 20
-

+ 2 - 2
playbooks/common/openshift-cluster/validate_hostnames.yml

@@ -11,6 +11,6 @@
     failed_when: false
   - name: Warn user about bad openshift_hostname values
     pause:
-       prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
-       seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
+      prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
+      seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
     when: lookupip.stdout not in ansible_all_ipv4_addresses

+ 1 - 1
playbooks/common/openshift-etcd/service.yml

@@ -17,4 +17,4 @@
   connection: ssh
   gather_facts: no
   tasks:
-    - service: name=etcd state="{{ new_cluster_state }}"
+  - service: name=etcd state="{{ new_cluster_state }}"

+ 1 - 1
playbooks/common/openshift-loadbalancer/service.yml

@@ -17,4 +17,4 @@
   connection: ssh
   gather_facts: no
   tasks:
-    - service: name=haproxy state="{{ new_cluster_state }}"
+  - service: name=haproxy state="{{ new_cluster_state }}"

+ 2 - 2
playbooks/common/openshift-master/config.yml

@@ -99,8 +99,8 @@
   - openshift_facts:
       role: master
       local_facts:
-          session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
-          session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
+        session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
+        session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
 
 - name: Generate master session secrets
   hosts: oo_first_master

+ 6 - 7
playbooks/common/openshift-master/restart.yml

@@ -13,12 +13,12 @@
       role: "{{ item.role }}"
       local_facts: "{{ item.local_facts }}"
     with_items:
-      - role: common
-        local_facts:
-          rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
-      - role: master
-        local_facts:
-          cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+    - role: common
+      local_facts:
+        rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
+    - role: master
+      local_facts:
+        cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
 
 # Creating a temp file on localhost, we then check each system that will
 # be rebooted to see if that file exists, if so we know we're running
@@ -76,4 +76,3 @@
     when: openshift.common.rolling_restart_mode == 'system'
   - include: restart_services.yml
     when: openshift.common.rolling_restart_mode == 'services'
-

+ 1 - 0
playbooks/common/openshift-master/restart_hosts.yml

@@ -1,3 +1,4 @@
+---
 - name: Restart master system
   # https://github.com/ansible/ansible/issues/10616
   shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"

+ 1 - 0
playbooks/common/openshift-master/restart_services.yml

@@ -1,3 +1,4 @@
+---
 - name: Restart master
   service:
     name: "{{ openshift.common.service_type }}-master"

+ 1 - 1
playbooks/common/openshift-master/service.yml

@@ -17,4 +17,4 @@
   connection: ssh
   gather_facts: no
   tasks:
-    - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"
+  - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}"

+ 1 - 1
playbooks/common/openshift-nfs/service.yml

@@ -15,4 +15,4 @@
   connection: ssh
   gather_facts: no
   tasks:
-    - service: name=nfs-server state="{{ new_cluster_state }}"
+  - service: name=nfs-server state="{{ new_cluster_state }}"

+ 1 - 1
playbooks/common/openshift-node/service.yml

@@ -17,4 +17,4 @@
   connection: ssh
   gather_facts: no
   tasks:
-    - service: name={{ service_type }}-node state="{{ new_cluster_state }}"
+  - service: name={{ service_type }}-node state="{{ new_cluster_state }}"

+ 8 - 8
playbooks/gce/openshift-cluster/cluster_hosts.yml

@@ -1,21 +1,21 @@
 ---
-g_all_hosts:     "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
-                    | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
+                 | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
 
-g_etcd_hosts:    "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
 
-g_lb_hosts:      "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
 
-g_nfs_hosts:     "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
 
-g_master_hosts:  "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
 
 g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
 
-g_node_hosts:    "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
 
 g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
 
-g_infra_hosts:   "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
 
 g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"

+ 1 - 1
playbooks/gce/openshift-cluster/tasks/launch_instances.yml

@@ -10,7 +10,7 @@
     zone: "{{ lookup('env', 'zone') }}"
     network: "{{ lookup('env', 'network') }}"
     subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}"
-# unsupported in 1.9.+
+    # unsupported in 1.9.+
     #service_account_permissions: "datastore,logging-write"
     tags:
       - created-by-{{ lookup('env', 'LOGNAME') | regex_replace('[^a-z0-9]+', '') | default(cluster, true) }}

+ 11 - 12
playbooks/gce/openshift-cluster/terminate.yml

@@ -33,18 +33,17 @@
   vars_files:
   - vars.yml
   tasks:
-
-    - name: Terminate instances that were previously launched
-      local_action:
-        module: gce
-        state: 'absent'
-        name: "{{ item }}"
-        service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
-        pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
-        project_id: "{{ lookup('env', 'gce_project_id') }}"
-        zone: "{{ lookup('env', 'zone') }}"
-      with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}"
-      when: item is defined
+  - name: Terminate instances that were previously launched
+    local_action:
+      module: gce
+      state: 'absent'
+      name: "{{ item }}"
+      service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
+      pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
+      project_id: "{{ lookup('env', 'gce_project_id') }}"
+      zone: "{{ lookup('env', 'zone') }}"
+    with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}"
+    when: item is defined
 
 #- include: ../openshift-node/terminate.yml
 #  vars:

+ 8 - 8
playbooks/libvirt/openshift-cluster/cluster_hosts.yml

@@ -1,21 +1,21 @@
 ---
-g_all_hosts:     "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
-                    | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([])
+                 | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}"
 
-g_etcd_hosts:    "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}"
 
-g_lb_hosts:      "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}"
 
-g_nfs_hosts:     "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}"
 
-g_master_hosts:  "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}"
 
 g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}"
 
-g_node_hosts:    "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}"
 
 g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}"
 
-g_infra_hosts:   "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}"
 
 g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}"

+ 2 - 2
playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml

@@ -134,5 +134,5 @@
   retries: 30
   delay: 1
   with_together:
-  - '{{ instances }}'
-  - '{{ ips }}'
+    - '{{ instances }}'
+    - '{{ ips }}'

+ 0 - 1
playbooks/libvirt/openshift-cluster/terminate.yml

@@ -68,4 +68,3 @@
       path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
       state: absent
     with_items: "{{ groups['oo_hosts_to_terminate'] }}"
-

+ 10 - 10
playbooks/libvirt/openshift-cluster/vars.yml

@@ -12,10 +12,10 @@ debug_level: 2
 # The default value of image_url for enterprise and openshift-enterprise deployment types below won't work.
 deployment_rhel7_ent_base:
   image:
-    url:    "{{ lookup('oo_option', 'image_url') |
-                default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
-    name:   "{{ lookup('oo_option', 'image_name') |
-                default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+    url: "{{ lookup('oo_option', 'image_url') |
+             default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
+    name: "{{ lookup('oo_option', 'image_name') |
+              default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}"
     sha256: "{{ lookup('oo_option', 'image_sha256') |
                 default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}"
     compression: ""
@@ -25,12 +25,12 @@ deployment_rhel7_ent_base:
 deployment_vars:
   origin:
     image:
-      url:    "{{ lookup('oo_option', 'image_url') |
-                  default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}"
-      compression:   "{{ lookup('oo_option', 'image_compression') |
-                         default('xz', True) }}"
-      name:   "{{ lookup('oo_option', 'image_name') |
-                  default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
+      url: "{{ lookup('oo_option', 'image_url') |
+               default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}"
+      compression: "{{ lookup('oo_option', 'image_compression') |
+                       default('xz', True) }}"
+      name: "{{ lookup('oo_option', 'image_name') |
+                default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}"
       sha256: "{{ lookup('oo_option', 'image_sha256') |
                   default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}"
     ssh_user: openshift

+ 8 - 8
playbooks/openstack/openshift-cluster/cluster_hosts.yml

@@ -1,21 +1,21 @@
 ---
-g_all_hosts:     "{{ groups['meta-clusterid_' ~ cluster_id] | default([])
-                    | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}"
+g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([])
+                 | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}"
 
-g_etcd_hosts:    "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}"
+g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}"
 
-g_lb_hosts:      "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}"
+g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}"
 
-g_nfs_hosts:     "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"
+g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}"
 
-g_master_hosts:  "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"
+g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}"
 
 g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}"
 
-g_node_hosts:    "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}"
+g_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}"
 
 g_new_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_node'] | default([])) }}"
 
-g_infra_hosts:   "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}"
+g_infra_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}"
 
 g_compute_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_compute'] | default([])) }}"

+ 18 - 18
playbooks/openstack/openshift-cluster/launch.yml

@@ -111,9 +111,9 @@
         public_v4: '{{ item[2] }}'
         private_v4: '{{ item[1] }}'
     with_together:
-      - '{{ parsed_outputs.etcd_names }}'
-      - '{{ parsed_outputs.etcd_ips }}'
-      - '{{ parsed_outputs.etcd_floating_ips }}'
+    - '{{ parsed_outputs.etcd_names }}'
+    - '{{ parsed_outputs.etcd_ips }}'
+    - '{{ parsed_outputs.etcd_floating_ips }}'
 
   - name: Add new master instances groups and variables
     add_host:
@@ -128,9 +128,9 @@
         public_v4: '{{ item[2] }}'
         private_v4: '{{ item[1] }}'
     with_together:
-      - '{{ parsed_outputs.master_names }}'
-      - '{{ parsed_outputs.master_ips }}'
-      - '{{ parsed_outputs.master_floating_ips }}'
+    - '{{ parsed_outputs.master_names }}'
+    - '{{ parsed_outputs.master_ips }}'
+    - '{{ parsed_outputs.master_floating_ips }}'
 
   - name: Add new node instances groups and variables
     add_host:
@@ -145,9 +145,9 @@
         public_v4: '{{ item[2] }}'
         private_v4: '{{ item[1] }}'
     with_together:
-      - '{{ parsed_outputs.node_names }}'
-      - '{{ parsed_outputs.node_ips }}'
-      - '{{ parsed_outputs.node_floating_ips }}'
+    - '{{ parsed_outputs.node_names }}'
+    - '{{ parsed_outputs.node_ips }}'
+    - '{{ parsed_outputs.node_floating_ips }}'
 
   - name: Add new infra instances groups and variables
     add_host:
@@ -162,18 +162,18 @@
         public_v4: '{{ item[2] }}'
         private_v4: '{{ item[1] }}'
     with_together:
-      - '{{ parsed_outputs.infra_names }}'
-      - '{{ parsed_outputs.infra_ips }}'
-      - '{{ parsed_outputs.infra_floating_ips }}'
+    - '{{ parsed_outputs.infra_names }}'
+    - '{{ parsed_outputs.infra_ips }}'
+    - '{{ parsed_outputs.infra_floating_ips }}'
 
   - name: Wait for ssh
     wait_for:
       host: '{{ item }}'
       port: 22
     with_flattened:
-      - '{{ parsed_outputs.master_floating_ips }}'
-      - '{{ parsed_outputs.node_floating_ips }}'
-      - '{{ parsed_outputs.infra_floating_ips }}'
+    - '{{ parsed_outputs.master_floating_ips }}'
+    - '{{ parsed_outputs.node_floating_ips }}'
+    - '{{ parsed_outputs.infra_floating_ips }}'
 
   - name: Wait for user setup
     command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
@@ -182,9 +182,9 @@
     retries: 30
     delay: 1
     with_flattened:
-      - '{{ parsed_outputs.master_floating_ips }}'
-      - '{{ parsed_outputs.node_floating_ips }}'
-      - '{{ parsed_outputs.infra_floating_ips }}'
+    - '{{ parsed_outputs.master_floating_ips }}'
+    - '{{ parsed_outputs.node_floating_ips }}'
+    - '{{ parsed_outputs.infra_floating_ips }}'
 
 - include: update.yml
 

+ 1 - 0
playbooks/openstack/openshift-cluster/terminate.yml

@@ -1,3 +1,4 @@
+---
 - name: Terminate instance(s)
   hosts: localhost
   become: no

+ 1 - 0
playbooks/openstack/openshift-cluster/vars.yml

@@ -1,3 +1,4 @@
+# yamllint disable rule:colons
 ---
 debug_level: 2
 openstack_infra_heat_stack:     "{{ lookup('oo_option', 'infra_heat_stack' ) |

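This vars file keeps its column-aligned values (several spaces after each colon), which the default colons rule rejects because it allows at most one space after a colon, so the commit opts the whole file out rather than reflowing it. A scoped alternative, sketched with placeholder keys, would re-enable the rule after the aligned block:

---
# yamllint disable rule:colons
first_key:      "aligned value"
second_key:     "another aligned value"
# yamllint enable rule:colons
normal_key: "back under the default colons rule"
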
+ 2 - 2
roles/docker/meta/main.yml

@@ -10,5 +10,5 @@ galaxy_info:
     versions:
     - 7
 dependencies:
-  - role: os_firewall
-    os_firewall_use_firewalld: False
+- role: os_firewall
+  os_firewall_use_firewalld: False

+ 9 - 9
roles/docker/tasks/main.yml

@@ -86,16 +86,16 @@
     line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val }}'"
     state: "{{ 'present' if item.reg_fact_val != '' else 'absent'}}"
   with_items:
-    - reg_conf_var: HTTP_PROXY
-      reg_fact_val: "{{ docker_http_proxy | default('') }}"
-    - reg_conf_var: HTTPS_PROXY
-      reg_fact_val: "{{ docker_https_proxy | default('') }}"
-    - reg_conf_var: NO_PROXY
-      reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}"
+  - reg_conf_var: HTTP_PROXY
+    reg_fact_val: "{{ docker_http_proxy | default('') }}"
+  - reg_conf_var: HTTPS_PROXY
+    reg_fact_val: "{{ docker_https_proxy | default('') }}"
+  - reg_conf_var: NO_PROXY
+    reg_fact_val: "{{ docker_no_proxy | default('') | join(',') }}"
   notify:
-    - restart docker
+  - restart docker
   when:
-    - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
+  - docker_check.stat.isreg is defined and docker_check.stat.isreg and '"http_proxy" in openshift.common or "https_proxy" in openshift.common'
 
 - name: Set various Docker options
   lineinfile:
@@ -109,7 +109,7 @@
       {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
   when: docker_check.stat.isreg is defined and docker_check.stat.isreg
   notify:
-    - restart docker
+  - restart docker
 
 - name: Start the Docker service
   systemd:

+ 0 - 1
roles/flannel_register/defaults/main.yaml

@@ -8,4 +8,3 @@ etcd_conf_dir: "{{ openshift.common.config_base }}/master"
 etcd_peer_ca_file: "{{ etcd_conf_dir + '/ca.crt' if (openshift.master.embedded_etcd | bool) else etcd_conf_dir + '/master.etcd-ca.crt' }}"
 etcd_peer_cert_file: "{{ etcd_conf_dir }}/master.etcd-client.crt"
 etcd_peer_key_file: "{{ etcd_conf_dir }}/master.etcd-client.key"
-

+ 1 - 1
roles/kube_nfs_volumes/meta/main.yml

@@ -13,5 +13,5 @@ galaxy_info:
     versions:
     - all
   categories:
-    - cloud
+  - cloud
 dependencies: []

+ 1 - 1
roles/nuage_ca/meta/main.yml

@@ -1,6 +1,6 @@
 ---
 galaxy_info:
-  author: Vishal Patil 
+  author: Vishal Patil
   description:
   company: Nuage Networks
   license: Apache License, Version 2.0

+ 1 - 0
roles/nuage_common/defaults/main.yaml

@@ -1,3 +1,4 @@
+---
 nuage_ca_master: "{{ groups.oo_first_master.0 }}"
 nuage_ca_master_crt_dir: /usr/share/nuage-openshift-certificates
 

+ 1 - 1
roles/nuage_master/defaults/main.yaml

@@ -1,4 +1,4 @@
 ---
 nuage_master_cspadminpasswd: ""
 nuage_master_adminusername: admin
-nuage_master_adminuserpasswd: admin 
+nuage_master_adminuserpasswd: admin

+ 7 - 7
roles/nuage_master/meta/main.yml

@@ -13,10 +13,10 @@ galaxy_info:
   - cloud
   - system
 dependencies:
-  - role: nuage_ca
-  - role: nuage_common
-  - role: openshift_etcd_client_certificates
-  - role: os_firewall
-    os_firewall_allow:
-    - service: openshift-monitor
-      port: "{{ nuage_mon_rest_server_port }}/tcp"
+- role: nuage_ca
+- role: nuage_common
+- role: openshift_etcd_client_certificates
+- role: os_firewall
+  os_firewall_allow:
+  - service: openshift-monitor
+    port: "{{ nuage_mon_rest_server_port }}/tcp"

+ 4 - 4
roles/nuage_master/tasks/certificates.yml

@@ -1,11 +1,11 @@
 ---
 - name: Create a directory to hold the certificates
   file: path="{{ nuage_mon_rest_server_crt_dir }}" state=directory
-  delegate_to: "{{ nuage_ca_master }}" 
+  delegate_to: "{{ nuage_ca_master }}"
 
 - name: Create the key
   command: >
-    openssl genrsa -out "{{ nuage_ca_master_rest_server_key }}" 4096  
+    openssl genrsa -out "{{ nuage_ca_master_rest_server_key }}" 4096
   delegate_to: "{{ nuage_ca_master }}"
 
 - name: Create the req file
@@ -30,7 +30,7 @@
   shell: "cd {{ nuage_mon_rest_server_crt_dir }} && tar -czvf /tmp/{{ ansible_nodename }}.tgz *"
   delegate_to: "{{ nuage_ca_master }}"
 
-- name: Create a temp directory for the certificates 
+- name: Create a temp directory for the certificates
   local_action: command mktemp -d "/tmp/openshift-{{ ansible_nodename }}-XXXXXXX"
   register: mktemp
 
@@ -42,7 +42,7 @@
   unarchive: src="{{ mktemp.stdout }}/{{ ansible_nodename }}.tgz" dest={{ nuage_master_crt_dir }}
 
 - name: Delete the certificates after copy
-  file: path="{{ nuage_mon_rest_server_crt_dir }}" state=absent 
+  file: path="{{ nuage_mon_rest_server_crt_dir }}" state=absent
   delegate_to: "{{ nuage_ca_master }}"
 
 - name: Delete the temp directory

+ 7 - 7
roles/nuage_master/tasks/main.yaml

@@ -1,13 +1,13 @@
 ---
 - name: Create directory /usr/share/nuage-openshift-monitor
   become: yes
-  file: path=/usr/share/nuage-openshift-monitor state=directory  
+  file: path=/usr/share/nuage-openshift-monitor state=directory
 
 - name: Create the log directory
   become: yes
   file: path={{ nuage_mon_rest_server_logdir }} state=directory
 
-- name: Install Nuage Openshift Monitor 
+- name: Install Nuage Openshift Monitor
   become: yes
   yum: name={{ nuage_openshift_rpm }} state=present
 
@@ -17,12 +17,12 @@
   become: yes
   fetch: src={{ cert_output_dir }}/{{ item }} dest=/tmp/{{ item }} flat=yes
   with_items:
-        - ca.crt
-        - nuage.crt
-        - nuage.key
-        - nuage.kubeconfig 
+    - ca.crt
+    - nuage.crt
+    - nuage.key
+    - nuage.kubeconfig
 
-- include: certificates.yml 
+- include: certificates.yml
 
 - name: Create nuage-openshift-monitor.yaml
   become: yes

+ 9 - 8
roles/nuage_master/vars/main.yaml

@@ -1,3 +1,4 @@
+---
 openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
 openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
 openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
@@ -6,7 +7,7 @@ ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
 admin_config: "{{ openshift.common.config_base }}/master/admin.kubeconfig"
 cert_output_dir: /usr/share/nuage-openshift-monitor
 kube_config: /usr/share/nuage-openshift-monitor/nuage.kubeconfig
-kubemon_yaml: /usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml 
+kubemon_yaml: /usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml
 master_config_yaml: "{{ openshift_master_config_dir }}/master-config.yaml"
 nuage_mon_rest_server_url: "0.0.0.0:{{ nuage_mon_rest_server_port }}"
 nuage_mon_rest_server_logdir: "{{ nuage_openshift_monitor_log_dir | default('/var/log/nuage-openshift-monitor') }}"
@@ -14,18 +15,18 @@ nuage_mon_log_level: "{{ nuage_openshift_monitor_log_level | default('3') }}"
 
 nuage_mon_rest_server_crt_dir: "{{ nuage_ca_master_crt_dir }}/{{ ansible_nodename }}"
 nuage_ca_master_rest_server_key: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.key"
-nuage_ca_master_rest_server_crt: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.crt" 
+nuage_ca_master_rest_server_crt: "{{ nuage_mon_rest_server_crt_dir }}/nuageMonServer.crt"
 
 nuage_mon_rest_server_host: "{{ openshift.master.cluster_hostname | default(openshift.common.hostname) }}"
 
-nuage_master_crt_dir : /usr/share/nuage-openshift-monitor
+nuage_master_crt_dir: /usr/share/nuage-openshift-monitor
 nuage_service_account: system:serviceaccount:default:nuage
 
 nuage_service_account_config:
-    apiVersion: v1
-    kind: ServiceAccount
-    metadata:
-      name: nuage 
+  apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: nuage
 
 nuage_tasks:
-    - policy add-cluster-role-to-user cluster-reader {{ nuage_service_account }} 
+  - policy add-cluster-role-to-user cluster-reader {{ nuage_service_account }}

+ 8 - 8
roles/nuage_node/meta/main.yml

@@ -13,11 +13,11 @@ galaxy_info:
   - cloud
   - system
 dependencies:
-  - role: nuage_common
-  - role: nuage_ca
-  - role: os_firewall
-    os_firewall_allow:
-    - service: vxlan
-      port: 4789/udp
-    - service: nuage-monitor
-      port: "{{ nuage_mon_rest_server_port }}/tcp"
+- role: nuage_common
+- role: nuage_ca
+- role: os_firewall
+  os_firewall_allow:
+  - service: vxlan
+    port: 4789/udp
+  - service: nuage-monitor
+    port: "{{ nuage_mon_rest_server_port }}/tcp"

+ 3 - 3
roles/nuage_node/tasks/certificates.yml

@@ -5,7 +5,7 @@
 
 - name: Create the key
   command: >
-    openssl genrsa -out "{{ nuage_ca_master_plugin_key }}" 4096  
+    openssl genrsa -out "{{ nuage_ca_master_plugin_key }}" 4096
   delegate_to: "{{ nuage_ca_master }}"
 
 - name: Create the req file
@@ -30,7 +30,7 @@
   shell: "cd {{ nuage_plugin_rest_client_crt_dir }} && tar -czvf /tmp/{{ ansible_nodename }}.tgz *"
   delegate_to: "{{ nuage_ca_master }}"
 
-- name: Create a temp directory for the certificates 
+- name: Create a temp directory for the certificates
   local_action: command mktemp -d "/tmp/openshift-{{ ansible_nodename }}-XXXXXXX"
   register: mktemp
 
@@ -42,7 +42,7 @@
   unarchive: src="{{ mktemp.stdout }}/{{ ansible_nodename }}.tgz" dest={{ nuage_plugin_crt_dir }}
 
 - name: Delete the certificates after copy
-  file: path="{{ nuage_plugin_rest_client_crt_dir }}" state=absent 
+  file: path="{{ nuage_plugin_rest_client_crt_dir }}" state=absent
   delegate_to: "{{ nuage_ca_master }}"
 
 - name: Delete the temp directory

+ 1 - 1
roles/nuage_node/tasks/iptables.yml

@@ -5,7 +5,7 @@
   always_run: yes
 
 - name: Allow traffic from overlay to underlay
-  command: /sbin/iptables --wait -I FORWARD 1 -s {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -j ACCEPT -m comment --comment "nuage-overlay-underlay" 
+  command: /sbin/iptables --wait -I FORWARD 1 -s {{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }} -j ACCEPT -m comment --comment "nuage-overlay-underlay"
   when: "'nuage-overlay-underlay' not in iptablesrules.stdout"
   notify:
     - save iptable rules

+ 11 - 11
roles/nuage_node/tasks/main.yaml

@@ -2,16 +2,16 @@
 - name: Install Nuage VRS
   become: yes
   yum: name={{ vrs_rpm }} state=present
-  
-- name: Set the uplink interface 
+
+- name: Set the uplink interface
   become: yes
   lineinfile: dest={{ vrs_config }} regexp=^NETWORK_UPLINK_INTF line='NETWORK_UPLINK_INTF={{ uplink_interface }}'
 
-- name: Set the Active Controller 
+- name: Set the Active Controller
   become: yes
   lineinfile: dest={{ vrs_config }} regexp=^ACTIVE_CONTROLLER line='ACTIVE_CONTROLLER={{ vsc_active_ip }}'
 
-- name: Set the Standby Controller 
+- name: Set the Standby Controller
   become: yes
   lineinfile: dest={{ vrs_config }} regexp=^STANDBY_CONTROLLER line='STANDBY_CONTROLLER={{ vsc_standby_ip }}'
   when: vsc_standby_ip is defined
@@ -24,18 +24,18 @@
   become: yes
   copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}"
   with_items:
-        - ca.crt
-        - nuage.crt
-        - nuage.key
-        - nuage.kubeconfig 
+    - ca.crt
+    - nuage.crt
+    - nuage.key
+    - nuage.kubeconfig
 
 - include: certificates.yml
 
-- name: Set the vsp-openshift.yaml 
+- name: Set the vsp-openshift.yaml
   become: yes
-  template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644 
+  template: src=vsp-openshift.j2 dest={{ vsp_openshift_yaml }} owner=root mode=0644
   notify:
     - restart vrs
-    - restart node 
+    - restart node
 
 - include: iptables.yml

+ 2 - 2
roles/nuage_node/vars/main.yaml

@@ -17,6 +17,6 @@ plugin_log_level: "{{ nuage_plugin_log_level | default('err') }}"
 
 nuage_plugin_rest_client_crt_dir: "{{ nuage_ca_master_crt_dir }}/{{ ansible_nodename }}"
 nuage_ca_master_plugin_key: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.key"
-nuage_ca_master_plugin_crt: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.crt" 
+nuage_ca_master_plugin_crt: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.crt"
 
-nuage_plugin_crt_dir : /usr/share/vsp-openshift
+nuage_plugin_crt_dir: /usr/share/vsp-openshift

+ 1 - 2
roles/openshift_builddefaults/tasks/main.yml

@@ -15,10 +15,9 @@
       no_proxy: "{{ openshift_builddefaults_no_proxy | default(None) }}"
       git_http_proxy: "{{ openshift_builddefaults_git_http_proxy | default(None) }}"
       git_https_proxy: "{{ openshift_builddefaults_git_https_proxy | default(None) }}"
-      
+
 - name: Set builddefaults config structure
   openshift_facts:
     role: builddefaults
     local_facts:
       config: "{{ openshift_builddefaults_json | default(builddefaults_yaml) }}"
-        

+ 1 - 0
roles/openshift_cloud_provider/tasks/aws.yml

@@ -1,3 +1,4 @@
+---
 # Work around ini_file create option in 2.2 which defaults to no
 - name: Create cloud config file
   file:

+ 1 - 0
roles/openshift_cloud_provider/tasks/gce.yml

@@ -1,3 +1,4 @@
+---
 # Work around ini_file create option in 2.2 which defaults to no
 - name: Create cloud config file
   file:
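The one-line additions to the cloud provider task files add the YAML document-start marker. yamllint has a document-start rule that, when set to require the marker, flags any file that does not begin with three dashes; the same fix shows up in many of the small hunks in this commit. A hypothetical excerpt of such a rule (the actual settings are in git/.yamllint):

    rules:
      document-start:
        present: true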

+ 2 - 3
roles/openshift_common/tasks/main.yml

@@ -4,11 +4,11 @@
   when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_flannel | default(false) | bool
 
 - fail:
-   msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
+    msg: Nuage sdn can not be used with openshift sdn, set openshift_use_openshift_sdn=false if you want to use nuage
   when: openshift_use_openshift_sdn | default(true) | bool and openshift_use_nuage | default(false) | bool
 
 - fail:
-   msg: Nuage sdn can not be used with flannel
+    msg: Nuage sdn can not be used with flannel
   when: openshift_use_flannel | default(false) | bool and openshift_use_nuage | default(false) | bool
 
 - fail:
@@ -46,4 +46,3 @@
   command: >
     hostnamectl set-hostname {{ openshift.common.hostname }}
   when: openshift_set_hostname | default(set_hostname_default) | bool
-

+ 1 - 1
roles/openshift_docker_facts/tasks/main.yml

@@ -9,7 +9,7 @@
       additional_registries: "{{ openshift_docker_additional_registries | default(None) }}"
       blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}"
       insecure_registries: "{{ openshift_docker_insecure_registries | default(None) }}"
-      log_driver:  "{{ openshift_docker_log_driver | default(None) }}"
+      log_driver: "{{ openshift_docker_log_driver | default(None) }}"
       log_options: "{{ openshift_docker_log_options | default(None) }}"
       options: "{{ openshift_docker_options | default(None) }}"
       disable_push_dockerhub: "{{ openshift_disable_push_dockerhub | default(None) }}"

+ 2 - 2
roles/openshift_examples/defaults/main.yml

@@ -12,8 +12,8 @@ examples_base: "{{ openshift.common.config_base if openshift.common.is_container
 image_streams_base: "{{ examples_base }}/image-streams"
 centos_image_streams: "{{ image_streams_base}}/image-streams-centos7.json"
 rhel_image_streams:
- - "{{ image_streams_base}}/image-streams-rhel7.json"
- - "{{ image_streams_base}}/dotnet_imagestreams.json"
+  - "{{ image_streams_base}}/image-streams-rhel7.json"
+  - "{{ image_streams_base}}/dotnet_imagestreams.json"
 db_templates_base: "{{ examples_base }}/db-templates"
 xpaas_image_streams: "{{ examples_base }}/xpaas-streams/"
 xpaas_templates_base: "{{ examples_base }}/xpaas-templates"

+ 2 - 2
roles/openshift_expand_partition/meta/main.yml

@@ -13,6 +13,6 @@ galaxy_info:
     versions:
     - all
   categories:
-    - openshift
-    - cloud
+  - openshift
+  - cloud
 dependencies: []

+ 1 - 0
roles/openshift_hosted/tasks/registry/storage/object_storage.yml

@@ -1,3 +1,4 @@
+---
 - fail:
     msg: >
       Object Storage Provider: {{ openshift.hosted.registry.storage.provider }}

+ 43 - 43
roles/openshift_hosted_logging/tasks/cleanup_logging.yaml

@@ -1,59 +1,59 @@
 ---
-  - name: Create temp directory for kubeconfig
-    command: mktemp -d /tmp/openshift-ansible-XXXXXX
-    register: mktemp
-    changed_when: False
+- name: Create temp directory for kubeconfig
+  command: mktemp -d /tmp/openshift-ansible-XXXXXX
+  register: mktemp
+  changed_when: False
 
-  - name: Copy the admin client config(s)
-    command: >
-      cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
-    changed_when: False
+- name: Copy the admin client config(s)
+  command: >
+    cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+  changed_when: False
 
-  - name: "Checking for logging project"
-    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
-    register: logging_project
-    failed_when: "'FAILED' in logging_project.stderr"
+- name: "Checking for logging project"
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
+  register: logging_project
+  failed_when: "'FAILED' in logging_project.stderr"
 
-  - name: "Changing projects"
-    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
+- name: "Changing projects"
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
 
 
-  - name: "Cleanup any previous logging infrastructure"
-    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
-    with_items:
-      - kibana
-      - fluentd
-      - elasticsearch
-    ignore_errors: yes
+- name: "Cleanup any previous logging infrastructure"
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
+  with_items:
+    - kibana
+    - fluentd
+    - elasticsearch
+  ignore_errors: yes
 
-  - name: "Cleanup existing support infrastructure"
-    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
-    ignore_errors: yes
+- name: "Cleanup existing support infrastructure"
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
+  ignore_errors: yes
 
-  - name: "Cleanup existing secrets"
-    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
-    ignore_errors: yes
-    register: clean_result
-    failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
+- name: "Cleanup existing secrets"
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
+  ignore_errors: yes
+  register: clean_result
+  failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
 
-  - name: "Cleanup existing logging deployers"
-    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
+- name: "Cleanup existing logging deployers"
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
 
 
-  - name: "Cleanup logging project"
-    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
+- name: "Cleanup logging project"
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
 
 
-  - name: "Remove deployer template"
-    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
-    register: delete_output
-    failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr
+- name: "Remove deployer template"
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
+  register: delete_output
+  failed_when: delete_output.rc == 1 and 'exists' not in delete_output.stderr
 
 
-  - name: Delete temp directory
-    file:
-      name: "{{ mktemp.stdout }}"
-      state: absent
-    changed_when: False
+- name: Delete temp directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False
 
-  - debug: msg="Success!"
+- debug: msg="Success!"
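cleanup_logging.yaml (and deploy_logging.yaml below) are included task files whose entire task list was indented by two extra spaces; the commit out-dents them so the top-level sequence starts at column 0, matching the rest of the repository. The resulting shape, assuming the usual layout of an included task file:

    ---
    - name: Create temp directory for kubeconfig
      command: mktemp -d /tmp/openshift-ansible-XXXXXX
      register: mktemp
      changed_when: False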

+ 174 - 174
roles/openshift_hosted_logging/tasks/deploy_logging.yaml

@@ -1,175 +1,175 @@
 ---
-  - debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
-    when: target_registry is defined and target_registry
-
-  - fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
-    when: "openshift_hosted_logging_hostname is not defined or
-          openshift_hosted_logging_elasticsearch_cluster_size is not defined or
-          openshift_hosted_logging_master_public_url is not defined"
-
-  - name: Create temp directory for kubeconfig
-    command: mktemp -d /tmp/openshift-ansible-XXXXXX
-    register: mktemp
-    changed_when: False
-
-  - name: Copy the admin client config(s)
-    command: >
-      cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
-    changed_when: False
-
-  - name: "Check for logging project already exists"
-    command: >
-      {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
-    register: logging_project_result
-    ignore_errors: True
-
-  - name: "Create logging project"
-    command: >
-      {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
-    when: logging_project_result.stdout == ""
-
-  - name: "Changing projects"
-    command: >
-      {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
-
-  - name: "Creating logging deployer secret"
-    command: >
-      {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
-    register: secret_output
-    failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
-
-  - name: "Create templates for logging accounts and the deployer"
-    command: >
-      {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
-      -f {{ hosted_base }}/logging-deployer.yaml
-      --config={{ mktemp.stdout }}/admin.kubeconfig
-      -n logging
-    register: logging_import_template
-    failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
-    changed_when: "'created' in logging_import_template.stdout"
-
-  - name: "Process the logging accounts template"
-    shell: >
-      {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
-      process logging-deployer-account-template |  {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
-    register: process_deployer_accounts
-    failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
-
-  - name: "Set permissions for logging-deployer service account"
-    command: >
-      {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
-      policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
-    register: permiss_output
-    failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
-
-  - name: "Set permissions for fluentd"
-    command: >
-      {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
-      policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
-    register: fluentd_output
-    failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
-
-  - name: "Set additional permissions for fluentd"
-    command: >
-      {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
-      add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
-    register: fluentd2_output
-    failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
-
-  - name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
-    command: >
-      {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
-      policy add-cluster-role-to-user rolebinding-reader \
-      system:serviceaccount:logging:aggregated-logging-elasticsearch
-    register: rolebinding_reader_output
-    failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"
-
-  - name: "Create ConfigMap for deployer parameters"
-    command: >
-      {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
-    register: deployer_configmap_output
-    failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
-
-  - name: "Process the deployer template"
-    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
-    register: process_deployer
-    failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
-
-  - name: "Wait for image pull and deployer pod"
-    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
-    register: result
-    until: result.rc == 0
-    retries: 20
-    delay: 15
-
-  - name: "Process imagestream template"
-    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
-    when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
-    register: process_is
-    failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
-
-  - name: "Set insecured registry"
-    command:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all  openshift.io/image.insecureRepository=true --overwrite"
-    when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
-
-  - name: "Wait for imagestreams to become available"
-    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
-    when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
-    register: result
-    until: result.rc == 0
-    failed_when: result.rc == 1 and 'not found' not in result.stderr
-    retries: 20
-    delay: 5
-
-  - name: "Wait for component pods to be running"
-    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
-    with_items:
-      - es
-      - kibana
-      - curator
-    register: result
-    until: result.rc == 0
-    failed_when: result.rc == 1 or 'Error' in result.stderr
-    retries: 20
-    delay: 15
-
-  - name: "Wait for ops component pods to be running"
-    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
-    with_items:
-      - es-ops
-      - kibana-ops
-      - curator-ops
-    when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
-    register: result
-    until: result.rc == 0
-    failed_when: result.rc == 1 or 'Error' in result.stderr
-    retries: 20
-    delay: 15
-
-  - name: "Wait for fluentd DaemonSet to exist"
-    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
-    register: result
-    until: result.rc == 0
-    failed_when: result.rc == 1 or 'Error' in result.stderr
-    retries: 20
-    delay: 5
-
-  - name: "Deploy fluentd by labeling the node"
-    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
-
-  - name: "Wait for fluentd to be running"
-    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
-    register: result
-    until: result.rc == 0
-    failed_when: result.rc == 1 or 'Error' in result.stderr
-    retries: 20
-    delay: 15
-
-  - debug:
-      msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
-
-  - name: Delete temp directory
-    file:
-      name: "{{ mktemp.stdout }}"
-      state: absent
-    changed_when: False
+- debug: msg="WARNING target_registry is deprecated, use openshift_hosted_logging_image_prefix instead"
+  when: target_registry is defined and target_registry
+
+- fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
+  when: "openshift_hosted_logging_hostname is not defined or
+        openshift_hosted_logging_elasticsearch_cluster_size is not defined or
+        openshift_hosted_logging_master_public_url is not defined"
+
+- name: Create temp directory for kubeconfig
+  command: mktemp -d /tmp/openshift-ansible-XXXXXX
+  register: mktemp
+  changed_when: False
+
+- name: Copy the admin client config(s)
+  command: >
+    cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+  changed_when: False
+
+- name: "Check for logging project already exists"
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging -o jsonpath='{.metadata.name}'
+  register: logging_project_result
+  ignore_errors: True
+
+- name: "Create logging project"
+  command: >
+    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
+  when: logging_project_result.stdout == ""
+
+- name: "Changing projects"
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging
+
+- name: "Creating logging deployer secret"
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}
+  register: secret_output
+  failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
+
+- name: "Create templates for logging accounts and the deployer"
+  command: >
+    {{ openshift.common.client_binary }} create --config={{ mktemp.stdout }}/admin.kubeconfig
+    -f {{ hosted_base }}/logging-deployer.yaml
+    --config={{ mktemp.stdout }}/admin.kubeconfig
+    -n logging
+  register: logging_import_template
+  failed_when: "'already exists' not in logging_import_template.stderr and logging_import_template.rc != 0"
+  changed_when: "'created' in logging_import_template.stdout"
+
+- name: "Process the logging accounts template"
+  shell: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    process logging-deployer-account-template |  {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -
+  register: process_deployer_accounts
+  failed_when: process_deployer_accounts.rc == 1 and 'already exists' not in process_deployer_accounts.stderr
+
+- name: "Set permissions for logging-deployer service account"
+  command: >
+    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
+    policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
+  register: permiss_output
+  failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
+
+- name: "Set permissions for fluentd"
+  command: >
+    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
+    policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
+  register: fluentd_output
+  failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+
+- name: "Set additional permissions for fluentd"
+  command: >
+    {{ openshift.common.client_binary }} adm policy --config={{ mktemp.stdout }}/admin.kubeconfig
+    add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
+  register: fluentd2_output
+  failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+
+- name: "Add rolebinding-reader to aggregated-logging-elasticsearch"
+  command: >
+    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
+    policy add-cluster-role-to-user rolebinding-reader \
+    system:serviceaccount:logging:aggregated-logging-elasticsearch
+  register: rolebinding_reader_output
+  failed_when: "rolebinding_reader_output == 1 and 'exists' not in rolebinding_reader_output.stderr"
+
+- name: "Create ConfigMap for deployer parameters"
+  command: >
+    {{ openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-deployer {{ deployer_cmap_params }}
+  register: deployer_configmap_output
+  failed_when: "deployer_configmap_output.rc == 1 and 'exists' not in deployer_configmap_output.stderr"
+
+- name: "Process the deployer template"
+  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-deployer-template {{ oc_new_app_values }}"
+  register: process_deployer
+  failed_when: process_deployer.rc == 1 and 'already exists' not in process_deployer.stderr
+
+- name: "Wait for image pull and deployer pod"
+  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
+  register: result
+  until: result.rc == 0
+  retries: 20
+  delay: 15
+
+- name: "Process imagestream template"
+  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-app logging-imagestream-template {{ oc_new_app_values }}"
+  when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+  register: process_is
+  failed_when: process_is.rc == 1 and 'already exists' not in process_is.stderr
+
+- name: "Set insecured registry"
+  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all  openshift.io/image.insecureRepository=true --overwrite"
+  when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+
+- name: "Wait for imagestreams to become available"
+  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
+  when: tr_or_ohlip is defined and insecure_registry is defined and insecure_registry
+  register: result
+  until: result.rc == 0
+  failed_when: result.rc == 1 and 'not found' not in result.stderr
+  retries: 20
+  delay: 5
+
+- name: "Wait for component pods to be running"
+  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
+  with_items:
+    - es
+    - kibana
+    - curator
+  register: result
+  until: result.rc == 0
+  failed_when: result.rc == 1 or 'Error' in result.stderr
+  retries: 20
+  delay: 15
+
+- name: "Wait for ops component pods to be running"
+  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component={{ item }} | grep Running"
+  with_items:
+    - es-ops
+    - kibana-ops
+    - curator-ops
+  when: openshift_hosted_logging_enable_ops_cluster is defined and openshift_hosted_logging_enable_ops_cluster
+  register: result
+  until: result.rc == 0
+  failed_when: result.rc == 1 or 'Error' in result.stderr
+  retries: 20
+  delay: 15
+
+- name: "Wait for fluentd DaemonSet to exist"
+  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get daemonset logging-fluentd"
+  register: result
+  until: result.rc == 0
+  failed_when: result.rc == 1 or 'Error' in result.stderr
+  retries: 20
+  delay: 5
+
+- name: "Deploy fluentd by labeling the node"
+  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node --overwrite=true {{ '-l' ~ openshift_hosted_logging_fluentd_nodeselector if openshift_hosted_logging_fluentd_nodeselector is defined else '--all' }} {{ openshift_hosted_logging_fluentd_nodeselector_label if openshift_hosted_logging_fluentd_nodeselector_label is defined else 'logging-infra-fluentd=true' }}"
+
+- name: "Wait for fluentd to be running"
+  shell: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods -l component=fluentd | grep Running"
+  register: result
+  until: result.rc == 0
+  failed_when: result.rc == 1 or 'Error' in result.stderr
+  retries: 20
+  delay: 15
+
+- debug:
+    msg: "Logging components deployed. Note persistent volume for elasticsearch must be setup manually"
+
+- name: Delete temp directory
+  file:
+    name: "{{ mktemp.stdout }}"
+    state: absent
+  changed_when: False

+ 1 - 0
roles/openshift_hosted_logging/vars/main.yaml

@@ -1,3 +1,4 @@
+---
 tr_or_ohlip: "{{ openshift_hosted_logging_deployer_prefix | default(target_registry) | default(None) }}"
 ip_kv: "{{ '-p IMAGE_PREFIX=' ~ tr_or_ohlip | quote if tr_or_ohlip != '' else '' }}"
 iv_kv: "{{ '-p IMAGE_VERSION=' ~ openshift_hosted_logging_deployer_version | quote if openshift_hosted_logging_deployer_version | default(none) is not none else '' }}"

+ 27 - 26
roles/openshift_manageiq/vars/main.yml

@@ -1,13 +1,14 @@
+---
 manageiq_cluster_role:
-    apiVersion: v1
-    kind: ClusterRole
-    metadata:
-      name: management-infra-admin
-    rules:
-    - resources:
-      - pods/proxy
-      verbs:
-      - '*'
+  apiVersion: v1
+  kind: ClusterRole
+  metadata:
+    name: management-infra-admin
+  rules:
+  - resources:
+    - pods/proxy
+    verbs:
+    - '*'
 
 manageiq_metrics_admin_clusterrole:
   apiVersion: v1
@@ -24,28 +25,28 @@ manageiq_metrics_admin_clusterrole:
     - '*'
 
 manageiq_service_account:
-    apiVersion: v1
-    kind: ServiceAccount
-    metadata:
-      name: management-admin
+  apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: management-admin
 
 manageiq_image_inspector_service_account:
-    apiVersion: v1
-    kind: ServiceAccount
-    metadata:
-      name: inspector-admin
+  apiVersion: v1
+  kind: ServiceAccount
+  metadata:
+    name: inspector-admin
 
 manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
 
 manage_iq_tasks:
-    - policy add-role-to-user -n management-infra admin -z management-admin
-    - policy add-role-to-user -n management-infra management-infra-admin -z management-admin
-    - policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
-    - policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
-    - policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
-    - policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
-    - policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
-    - policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
+- policy add-role-to-user -n management-infra admin -z management-admin
+- policy add-role-to-user -n management-infra management-infra-admin -z management-admin
+- policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
+- policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user system:image-puller system:serviceaccount:management-infra:inspector-admin
+- policy add-scc-to-user privileged system:serviceaccount:management-infra:inspector-admin
+- policy add-cluster-role-to-user self-provisioner system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user hawkular-metrics-admin system:serviceaccount:management-infra:management-admin
 
 manage_iq_openshift_3_2_tasks:
-    - policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin
+- policy add-cluster-role-to-user system:image-auditor system:serviceaccount:management-infra:management-admin

+ 1 - 0
roles/openshift_master/tasks/systemd_units.yml

@@ -1,3 +1,4 @@
+---
 # This file is included both in the openshift_master role and in the upgrade
 # playbooks.  For that reason the ha_svc variables are use set_fact instead of
 # the vars directory on the role.

+ 2 - 2
roles/openshift_master_facts/tasks/main.yml

@@ -92,8 +92,8 @@
       controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
       master_image: "{{ osm_image | default(None) }}"
       admission_plugin_config: "{{openshift_master_admission_plugin_config | default(None) }}"
-      kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
-      oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
+      kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}"  # deprecated, merged with admission_plugin_config
+      oauth_template: "{{ openshift_master_oauth_template | default(None) }}"  # deprecated in origin 1.2 / OSE 3.2
       oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
       oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}"
       image_policy_config: "{{ openshift_master_image_policy_config | default(None) }}"
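The openshift_master_facts hunk adjusts comment spacing rather than indentation: yamllint's comments rule, in its usual configuration, wants at least two spaces between the end of content and an inline comment. For example:

    oauth_template: "{{ openshift_master_oauth_template | default(None) }}"  # two spaces before '#'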

+ 0 - 1
roles/openshift_master_facts/vars/main.yml

@@ -23,4 +23,3 @@ builddefaults_yaml:
         value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}"
       - name: no_proxy
         value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}"
-

+ 3 - 3
roles/openshift_metrics/tasks/main.yaml

@@ -38,9 +38,9 @@
     get pods -l {{ item }} | grep -q Running
   register: metrics_pods_status
   with_items:
-    - metrics-infra=hawkular-metrics
-    - metrics-infra=heapster
-    - metrics-infra=hawkular-cassandra
+  - metrics-infra=hawkular-metrics
+  - metrics-infra=heapster
+  - metrics-infra=hawkular-cassandra
   failed_when: false
   changed_when: false
 

+ 4 - 3
roles/openshift_metrics/vars/main.yaml

@@ -1,6 +1,7 @@
+---
 hawkular_permission_oc_commands:
-    - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
-    - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
+  - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
+  - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
 
 metrics_deployer_sa:
   apiVersion: v1
@@ -8,7 +9,7 @@ metrics_deployer_sa:
   metadata:
     name: metrics-deployer
   secrets:
-  - name: metrics-deployer
+    - name: metrics-deployer
 
 
 hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig

+ 13 - 12
roles/openshift_node/tasks/systemd_units.yml

@@ -1,3 +1,4 @@
+---
 # This file is included both in the openshift_master role and in the upgrade
 # playbooks.
 
@@ -68,12 +69,12 @@
     line: "{{ item.line }}"
     create: true
   with_items:
-    - regex: '^OPTIONS='
-      line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
-    - regex: '^CONFIG_FILE='
-      line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
-    - regex: '^IMAGE_VERSION='
-      line: "IMAGE_VERSION={{ openshift_image_tag }}"
+  - regex: '^OPTIONS='
+    line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+  - regex: '^CONFIG_FILE='
+    line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
+  - regex: '^IMAGE_VERSION='
+    line: "IMAGE_VERSION={{ openshift_image_tag }}"
   notify:
   - restart node
 
@@ -84,12 +85,12 @@
     line: "{{ item.line }}"
     create: true
   with_items:
-    - regex: '^HTTP_PROXY='
-      line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
-    - regex: '^HTTPS_PROXY='
-      line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
-    - regex: '^NO_PROXY='
-      line: "NO_PROXY={{ openshift.common.no_proxy | default([]) | join(',') }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
+  - regex: '^HTTP_PROXY='
+    line: "HTTP_PROXY={{ openshift.common.http_proxy | default('') }}"
+  - regex: '^HTTPS_PROXY='
+    line: "HTTPS_PROXY={{ openshift.common.https_proxy | default('') }}"
+  - regex: '^NO_PROXY='
+    line: "NO_PROXY={{ openshift.common.no_proxy | default([]) | join(',') }},{{ openshift.common.portal_net }},{{ hostvars[groups.oo_first_master.0].openshift.master.sdn_cluster_network_cidr }}"
   when: ('http_proxy' in openshift.common and openshift.common.http_proxy != '')
   notify:
   - restart node

+ 1 - 1
roles/openshift_node_dnsmasq/tasks/no-network-manager.yml

@@ -1,2 +1,2 @@
 ---
-- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation."
+- fail: msg="Currently, NetworkManager must be installed and enabled prior to installation."

+ 1 - 1
roles/openshift_repos/vars/main.yml

@@ -4,4 +4,4 @@
 # enterprise is used for OSE 3.0 < 3.1 which uses packages named 'openshift'
 # atomic-enterprise uses Red Hat packages named 'atomic-openshift'
 # openshift-enterprise uses Red Hat packages named 'atomic-openshift' starting with OSE 3.1
-known_openshift_deployment_types: ['origin', 'online', 'enterprise','atomic-enterprise','openshift-enterprise']
+known_openshift_deployment_types: ['origin', 'online', 'enterprise', 'atomic-enterprise', 'openshift-enterprise']

+ 2 - 1
roles/openshift_serviceaccounts/tasks/legacy_add_scc_to_user.yml

@@ -1,3 +1,4 @@
+---
 ####
 #
 # OSE 3.0.z did not have 'oadm policy add-scc-to-user'.
@@ -9,7 +10,7 @@
     path: /tmp/openshift
     state: directory
     owner: root
-    mode: 700
+    mode: 0700
 
 - name: Create service account configs
   template:
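The legacy_add_scc_to_user.yml hunk contains the one change that goes beyond formatting: mode: 700 becomes mode: 0700. A bare 700 is read by YAML as the decimal integer 700, which Ansible then applies as an unintended permission value; the leading zero makes it an octal literal, i.e. rwx for the owner only. Quoting the mode is an equally safe spelling, sketched here with the same attributes as the task above:

    - file:
        path: /tmp/openshift
        state: directory
        owner: root
        mode: '0700'    # quoted (or zero-prefixed) so it is applied as octal permissions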

+ 1 - 0
roles/openshift_serviceaccounts/tasks/main.yml

@@ -1,3 +1,4 @@
+---
 - name: test if service accounts exists
   command: >
       {{ openshift.common.client_binary }} get sa {{ item }} -n {{ openshift_serviceaccounts_namespace }}

+ 1 - 1
roles/openshift_storage_nfs_lvm/meta/main.yml

@@ -13,5 +13,5 @@ galaxy_info:
     versions:
     - all
   categories:
-    - openshift
+  - openshift
 dependencies: []

+ 2 - 1
roles/rhel_subscribe/meta/main.yml

@@ -1,2 +1,3 @@
+---
 dependencies:
-- role: openshift_facts
+  - role: openshift_facts

+ 13 - 3
utils/Makefile

@@ -32,6 +32,10 @@ ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $<
 MANPAGES := docs/man/man1/atomic-openshift-installer.1
 VERSION := 1.3
 
+# YAMLFILES: Skipping all '/files/' folders due to conflicting yaml file definitions
+YAMLFILES = $(shell find ../ -name $(VENV) -prune -o \( -name '*.yml' -o -name '*.yaml' \) ! -path "*/files/*" 2>&1)
+PYFILES = $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name "*.py" -print)
+
 sdist: clean
 	python setup.py sdist
 	rm -fR $(SHORTNAME).egg-info
@@ -86,7 +90,13 @@ ci-pylint: $(VENV)
 	@echo "#############################################"
 	@echo "# Running PyLint Tests in virtualenv"
 	@echo "#############################################"
-	. $(VENV)/bin/activate && python -m pylint --rcfile ../git/.pylintrc $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name "*.py" -print)
+	. $(VENV)/bin/activate && python -m pylint --rcfile ../git/.pylintrc $(PYFILES)
+
+ci-yamllint: $(VENV)
+	@echo "#############################################"
+	@echo "# Running yamllint Tests in virtualenv"
+	@echo "#############################################"
+	@. $(VENV)/bin/activate && yamllint -c ../git/.yamllint $(YAMLFILES)
 
 ci-list-deps: $(VENV)
 	@echo "#############################################"
@@ -101,9 +111,9 @@ ci-flake8: $(VENV)
 	. $(VENV)/bin/activate && flake8 --config=setup.cfg ../ --exclude="utils,../inventory"
 	. $(VENV)/bin/activate && python setup.py flake8
 
-ci: ci-list-deps ci-unittests ci-flake8 ci-pylint
+ci: ci-list-deps ci-unittests ci-flake8 ci-pylint ci-yamllint
 	@echo
 	@echo "##################################################################################"
 	@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
 	@echo "To clean your test environment run 'make clean'"
-	@echo "Other targets you may run with 'make': 'ci-pylint', 'ci-unittests', 'ci-flake8'"
+	@echo "Other targets you may run with 'make': 'ci-pylint', 'ci-unittests', 'ci-flake8', 'ci-yamllint'"

+ 1 - 0
utils/test-requirements.txt

@@ -10,3 +10,4 @@ PyYAML
 click
 backports.functools_lru_cache
 pyOpenSSL
+yamllint