소스 검색

Remove become statements

This commit removes become:no statements that break
the installer in various ways.
Michael Gugino 7 년 전
부모
커밋
3b07acdcd4
26개의 변경된 파일, 3개의 추가 작업 그리고 50개의 삭제 작업
  1. 0 2
      playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
  2. 0 2
      playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
  3. 0 1
      playbooks/init/evaluate_groups.yml
  4. 0 2
      playbooks/openshift-etcd/private/embedded2external.yml
  5. 0 2
      playbooks/openshift-etcd/private/migrate.yml
  6. 0 2
      playbooks/openshift-etcd/private/redeploy-ca.yml
  7. 0 1
      playbooks/openshift-etcd/private/upgrade_backup.yml
  8. 0 2
      playbooks/openshift-master/private/redeploy-openshift-ca.yml
  9. 0 1
      playbooks/openshift-master/private/tasks/restart_hosts.yml
  10. 0 2
      playbooks/openshift-master/private/validate_restart.yml
  11. 0 1
      playbooks/openshift-master/scaleup.yml
  12. 0 1
      playbooks/openshift-node/private/setup.yml
  13. 0 1
      playbooks/openshift-node/scaleup.yml
  14. 0 1
      roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
  15. 0 2
      roles/openshift_certificate_expiry/tasks/main.yml
  16. 0 2
      roles/openshift_expand_partition/README.md
  17. 0 6
      roles/openshift_logging/tasks/generate_jks.yaml
  18. 0 2
      roles/openshift_logging/tasks/main.yaml
  19. 0 1
      roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
  20. 0 2
      roles/openshift_master_certificates/tasks/main.yml
  21. 0 1
      roles/openshift_named_certificates/tasks/main.yml
  22. 3 5
      roles/openshift_storage_nfs_lvm/README.md
  23. 0 2
      roles/openshift_web_console/tasks/install.yml
  24. 0 2
      roles/openshift_web_console/tasks/update_asset_config.yml
  25. 0 2
      roles/template_service_broker/tasks/install.yml
  26. 0 2
      roles/template_service_broker/tasks/remove.yml

+ 0 - 2
playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml

@@ -2,7 +2,6 @@
 - name: Create local temp directory for syncing certs
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Create local temp directory for syncing certs
@@ -65,7 +64,6 @@
 - name: Delete local temp directory
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Delete local temp directory

+ 0 - 2
playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml

@@ -108,7 +108,6 @@
 - name: Gate on master update
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       master_update_completed: "{{ hostvars
@@ -242,7 +241,6 @@
 - name: Gate on reconcile
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       reconcile_completed: "{{ hostvars

+ 0 - 1
playbooks/init/evaluate_groups.yml

@@ -2,7 +2,6 @@
 - name: Populate config host groups
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Load group name mapping variables

+ 0 - 2
playbooks/openshift-etcd/private/embedded2external.yml

@@ -89,7 +89,6 @@
     local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
     register: g_etcd_client_mktemp
     changed_when: False
-    become: no
 
   - import_role:
       name: etcd
@@ -116,7 +115,6 @@
   - name: Delete temporary directory
     local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
     changed_when: False
-    become: no
 
 # 7. force new cluster from the backup
 - name: Force new etcd cluster

+ 0 - 2
playbooks/openshift-etcd/private/migrate.yml

@@ -2,7 +2,6 @@
 - name: Check if the master has embedded etcd
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tags:
   - always
@@ -53,7 +52,6 @@
 - name: Gate on etcd backup
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       etcd_backup_completed: "{{ hostvars

+ 0 - 2
playbooks/openshift-etcd/private/redeploy-ca.yml

@@ -26,7 +26,6 @@
 - name: Create temp directory for syncing certs
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Create local temp directory for syncing certs
@@ -74,7 +73,6 @@
 - name: Delete temporary directory on localhost
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - file:

+ 0 - 1
playbooks/openshift-etcd/private/upgrade_backup.yml

@@ -14,7 +14,6 @@
 - name: Gate on etcd backup
   hosts: localhost
   connection: local
-  become: no
   tasks:
   - set_fact:
       etcd_backup_completed: "{{ hostvars

+ 0 - 2
playbooks/openshift-master/private/redeploy-openshift-ca.yml

@@ -125,7 +125,6 @@
 - name: Create temp directory for syncing certs
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - name: Create local temp directory for syncing certs
@@ -264,7 +263,6 @@
 - name: Delete temporary directory on localhost
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - file:

+ 0 - 1
playbooks/openshift-master/private/tasks/restart_hosts.yml

@@ -27,7 +27,6 @@
       delay=10
       timeout=600
       port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}"
-  become: no
 
 # Now that ssh is back up we can wait for API on the remote system,
 # avoiding some potential connection issues from local system:

+ 0 - 2
playbooks/openshift-master/private/validate_restart.yml

@@ -21,7 +21,6 @@
 - name: Create temp file on localhost
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - local_action: command mktemp
@@ -38,7 +37,6 @@
 - name: Cleanup temp file on localhost
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent

+ 0 - 1
playbooks/openshift-master/scaleup.yml

@@ -4,7 +4,6 @@
 - name: Ensure there are new_masters or new_nodes
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - fail:

+ 0 - 1
playbooks/openshift-node/private/setup.yml

@@ -8,7 +8,6 @@
 
 - name: Evaluate node groups
   hosts: localhost
-  become: no
   connection: local
   tasks:
   - name: Evaluate oo_containerized_master_nodes

+ 0 - 1
playbooks/openshift-node/scaleup.yml

@@ -4,7 +4,6 @@
 - name: Ensure there are new_nodes
   hosts: localhost
   connection: local
-  become: no
   gather_facts: no
   tasks:
   - fail:

+ 0 - 1
roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py

@@ -31,7 +31,6 @@ certificates
 Example playbook usage:
 
   - name: Generate expiration results JSON
-    become: no
     run_once: yes
     delegate_to: localhost
     when: openshift_certificate_expiry_save_json_results|bool

+ 0 - 2
roles/openshift_certificate_expiry/tasks/main.yml

@@ -7,7 +7,6 @@
   register: check_results
 
 - name: Generate expiration report HTML
-  become: no
   run_once: yes
   template:
     src: cert-expiry-table.html.j2
@@ -21,7 +20,6 @@
   when: openshift_certificate_expiry_save_json_results|bool
 
 - name: Generate results JSON file
-  become: no
   run_once: yes
   template:
     src: save_json_results.j2

+ 0 - 2
roles/openshift_expand_partition/README.md

@@ -45,7 +45,6 @@ space on /dev/xvda, and the file system will be expanded to fill the new
 partition space.
 
     - hosts: mynodes
-      become: no
       remote_user: root
       gather_facts: no
       roles:
@@ -68,7 +67,6 @@ partition space.
 * Create an ansible playbook, say `expandvar.yaml`:
     ```
     - hosts: mynodes
-      become: no
       remote_user: root
       gather_facts: no
       roles:

+ 0 - 6
roles/openshift_logging/tasks/generate_jks.yaml

@@ -24,25 +24,21 @@
   local_action: file path="{{local_tmp.stdout}}/elasticsearch.jks" state=touch mode="u=rw,g=r,o=r"
   when: elasticsearch_jks.stat.exists
   changed_when: False
-  become: no
 
 - name: Create placeholder for previously created JKS certs to prevent recreating...
   local_action: file path="{{local_tmp.stdout}}/logging-es.jks" state=touch mode="u=rw,g=r,o=r"
   when: logging_es_jks.stat.exists
   changed_when: False
-  become: no
 
 - name: Create placeholder for previously created JKS certs to prevent recreating...
   local_action: file path="{{local_tmp.stdout}}/system.admin.jks" state=touch mode="u=rw,g=r,o=r"
   when: system_admin_jks.stat.exists
   changed_when: False
-  become: no
 
 - name: Create placeholder for previously created JKS certs to prevent recreating...
   local_action: file path="{{local_tmp.stdout}}/truststore.jks" state=touch mode="u=rw,g=r,o=r"
   when: truststore_jks.stat.exists
   changed_when: False
-  become: no
 
 - name: pulling down signing items from host
   fetch:
@@ -61,12 +57,10 @@
   vars:
     - top_dir: "{{local_tmp.stdout}}"
   when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
-  become: no
 
 - name: Run JKS generation script
   local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}}
   check_mode: no
-  become: no
   when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
 
 - name: Pushing locally generated JKS certs to remote host...

+ 0 - 2
roles/openshift_logging/tasks/main.yaml

@@ -17,7 +17,6 @@
   register: local_tmp
   changed_when: False
   check_mode: no
-  become: no
 
 - include_tasks: install_logging.yaml
   when:
@@ -31,4 +30,3 @@
   local_action: file path="{{local_tmp.stdout}}" state=absent
   tags: logging_cleanup
   changed_when: False
-  become: no

+ 0 - 1
roles/openshift_logging_fluentd/tasks/label_and_wait.yaml

@@ -8,4 +8,3 @@
 
 # wait half a second between labels
 - local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }}
-  become: no

+ 0 - 2
roles/openshift_master_certificates/tasks/main.yml

@@ -120,7 +120,6 @@
   register: g_master_certs_mktemp
   changed_when: False
   when: master_certs_missing | bool
-  become: no
 
 - name: Create a tarball of the master certs
   command: >
@@ -157,7 +156,6 @@
   local_action: file path="{{ g_master_certs_mktemp.stdout }}" state=absent
   changed_when: False
   when: master_certs_missing | bool
-  become: no
 
 - name: Lookup default group for ansible_ssh_user
   command: "/usr/bin/id -g {{ ansible_ssh_user | quote }}"

+ 0 - 1
roles/openshift_named_certificates/tasks/main.yml

@@ -3,7 +3,6 @@
     parsed_named_certificates: "{{ named_certificates | lib_utils_oo_parse_named_certificates(named_certs_dir, internal_hostnames) }}"
   when: named_certificates | length > 0
   delegate_to: localhost
-  become: no
   run_once: true
 
 - openshift_facts:

+ 3 - 5
roles/openshift_storage_nfs_lvm/README.md

@@ -1,7 +1,7 @@
 # openshift_storage_nfs_lvm
 
 This role is useful to create and export nfs disks for openshift persistent volumes.
-It does so by creating lvm partitions on an already setup pv/vg, creating xfs 
+It does so by creating lvm partitions on an already setup pv/vg, creating xfs
 filesystem on each partition, mounting the partitions, exporting the mounts via NFS
 and creating a json file for each mount that an openshift master can use to
 create persistent volumes.
@@ -20,7 +20,7 @@ create persistent volumes.
 osnl_nfs_export_options: "*(rw,sync,all_squash)"
 
 # Directory, where the created partitions should be mounted. They will be
-# mounted as <osnl_mount_dir>/<lvm volume name> 
+# mounted as <osnl_mount_dir>/<lvm volume name>
 osnl_mount_dir: /exports/openshift
 
 # Volume Group to use.
@@ -64,11 +64,10 @@ None
 ## Example Playbook
 
 With this playbook, 2 5Gig lvm partitions are created, named stg5g0003 and stg5g0004
-Both of them are mounted into `/exports/openshift` directory.  Both directories are 
+Both of them are mounted into `/exports/openshift` directory.  Both directories are
 exported via NFS.  json files are created in /root.
 
     - hosts: nfsservers
-      become: no
       remote_user: root
       gather_facts: no
       roles:
@@ -94,7 +93,6 @@ exported via NFS.  json files are created in /root.
 * Create an ansible playbook, say `setupnfs.yaml`:
     ```
     - hosts: nfsservers
-      become: no
       remote_user: root
       gather_facts: no
       roles:

+ 0 - 2
roles/openshift_web_console/tasks/install.yml

@@ -23,7 +23,6 @@
   command: mktemp -d /tmp/console-ansible-XXXXXX
   register: mktemp
   changed_when: False
-  become: no
 
 - name: Copy asset config template to temp directory
   copy:
@@ -76,4 +75,3 @@
     state: absent
     name: "{{ mktemp.stdout }}"
   changed_when: False
-  become: no

+ 0 - 2
roles/openshift_web_console/tasks/update_asset_config.yml

@@ -30,7 +30,6 @@
   command: mktemp -d /tmp/console-ansible-XXXXXX
   register: mktemp
   changed_when: False
-  become: no
 
 - name: Copy asset config to temp file
   copy:
@@ -55,7 +54,6 @@
     state: absent
     name: "{{ mktemp.stdout }}"
   changed_when: False
-  become: no
 
 # There's currently no command to trigger a rollout for a k8s deployment
 # without changing the pod spec. Add an annotation to force a rollout after

+ 0 - 2
roles/template_service_broker/tasks/install.yml

@@ -21,7 +21,6 @@
 - command: mktemp -d /tmp/tsb-ansible-XXXXXX
   register: mktemp
   changed_when: False
-  become: no
 
 - copy:
     src: "{{ __tsb_files_location }}/{{ item }}"
@@ -86,4 +85,3 @@
     state: absent
     name: "{{ mktemp.stdout }}"
   changed_when: False
-  become: no

+ 0 - 2
roles/template_service_broker/tasks/remove.yml

@@ -2,7 +2,6 @@
 - command: mktemp -d /tmp/tsb-ansible-XXXXXX
   register: mktemp
   changed_when: False
-  become: no
 
 - copy:
     src: "{{ __tsb_files_location }}/{{ item }}"
@@ -32,4 +31,3 @@
     state: absent
     name: "{{ mktemp.stdout }}"
   changed_when: False
-  become: no