Browse sources

Switch from "oadm" to "oc adm" and fix bug in binary sync.

Fixed a bug when syncing binaries to containerized hosts: if a symlink
already existed but pointed to the wrong destination, it was not
corrected.

Switched to using oc adm instead of oadm.
Devan Goodwin 8 years ago
parent
commit
3ea0166aa3

+ 3 - 3
playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -25,13 +25,13 @@
   tasks:
   - name: Prepare for Node evacuation
     command: >
-      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
 
   - name: Evacuate Node for Kubelet upgrade
     command: >
-      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
 
@@ -40,7 +40,7 @@
 
   - name: Set node schedulability
     command: >
-      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool

+ 3 - 3
playbooks/common/openshift-cluster/redeploy-certificates.yml

@@ -224,7 +224,7 @@
 
   - name: Prepare for node evacuation
     command: >
-      {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+      {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
       manage-node {{ openshift.node.nodename }}
       --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -232,7 +232,7 @@
 
   - name: Evacuate node
     command: >
-      {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+      {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
       manage-node {{ openshift.node.nodename }}
       --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
@@ -240,7 +240,7 @@
 
   - name: Set node schedulability
     command: >
-      {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
+      {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
       manage-node {{ openshift.node.nodename }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml

@@ -22,7 +22,7 @@
 
   - name: Create service signer certificate
     command: >
-      {{ openshift.common.admin_binary }} ca create-signer-cert
+      {{ openshift.common.client_binary }} adm ca create-signer-cert
       --cert=service-signer.crt
       --key=service-signer.key
       --name=openshift-service-serving-signer

+ 3 - 7
playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml

@@ -200,19 +200,15 @@
     # restart.
     skip_docker_role: True
   tasks:
-  - name: Verifying the correct commandline tools are available
-    shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
-    when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
-
   - name: Reconcile Cluster Roles
     command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-roles --additive-only=true --confirm
     run_once: true
 
   - name: Reconcile Cluster Role Bindings
     command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
       policy reconcile-cluster-role-bindings
       --exclude-groups=system:authenticated
       --exclude-groups=system:authenticated:oauth
@@ -224,7 +220,7 @@
 
   - name: Reconcile Security Context Constraints
     command: >
-      {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true
+      {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true
     run_once: true
 
   - set_fact:

+ 3 - 3
playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

@@ -29,7 +29,7 @@
 
   - name: Mark unschedulable if host is a node
     command: >
-      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
     # NOTE: There is a transient "object has been modified" error here, allow a couple
@@ -41,7 +41,7 @@
 
   - name: Evacuate Node for Kubelet upgrade
     command: >
-      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade
   tasks:
@@ -64,7 +64,7 @@
 
   - name: Set node schedulability
     command: >
-      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true
+      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool
     register: node_sched

+ 3 - 3
roles/nuage_master/tasks/serviceaccount.yml

@@ -16,7 +16,7 @@
   shell: >
     echo {{ nuage_service_account_config | to_json | quote }} |
     {{ openshift.common.client_binary }} create
-    -n default 
+    -n default
     --config={{nuage_tmp_conf}}
     -f -
   register: osnuage_create_service_account
@@ -25,7 +25,7 @@
 
 - name: Configure role/user permissions
   command: >
-    {{ openshift.common.admin_binary }} {{item}}
+    {{ openshift.common.client_binary }} adm {{item}}
     --config={{nuage_tmp_conf}}
   with_items: "{{nuage_tasks}}"
   register: osnuage_perm_task
@@ -34,7 +34,7 @@
 
 - name: Generate the node client config
   command: >
-    {{ openshift.common.admin_binary }} create-api-client-config
+    {{ openshift.common.client_binary }} adm create-api-client-config
       --certificate-authority={{ openshift_master_ca_cert }}
       --client-dir={{ cert_output_dir }}
       --master={{ openshift.master.api_url }}

+ 1 - 1
roles/openshift_ca/tasks/main.yml

@@ -80,7 +80,7 @@
 
 - name: Create the master certificates if they do not already exist
   command: >
-    {{ openshift.common.admin_binary }} create-master-certs
+    {{ openshift.common.client_binary }} adm create-master-certs
     {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
     --certificate-authority {{ named_ca_certificate }}
     {% endfor %}

+ 6 - 1
roles/openshift_cli/library/openshift_container_binary_sync.py

@@ -83,8 +83,13 @@ class BinarySyncer(object):
 
     def _sync_symlink(self, binary_name, link_to):
         """ Ensure the given binary name exists and links to the expected binary. """
+
+        # The symlink we are creating:
         link_path = os.path.join(self.bin_dir, binary_name)
-        link_dest = os.path.join(self.bin_dir, binary_name)
+
+        # The expected file we should be linking to:
+        link_dest = os.path.join(self.bin_dir, link_to)
+
         if not os.path.exists(link_path) or \
                 not os.path.islink(link_path) or \
                 os.path.realpath(link_path) != os.path.realpath(link_dest):

+ 1 - 1
roles/openshift_hosted/tasks/registry/registry.yml

@@ -30,7 +30,7 @@
 
 - name: Create OpenShift registry
   command: >
-    {{ openshift.common.admin_binary }} registry --create
+    {{ openshift.common.client_binary }} adm registry --create
     --config={{ openshift_hosted_kubeconfig }}
     {% if replicas > 1 -%}
     --replicas={{ replicas }}

+ 1 - 1
roles/openshift_hosted/tasks/registry/secure.yml

@@ -33,7 +33,7 @@
 
 - name: Create registry certificates if they do not exist
   command: >
-    {{ openshift.common.admin_binary }} ca create-server-cert
+    {{ openshift.common.client_binary }} adm ca create-server-cert
     --signer-cert=/etc/origin/master/ca.crt
     --signer-key=/etc/origin/master/ca.key
     --signer-serial=/etc/origin/master/ca.serial.txt

+ 2 - 2
roles/openshift_hosted/tasks/router/router.yml

@@ -48,7 +48,7 @@
 
 - name: Create OpenShift router
   command: >
-    {{ openshift.common.admin_binary }} router --create
+    {{ openshift.common.client_binary }} adm router --create
     --config={{ openshift_hosted_kubeconfig }}
     {% if replicas > 1 -%}
     --replicas={{ replicas }}
@@ -73,7 +73,7 @@
     {% if openshift.hosted.router.name | default(none) is not none -%}
     {{ openshift.hosted.router.name }}
     {% endif -%}
-    
+
   register: openshift_hosted_router_results
   changed_when: "'service exists' not in openshift_hosted_router_results.stdout"
   failed_when: "openshift_hosted_router_results.rc != 0 and 'service exists' not in openshift_hosted_router_results.stdout and 'deployment_config' not in openshift_hosted_router_results.stderr and 'service' not in openshift_hosted_router_results.stderr"

+ 4 - 4
roles/openshift_hosted_logging/tasks/deploy_logging.yaml

@@ -25,7 +25,7 @@
 
   - name: "Create logging project"
     command: >
-      {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
+      {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging
     when: logging_project_result.stdout == ""
 
   - name: "Changing projects"
@@ -51,19 +51,19 @@
 
   - name: "Set permissions for logging-deployer service account"
     command: >
-      {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
+      {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy add-cluster-role-to-user oauth-editor system:serviceaccount:logging:logging-deployer
     register: permiss_output
     failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
 
   - name: "Set permissions for fluentd"
     command: >
-      {{ openshift.common.admin_binary}} policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
+      {{ openshift.common.client_binary }} adm policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd
     register: fluentd_output
     failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
 
   - name: "Set additional permissions for fluentd"
     command: >
-      {{ openshift.common.admin_binary}} policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
+      {{ openshift.common.client_binary }} adm policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd
     register: fluentd2_output
     failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
 

+ 1 - 1
roles/openshift_manage_node/tasks/main.yml

@@ -26,7 +26,7 @@
 
 - name: Set node schedulability
   command: >
-    {{ openshift.common.admin_binary }} manage-node {{ hostvars[item].openshift.node.nodename }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }}
+    {{ openshift.common.client_binary }} adm manage-node {{ hostvars[item].openshift.node.nodename }} --schedulable={{ 'true' if hostvars[item].openshift.node.schedulable | bool else 'false' }}
     --config={{ openshift_manage_node_kubeconfig }}
     -n default
   with_items: "{{ openshift_nodes }}"

+ 3 - 3
roles/openshift_manageiq/tasks/main.yaml

@@ -10,7 +10,7 @@
 
 - name: Add Managment Infrastructure project
   command: >
-    {{ openshift.common.admin_binary }} new-project
+    {{ openshift.common.client_binary }} adm new-project
     management-infra
     --description="Management Infrastructure"
     --config={{manage_iq_tmp_conf}}
@@ -52,7 +52,7 @@
 
 - name: Configure role/user permissions
   command: >
-    {{ openshift.common.admin_binary }} {{item}}
+    {{ openshift.common.client_binary }} adm {{item}}
     --config={{manage_iq_tmp_conf}}
   with_items: "{{manage_iq_tasks}}"
   register: osmiq_perm_task
@@ -61,7 +61,7 @@
 
 - name: Configure 3_2 role/user permissions
   command: >
-    {{ openshift.common.admin_binary }} {{item}}
+    {{ openshift.common.client_binary }} adm {{item}}
     --config={{manage_iq_tmp_conf}}
   with_items: "{{manage_iq_openshift_3_2_tasks}}"
   register: osmiq_perm_3_2_task

+ 1 - 1
roles/openshift_master/tasks/main.yml

@@ -57,7 +57,7 @@
 
 - name: Create the policy file if it does not already exist
   command: >
-    {{ openshift.common.admin_binary }} create-bootstrap-policy-file
+    {{ openshift.common.client_binary }} adm create-bootstrap-policy-file
       --filename={{ openshift_master_policy }}
   args:
     creates: "{{ openshift_master_policy }}"

+ 1 - 1
roles/openshift_master_certificates/tasks/main.yml

@@ -52,7 +52,7 @@
 
 - name: Create the master certificates if they do not already exist
   command: >
-    {{ openshift.common.admin_binary }} create-master-certs
+    {{ openshift.common.client_binary }} adm create-master-certs
     {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
     --certificate-authority {{ named_ca_certificate }}
     {% endfor %}

+ 2 - 2
roles/openshift_metrics/tasks/install.yml

@@ -30,7 +30,7 @@
 
 - name: Add edit permission to the openshift-infra project to metrics-deployer SA
   command: >
-    {{ openshift.common.admin_binary }}
+    {{ openshift.common.client_binary }} adm
     --config={{ openshift_metrics_kubeconfig }}
     --namespace openshift-infra
     policy add-role-to-user edit
@@ -48,7 +48,7 @@
 
 - name: Add cluster-reader permission to the openshift-infra project to heapster SA
   command: >
-    {{ openshift.common.admin_binary }}
+    {{ openshift.common.client_binary }} adm
     --config={{ openshift_metrics_kubeconfig }}
     --namespace openshift-infra
     policy add-cluster-role-to-user cluster-reader

+ 2 - 2
roles/openshift_node_certificates/tasks/main.yml

@@ -44,7 +44,7 @@
 
 - name: Generate the node client config
   command: >
-    {{ openshift.common.admin_binary }} create-api-client-config
+    {{ openshift.common.client_binary }} adm create-api-client-config
       {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
       --certificate-authority {{ named_ca_certificate }}
       {% endfor %}
@@ -63,7 +63,7 @@
 
 - name: Generate the node server certificate
   command: >
-    {{ openshift.common.admin_binary }} ca create-server-cert
+    {{ openshift.common.client_binary }} adm ca create-server-cert
       --cert={{ openshift_node_generated_config_dir }}/server.crt
       --key={{ openshift_generated_configs_dir }}/node-{{ openshift.common.hostname }}/server.key
       --overwrite=true

+ 1 - 1
roles/openshift_projects/tasks/main.yml

@@ -20,7 +20,7 @@
 
 - name: Create projects
   command: >
-    {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig
     new-project {{ item.item.key }}
     {% if item.item.value.default_node_selector | default(none) != none %}
     {{ '--node-selector=' ~ item.item.value.default_node_selector }}

+ 1 - 1
roles/openshift_serviceaccounts/tasks/main.yml

@@ -26,7 +26,7 @@
 
 - name: Grant the user access to the appropriate scc
   command: >
-      {{ openshift.common.admin_binary }} policy add-scc-to-user
+      {{ openshift.common.client_binary }} adm policy add-scc-to-user
       {{ item.1.item }} system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}
   when: "openshift.common.version_gte_3_1_or_1_1 and item.1.rc == 0 and 'system:serviceaccount:{{ openshift_serviceaccounts_namespace }}:{{ item.0 }}' not in {{ (item.1.stdout | from_yaml).users | default([]) }}"
   with_nested: