
Merge pull request #6380 from sdodson/oc-adm

Remove all uses of openshift.common.admin_binary
Author: Scott Dodson
Commit: 37ffebc86c

+ 1 - 1
docs/proposals/role_decomposition.md

@@ -262,7 +262,7 @@ dependencies:
 
 - name: "Create logging project"
   command: >
-    {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
+    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
   when: not ansible_check_mode and "not found" in logging_project_result.stderr
 
 - name: Create logging cert directory
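For reference, a minimal Python sketch of the create-if-missing behavior the task above implements; the `oc` binary on PATH and the probe standing in for the unseen `logging_project_result` lookup are illustrative assumptions:

```python
import subprocess

def ensure_project(name, kubeconfig):
    """Create an OpenShift project only if it does not already exist."""
    # Stand-in for the playbook's earlier lookup that registers
    # logging_project_result (not shown in this hunk).
    probe = subprocess.run(
        ['oc', 'get', 'project', name, '--config', kubeconfig],
        capture_output=True, text=True)
    if 'not found' in probe.stderr:
        # Mirrors the when: clause -- create only on "not found"
        subprocess.run(['oc', 'adm', 'new-project', name,
                        '--config', kubeconfig], check=True)
```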

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml

@@ -51,7 +51,7 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ openshift.common.client_binary }} adm drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
     register: l_docker_upgrade_drain_result

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml

@@ -305,7 +305,7 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_control_plane_drain_result
     until: not l_upgrade_control_plane_drain_result | failed
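The register/until pair retries the drain until it exits cleanly. A minimal Python sketch of the same behavior; the retry count and delay are illustrative, not the playbook's actual values:

```python
import subprocess
import time

def drain_node(node, kubeconfig, retries=60, delay=60):
    """Retry `oc adm drain` until it succeeds, like the until: loop above."""
    cmd = ['oc', 'adm', 'drain', node, '--config', kubeconfig,
           '--force', '--delete-local-data', '--ignore-daemonsets']
    for _ in range(retries):
        if subprocess.run(cmd).returncode == 0:
            return
        time.sleep(delay)
    raise RuntimeError('node %s did not drain after %d attempts'
                       % (node, retries))
```

The same drain-and-retry task recurs in the node and scale-group upgrade playbooks below.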

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml

@@ -26,7 +26,7 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not l_upgrade_nodes_drain_result | failed

+ 1 - 1
playbooks/common/openshift-cluster/upgrades/upgrade_scale_group.yml

@@ -42,7 +42,7 @@
 
   - name: Drain Node for Kubelet upgrade
     command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
+      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets
     delegate_to: "{{ groups.oo_first_master.0 }}"
     register: l_upgrade_nodes_drain_result
     until: not l_upgrade_nodes_drain_result | failed

+ 1 - 1
roles/etcd/tasks/migration/add_ttls.yml

@@ -11,7 +11,7 @@
 
 - name: Re-introduce leases (as a replacement for key TTLs)
   command: >
-    oadm migrate etcd-ttl \
+    {{ openshift.common.client_binary }} adm migrate etcd-ttl \
     --cert {{ r_etcd_common_master_peer_cert_file }} \
     --key {{ r_etcd_common_master_peer_key_file }} \
     --cacert {{ r_etcd_common_master_peer_ca_file }} \

+ 5 - 2
roles/openshift_cli/library/openshift_container_binary_sync.py

@@ -27,7 +27,7 @@ class BinarySyncError(Exception):
 # pylint: disable=too-few-public-methods,too-many-instance-attributes
 class BinarySyncer(object):
     """
-    Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of
+    Syncs the openshift, oc, and kubectl binaries/symlinks out of
     a container onto the host system.
     """
 
@@ -108,7 +108,10 @@ class BinarySyncer(object):
 
         # Ensure correct symlinks created:
         self._sync_symlink('kubectl', 'openshift')
-        self._sync_symlink('oadm', 'openshift')
+
+        # Remove old oadm binary
+        if os.path.exists(os.path.join(self.bin_dir, 'oadm')):
+            os.remove(os.path.join(self.bin_dir, 'oadm'))
 
     def _sync_symlink(self, binary_name, link_to):
         """ Ensure the given binary name exists and links to the expected binary. """

+ 1 - 1
roles/openshift_docker_gc/templates/dockergc-ds.yaml.j2

@@ -5,7 +5,7 @@ items:
   kind: ServiceAccount
   metadata:
     name: dockergc
-  # You must grant privileged via: oadm policy add-scc-to-user -z dockergc privileged
+  # You must grant privileged via: oc adm policy add-scc-to-user -z dockergc privileged
   # in order for the dockergc to access the docker socket and root directory
 - apiVersion: extensions/v1beta1
   kind: DaemonSet

+ 1 - 2
roles/openshift_facts/library/openshift_facts.py

@@ -1410,7 +1410,6 @@ def set_container_facts_if_unset(facts):
             facts['node']['ovs_system_image'] = ovs_image
 
     if safe_get_bool(facts['common']['is_containerized']):
-        facts['common']['admin_binary'] = '/usr/local/bin/oadm'
         facts['common']['client_binary'] = '/usr/local/bin/oc'
 
     return facts
@@ -1595,7 +1594,7 @@ class OpenShiftFacts(object):
                                   hostname=hostname,
                                   public_hostname=hostname,
                                   portal_net='172.30.0.0/16',
-                                  client_binary='oc', admin_binary='oadm',
+                                  client_binary='oc',
                                   dns_domain='cluster.local',
                                   config_base='/etc/origin')
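Taken together, the two hunks leave `client_binary` as the only CLI fact. A condensed reconstruction of the resulting defaults, using only values shown above:

```python
def common_cli_defaults(is_containerized):
    """Condensed view of the CLI-related facts after this change."""
    facts = {'client_binary': 'oc'}
    if is_containerized:
        # containerized hosts invoke the synced binary by absolute path
        facts['client_binary'] = '/usr/local/bin/oc'
    return facts
```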
 

+ 3 - 3
roles/openshift_node/README.md

@@ -33,9 +33,9 @@ Notes
 Currently we support re-labeling nodes, but we neither re-schedule running pods nor remove existing labels. That means you will have to trigger the re-scheduling manually. To re-schedule your pods, follow the steps below:
 
 ```
-oadm manage-node --schedulable=false ${NODE}
-oadm manage-node --drain ${NODE}
-oadm manage-node --schedulable=true ${NODE}
+oc adm manage-node --schedulable=false ${NODE}
+oc adm manage-node --drain ${NODE}
+oc adm manage-node --schedulable=true ${NODE}
 ```
 
 > If you are using a version earlier than 1.5/3.5, you must replace `--drain` with `--evacuate`.

+ 1 - 1
roles/openshift_provisioners/tasks/install_efs.yaml

@@ -66,7 +66,7 @@
 
 - name: "Set anyuid permissions for efs"
   command: >
-    {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+    {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy
     add-scc-to-user anyuid system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs
   register: efs_output
   failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr
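The failed_when clause makes the grant idempotent: a re-run that reports the permission already exists is not treated as a failure. A minimal Python sketch of that tolerance, using only the command and flags shown above:

```python
import subprocess

def add_scc_to_user(scc, user, kubeconfig):
    """Grant an SCC, treating an 'exists' error as success (idempotent)."""
    result = subprocess.run(
        ['oc', 'adm', '--config', kubeconfig, 'policy',
         'add-scc-to-user', scc, user],
        capture_output=True, text=True)
    # Mirrors failed_when: fail only on rc == 1 without 'exists' in stderr
    if result.returncode == 1 and 'exists' not in result.stderr:
        raise RuntimeError(result.stderr)
```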