
Ensure we're running with admin kubeconfig in several locations

Fixes https://bugzilla.redhat.com/show_bug.cgi?id=1576527
Scott Dodson 7 years ago
parent
commit
ccbda0ffbb
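
Every hunk below applies the same pattern: an `{{ openshift_client_binary }}` invocation that either hardcoded `/etc/origin/master/admin.kubeconfig` or relied on the calling user's default kubeconfig now passes `--config={{ openshift.common.config_base }}/master/admin.kubeconfig` explicitly. A minimal before/after sketch of that pattern, using an illustrative task name, namespace, and annotation that are not taken from the commit:

# Before: the client falls back to the caller's ambient kubeconfig
# (or a hardcoded /etc/origin/master/admin.kubeconfig path).
- name: Annotate a serviceaccount (illustrative)
  command: >
    {{ openshift_client_binary }} annotate --overwrite -n example-namespace
    serviceaccount example-sa example-key=example-value

# After: always run against the admin kubeconfig rooted at the
# deployment's config_base, so the task does not depend on whatever
# credentials the invoking user happens to have.
- name: Annotate a serviceaccount (illustrative)
  command: >
    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
    annotate --overwrite -n example-namespace
    serviceaccount example-sa example-key=example-value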

+ 1 - 1
playbooks/openshift-node/private/join.yml

@@ -37,7 +37,7 @@
     block:
     - name: Get CSRs
       command: >
-        {{ openshift_client_binary }} describe csr --config=/etc/origin/master/admin.kubeconfig
+        {{ openshift_client_binary }} describe csr --config={{ openshift.common.config_base }}/master/admin.kubeconfig
     - name: Report approval errors
       fail:
         msg: Node approval failed

+ 1 - 1
roles/openshift_control_plane/tasks/bootstrap.yml

@@ -3,7 +3,7 @@
 # oc_serviceaccounts_kubeconfig
 - name: create service account kubeconfig with csr rights
   command: >
-    {{ openshift_client_binary }} serviceaccounts create-kubeconfig {{ openshift_master_csr_sa }} -n {{ openshift_master_csr_namespace }}
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig serviceaccounts create-kubeconfig {{ openshift_master_csr_sa }} -n {{ openshift_master_csr_namespace }}
   register: kubeconfig_out
   until: kubeconfig_out.rc == 0
   retries: 24

+ 3 - 3
roles/openshift_control_plane/tasks/main.yml

@@ -168,21 +168,21 @@
   block:
   - name: Check status in the kube-system namespace
     command: >
-      {{ openshift_client_binary }} status --config=/etc/origin/master/admin.kubeconfig -n kube-system
+      {{ openshift_client_binary }} status --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n kube-system
     register: control_plane_status
     ignore_errors: true
   - debug:
       msg: "{{ control_plane_status.stdout_lines }}"
   - name: Get pods in the kube-system namespace
     command: >
-      {{ openshift_client_binary }} get pods --config=/etc/origin/master/admin.kubeconfig -n kube-system -o wide
+      {{ openshift_client_binary }} get pods --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n kube-system -o wide
     register: control_plane_pods
     ignore_errors: true
   - debug:
       msg: "{{ control_plane_pods.stdout_lines }}"
   - name: Get events in the kube-system namespace
     command: >
-      {{ openshift_client_binary }} get events --config=/etc/origin/master/admin.kubeconfig -n kube-system
+      {{ openshift_client_binary }} get events --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n kube-system
     register: control_plane_events
     ignore_errors: true
   - debug:

+ 1 - 1
roles/openshift_docker_gc/tasks/main.yml

@@ -5,7 +5,7 @@
 
 # NOTE: oc_adm_policy_user does not support -z (yet)
 - name: Add dockergc as privileged
-  shell: "{{ openshift_client_binary }}  adm policy add-scc-to-user -z dockergc privileged"
+  command: "{{ openshift_client_binary }}  adm policy add-scc-to-user -z dockergc privileged"
 #  oc_adm_policy_user:
 #    user: dockergc
 #    resource_kind: scc

+ 2 - 1
roles/openshift_grafana/tasks/install_grafana.yaml

@@ -25,7 +25,8 @@
 # TODO remove this when annotations are supported by oc_serviceaccount
 - name: annotate serviceaccount
   command: >
-    {{ openshift_client_binary }} annotate --overwrite -n {{ grafana_namespace }}
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+    annotate --overwrite -n {{ grafana_namespace }}
     serviceaccount {{ grafana_serviceaccount_name }} {{ item }}
   with_items:
     "{{ grafana_serviceaccount_annotations }}"

+ 3 - 1
roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml

@@ -16,7 +16,9 @@
     dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
 
 - name: Create GlusterFS registry service and endpoint
-  command: "{{ openshift_client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"
+  command: >
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+    apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}
   with_items:
   - "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"
   - "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml"

+ 3 - 3
roles/openshift_hosted/tasks/wait_for_pod.yml

@@ -5,7 +5,7 @@
     command: |
       {{ openshift_client_binary }} rollout status deploymentconfig {{ item.name }} \
                         --namespace {{ item.namespace | default('default') }} \
-                        --config /etc/origin/master/admin.kubeconfig
+                        --config={{ openshift.common.config_base }}/master/admin.kubeconfig
     async: 600
     poll: 5
     with_items: "{{ l_openshift_hosted_wfp_items }}"
@@ -15,7 +15,7 @@
     command: |
       {{ openshift_client_binary }} get deploymentconfig {{ item.name }} \
              --namespace {{ item.namespace }} \
-             --config /etc/origin/master/admin.kubeconfig \
+             --config={{ openshift.common.config_base }}/master/admin.kubeconfig \
              -o jsonpath='{ .status.latestVersion }'
     register: l_openshift_hosted_wfp_latest_version
     with_items: "{{ l_openshift_hosted_wfp_items }}"
@@ -24,7 +24,7 @@
     command: |
       {{ openshift_client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \
              --namespace {{ item.0.namespace }} \
-             --config /etc/origin/master/admin.kubeconfig \
+             --config={{ openshift.common.config_base }}/master/admin.kubeconfig \
              -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }'
     register: openshift_hosted_wfp_rc_phase
     until: "'Complete' in openshift_hosted_wfp_rc_phase.stdout"

+ 2 - 2
roles/openshift_logging_elasticsearch/tasks/get_es_version.yml

@@ -17,7 +17,7 @@
 
 - name: "Getting ES version for logging-es cluster"
   command: >
-    {{ openshift_client_binary }} exec {{ available_pod }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig exec {{ available_pod }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
   register: _curl_output
   when: available_pod is defined
 
@@ -39,7 +39,7 @@
 
 - name: "Getting ES version for logging-es-ops cluster"
   command: >
-    {{ openshift_client_binary }} exec {{ available_ops_pod }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig exec {{ available_ops_pod }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XGET 'https://localhost:9200/'
   register: _ops_curl_output
   when: available_ops_pod is defined
 

+ 1 - 1
roles/openshift_logging_elasticsearch/tasks/main.yaml

@@ -310,7 +310,7 @@
       _restart_logging_components: "{{ _restart_logging_components | default([]) + [es_component] | unique }}"
 
   - shell: >
-      {{ openshift_client_binary }} get dc -l component="{{ es_component }}" -n "{{ openshift_logging_elasticsearch_namespace }}" -o name | cut -d'/' -f2
+      {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get dc -l component="{{ es_component }}" -n "{{ openshift_logging_elasticsearch_namespace }}" -o name | cut -d'/' -f2
     register: _es_dcs
 
   - set_fact:

+ 7 - 7
roles/openshift_logging_elasticsearch/tasks/restart_cluster.yml

@@ -1,13 +1,13 @@
 ---
 ## get all pods for the cluster
 - command: >
-    {{ openshift_client_binary }} get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
   register: _cluster_pods
 
 ### Check for cluster state before making changes -- if its red then we don't want to continue
 - name: "Checking current health for {{ _es_node }} cluster"
   shell: >
-    {{ openshift_client_binary }} exec "{{ _cluster_pods.stdout.split(' ')[0] }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig exec "{{ _cluster_pods.stdout.split(' ')[0] }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
   register: _pod_status
   when: _cluster_pods.stdout_lines | count > 0
 
@@ -46,7 +46,7 @@
 
   - name: "Disable shard balancing for logging-{{ _cluster_component }} cluster"
     command: >
-      {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "none" } }'
+      {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "none" } }'
     register: _disable_output
     changed_when: "'\"acknowledged\":true' in _disable_output.stdout"
     when: _cluster_pods.stdout_lines | count > 0
@@ -54,7 +54,7 @@
   # Flush ES
   - name: "Flushing for logging-{{ _cluster_component }} cluster"
     command: >
-      {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced'
+      {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced'
     register: _flush_output
     changed_when: "'\"acknowledged\":true' in _flush_output.stdout"
     when:
@@ -62,7 +62,7 @@
     - full_restart_cluster | bool
 
   - command: >
-      {{ openshift_client_binary }} get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+      {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
     register: _cluster_dcs
 
   # If we are currently restarting the "es" cluster we want to check if we are scaling up the number of es nodes
@@ -92,12 +92,12 @@
 
   ## we may need a new first pod to run against -- fetch them all again
   - command: >
-      {{ openshift_client_binary }} get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+      {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
     register: _cluster_pods
 
   - name: "Enable shard balancing for logging-{{ _cluster_component }} cluster"
     command: >
-      {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }'
+      {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }'
     register: _enable_output
     changed_when: "'\"acknowledged\":true' in _enable_output.stdout"
     when: _cluster_pods.stdout != ""

+ 3 - 3
roles/openshift_logging_elasticsearch/tasks/restart_es_node.yml

@@ -1,7 +1,7 @@
 ---
 - name: "Rolling out new pod(s) for {{ _es_node }}"
   command: >
-    {{ openshift_client_binary }} rollout latest {{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }}
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig rollout latest {{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }}
 
 - when: not _skip_healthcheck | bool
   name: "Waiting for {{ _es_node }} to finish scaling up"
@@ -23,14 +23,14 @@
 - when: not _skip_healthcheck | bool
   name: Gettings name(s) of replica pod(s)
   command: >
-    {{ openshift_client_binary }} get pods -l deploymentconfig={{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get pods -l deploymentconfig={{ _es_node }} -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
   register: _pods
   changed_when: false
 
 - when: not _skip_healthcheck | bool
   name: "Waiting for ES node {{ _es_node }} health to be in ['green', 'yellow']"
   shell: >
-    {{ openshift_client_binary }} exec "{{ _pod }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig exec "{{ _pod }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
   with_items: "{{ _pods.stdout.split(' ') }}"
   loop_control:
     loop_var: _pod

+ 2 - 2
roles/openshift_node_group/tasks/sync.yml

@@ -35,11 +35,11 @@
 # TODO: temporary until we fix apply for image stream tags
 - name: Remove the image stream tag
   shell: >
-    {{ openshift_client_binary }} delete -n openshift-node istag node:v3.10 --ignore-not-found
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig delete -n openshift-node istag node:v3.10 --ignore-not-found
 
 - name: Apply the config
   shell: >
-    {{ openshift_client_binary }} apply -f {{ mktemp.stdout }}
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig apply -f {{ mktemp.stdout }}
 
 - name: Remove temp directory
   file:

+ 1 - 1
roles/openshift_prometheus/tasks/install_prometheus.yaml

@@ -51,7 +51,7 @@
 # TODO remove this when annotations are supported by oc_serviceaccount
 - name: annotate serviceaccount
   command: >
-    {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig annotate --overwrite -n {{ openshift_prometheus_namespace }}
     serviceaccount {{ openshift_prometheus_service_name }} {{ item }}
   with_items:
     "{{ openshift_prometheus_serviceaccount_annotations }}"

+ 2 - 2
roles/openshift_sdn/tasks/main.yml

@@ -37,11 +37,11 @@
 # TODO: temporary until we fix apply for image stream tags
 - name: Remove the image stream tag
   shell: >
-    {{ openshift_client_binary }} delete -n openshift-sdn istag node:v3.10 --ignore-not-found
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig delete -n openshift-sdn istag node:v3.10 --ignore-not-found
 
 - name: Apply the config
   shell: >
-    {{ openshift_client_binary }} apply -f "{{ mktemp.stdout }}"
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig apply -f "{{ mktemp.stdout }}"
 
 - name: Remove temp directory
   file: