@@ -1,13 +1,13 @@
 ---
 
 ## get all pods for the cluster
 - command: >
-    {{ openshift_client_binary }} get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
   register: _cluster_pods
 
 ### Check for cluster state before making changes -- if its red then we don't want to continue
 - name: "Checking current health for {{ _es_node }} cluster"
   shell: >
-    {{ openshift_client_binary }} exec "{{ _cluster_pods.stdout.split(' ')[0] }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig exec "{{ _cluster_pods.stdout.split(' ')[0] }}" -c elasticsearch -n "{{ openshift_logging_elasticsearch_namespace }}" -- es_cluster_health
   register: _pod_status
   when: _cluster_pods.stdout_lines | count > 0
@@ -46,7 +46,7 @@
 
 - name: "Disable shard balancing for logging-{{ _cluster_component }} cluster"
   command: >
-    {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "none" } }'
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "none" } }'
   register: _disable_output
   changed_when: "'\"acknowledged\":true' in _disable_output.stdout"
   when: _cluster_pods.stdout_lines | count > 0
@@ -54,7 +54,7 @@
 # Flush ES
 - name: "Flushing for logging-{{ _cluster_component }} cluster"
   command: >
-    {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced'
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_flush/synced'
   register: _flush_output
   changed_when: "'\"acknowledged\":true' in _flush_output.stdout"
   when:
@@ -62,7 +62,7 @@
   - full_restart_cluster | bool
 
 - command: >
-    {{ openshift_client_binary }} get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get dc -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[*].metadata.name}
   register: _cluster_dcs
 
 # If we are currently restarting the "es" cluster we want to check if we are scaling up the number of es nodes
@@ -92,12 +92,12 @@
 
 ## we may need a new first pod to run against -- fetch them all again
 - command: >
-    {{ openshift_client_binary }} get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig get pod -l component={{ _cluster_component }},provider=openshift -n {{ openshift_logging_elasticsearch_namespace }} -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
   register: _cluster_pods
 
 - name: "Enable shard balancing for logging-{{ _cluster_component }} cluster"
   command: >
-    {{ openshift_client_binary }} exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }'
+    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig exec {{ _cluster_pods.stdout.split(' ')[0] }} -c elasticsearch -n {{ openshift_logging_elasticsearch_namespace }} -- {{ __es_local_curl }} -XPUT 'https://localhost:9200/_cluster/settings' -d '{ "transient": { "cluster.routing.allocation.enable" : "all" } }'
   register: _enable_output
   changed_when: "'\"acknowledged\":true' in _enable_output.stdout"
   when: _cluster_pods.stdout != ""