---
## get all pods for the cluster
- command: >
    {{ openshift_client_binary }}
    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
    get pod
    -l component={{ _cluster_component }},provider=openshift
    -n {{ openshift_logging_elasticsearch_namespace }}
    -o jsonpath={.items[?(@.status.phase==\"Running\")].metadata.name}
  register: _cluster_pods
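
# NOTE: jsonpath prints the matching pod names space-separated on a single
# line (e.g. "logging-es-data-master-abc logging-es-data-master-def",
# illustrative names), which is why later tasks split _cluster_pods.stdout
# on ' ' to get a list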

# make a temp dir for admin certs
- command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
  register: _logging_handler_tempdir
  changed_when: False
  check_mode: no
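
# NOTE: check_mode: no forces the temp dir to be created even during a dry
# run, and changed_when: False keeps the task from reporting a change on
# every invocation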

- name: Exporting secrets to use when communicating with the ES cluster
  command: >
    {{ openshift_client_binary }}
    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
    extract secret/logging-elasticsearch
    -n {{ openshift_logging_elasticsearch_namespace }}
    --keys=admin-cert --keys=admin-key
    --to={{ _logging_handler_tempdir.stdout }}
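
# NOTE: oc extract writes one file per requested key, so the temp dir ends up
# holding admin-cert and admin-key; the curl health check below presents them
# as the client certificate and key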

### Check the cluster state before making changes -- if it's red, yellow, or missing nodes then we don't want to continue
- name: "Checking current health for {{ _cluster_component }} cluster"
  command: >
    curl -s -k
    --cert {{ _logging_handler_tempdir.stdout }}/admin-cert
    --key {{ _logging_handler_tempdir.stdout }}/admin-key
    https://logging-{{ _cluster_component }}.{{ openshift_logging_elasticsearch_namespace }}.svc:9200/_cluster/health?pretty
  register: _pod_status
  when:
  - _cluster_pods.stdout
  - _cluster_pods.stdout.split(' ') | count > 0
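
# NOTE: _cluster/health returns JSON along the lines of
#   {"cluster_name": "logging-es", "status": "green", "number_of_nodes": 3, ...}
# (illustrative values); only 'status' and 'number_of_nodes' are inspected
# by the conditions below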

- when:
  - _pod_status.stdout is defined
  - (_pod_status.stdout | from_json)['status'] in ['yellow', 'red'] or (_pod_status.stdout | from_json)['number_of_nodes'] != _cluster_pods.stdout.split(' ') | count
  block:
  - name: Set Logging message to manually restart
    run_once: true
    set_stats:
      data:
        installer_phase_logging:
          message: "Cluster logging-{{ _cluster_component }} was not in an optimal state and will not be automatically restarted. Please see documentation regarding doing a {{ 'full' if full_restart_cluster | bool else 'rolling' }} cluster restart."

  - debug:
      msg: "Cluster logging-{{ _cluster_component }} was not in an optimal state and will not be automatically restarted. Please see documentation regarding doing a {{ 'full' if full_restart_cluster | bool else 'rolling' }} cluster restart."
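
# NOTE: set_stats makes the message available to the installer's end-of-run
# status summary (run_once so it is only recorded once); the debug task
# echoes the same message directly in the play output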

- when: _pod_status.stdout is undefined or ( (_pod_status.stdout | from_json)['status'] in ['green'] and (_pod_status.stdout | from_json)['number_of_nodes'] == _cluster_pods.stdout.split(' ') | count )
  block:
  - command: >
      {{ openshift_client_binary }}
      --config={{ openshift.common.config_base }}/master/admin.kubeconfig
      get dc
      -l component={{ _cluster_component }},provider=openshift
      -n {{ openshift_logging_elasticsearch_namespace }}
      -o jsonpath={.items[*].metadata.name}
    register: _cluster_dcs
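
  # NOTE: as with the pod lookup above, jsonpath emits the dc names
  # space-separated on one line, so _cluster_dcs.stdout is split on ' '
  # before being handed to the restart tasks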

  ## restart all dcs for a full restart
  - name: "Performing full cluster restart for {{ _cluster_component }} cluster"
    include_tasks: full_cluster_restart.yml
    vars:
      logging_restart_cluster_dcs: "{{ _cluster_dcs.stdout.split(' ') }}"
    when:
    - full_restart_cluster | bool

  ## restart a node only if its dc is in the list of nodes to restart
  - name: "Performing rolling cluster restart for {{ _cluster_component }} cluster"
    include_tasks: rolling_cluster_restart.yml
    vars:
      logging_restart_cluster_dcs: "{{ _restart_logging_nodes | intersect(_cluster_dcs.stdout.split(' ')) }}"
    when:
    - not full_restart_cluster | bool
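
  # NOTE: both paths pass the restart task files a list of dc names via
  # logging_restart_cluster_dcs; the rolling path restricts it to the dcs
  # previously collected in _restart_logging_nodes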

# remove temp dir
- name: Cleaning up temp dir
  file:
    path: "{{ _logging_handler_tempdir.stdout }}"
    state: absent
  changed_when: False