# playbooks/openshift-logging/config.yml
  1. ---
  2. - name: Logging Install Checkpoint Start
  3. hosts: all
  4. gather_facts: false
  5. tasks:
  6. - name: Set Logging install 'In Progress'
  7. run_once: true
  8. set_stats:
  9. data:
  10. installer_phase_logging:
  11. title: "Logging Install"
  12. playbook: "playbooks/openshift-logging/config.yml"
  13. status: "In Progress"
  14. start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
  15. # Normally we only collect this information for our master group entries
  16. # we want to also collect this for nodes so we can match group entries to nodes
  17. - name: Get common IP facts when necessary
  18. hosts: oo_nodes_to_config:!oo_masters
  19. gather_facts: false
  20. tasks:
  21. - name: Gather Cluster facts
  22. openshift_facts:
  23. role: common
  24. local_facts:
  25. ip: "{{ openshift_ip | default(None) }}"
  26. - name: Verify and collect ES hosts
  27. hosts: oo_first_master
  28. gather_facts: false
  29. tasks:
  30. - when: openshift_logging_install_logging | default(false) | bool
  31. block:
  32. - assert:
  33. that: openshift_logging_es_nodeselector is defined
  34. msg: "A node selector is required for Elasticsearch pods, please specify one with openshift_logging_es_nodeselector"
  35. - name: Ensure that ElasticSearch has nodes to run on
  36. import_role:
  37. name: openshift_control_plane
  38. tasks_from: ensure_nodes_matching_selector.yml
  39. vars:
  40. openshift_master_ensure_nodes_selector: "{{ openshift_logging_es_nodeselector | map_to_pairs }}"
  41. openshift_master_ensure_nodes_service: Elasticsearch
  42. - command: >
  43. {{ openshift_client_binary }}
  44. --config={{ openshift.common.config_base }}/master/admin.kubeconfig
  45. get nodes
  46. -l {{ openshift_logging_es_nodeselector | map_to_pairs }}
  47. -o jsonpath={.items[*].status.addresses[?(@.type==\"InternalIP\")].address}
  48. register: openshift_logging_es_hosts
  49. - when: openshift_logging_use_ops | default(false) | bool
  50. block:
  51. - assert:
  52. that: openshift_logging_es_ops_nodeselector is defined
  53. msg: "A node selector is required for Elasticsearch Ops pods, please specify one with openshift_logging_es_ops_nodeselector"
  54. - name: Ensure that ElasticSearch Ops has nodes to run on
  55. import_role:
  56. name: openshift_control_plane
  57. tasks_from: ensure_nodes_matching_selector.yml
  58. vars:
  59. openshift_master_ensure_nodes_selector: "{{ openshift_logging_es_ops_nodeselector | map_to_pairs }}"
  60. openshift_master_ensure_nodes_service: "Elasticsearch Ops"
  61. - command: >
  62. {{ openshift_client_binary }}
  63. --config={{ openshift.common.config_base }}/master/admin.kubeconfig
  64. get nodes
  65. -l {{ openshift_logging_es_ops_nodeselector | map_to_pairs }}
  66. -o jsonpath={.items[*].status.addresses[?(@.type==\"InternalIP\")].address}
  67. register: openshift_logging_es_ops_hosts
  68. - set_fact:
  69. openshift_logging_elasticsearch_hosts: "{{ ( openshift_logging_es_hosts.stdout.split(' ') | default([]) + (openshift_logging_es_ops_hosts.stdout.split(' ') if openshift_logging_es_ops_hosts.stdout is defined else []) ) | unique }}"
  70. #- name: Debug groups
  71. # debug:
  72. # var: groups
  73. #- name: Debug hostvars
  74. # debug:
  75. # var: hostvars
  76. # Check to see if the collected ip from the openshift facts above matches our node back to a
  77. # group entry in our inventory so we can maintain our group variables when updating the sysctl
  78. # files for specific nodes based on <node>.status.addresses[@.type==InternalIP].address
  79. - name: Evaluate oo_elasticsearch_nodes
  80. add_host:
  81. name: "{{ item }}"
  82. groups: oo_elasticsearch_nodes
  83. ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
  84. ansible_become: "{{ g_sudo | default(omit) }}"
  85. with_items: "{{ groups.get('oo_nodes_to_config', groups['all']) }}"
  86. changed_when: no
  87. run_once: true
  88. delegate_to: localhost
  89. connection: local
  90. when: hostvars[item].get('openshift',{}).get('common',{}).get('ip', None) in openshift_logging_elasticsearch_hosts
  91. - name: Update vm.max_map_count for ES 5.x
  92. hosts: oo_elasticsearch_nodes
  93. gather_facts: false
  94. tasks:
  95. - when: openshift_logging_install_logging | default(false) | bool
  96. block:
  97. - name: Checking vm max_map_count value
  98. command:
  99. cat /proc/sys/vm/max_map_count
  100. register: _vm_max_map_count
  101. - name: Updating vm.max_map_count value
  102. sysctl:
  103. name: vm.max_map_count
  104. value: 262144
  105. sysctl_file: "/etc/sysctl.d/99-elasticsearch.conf"
  106. reload: yes
  107. when:
  108. - _vm_max_map_count.stdout | default(0) | int < 262144 | int
  109. - name: Remove created 99-elasticsearch sysctl
  110. hosts: all
  111. gather_facts: false
  112. tasks:
  113. - when: not openshift_logging_install_logging | default(false) | bool
  114. file:
  115. state: absent
  116. name: /etc/sysctl.d/99-elasticsearch.conf
  117. - name: OpenShift Aggregated Logging
  118. hosts: oo_first_master
  119. roles:
  120. - openshift_logging
  121. - name: Logging Install Checkpoint End
  122. hosts: all
  123. gather_facts: false
  124. tasks:
  125. - name: Set Logging install 'Complete'
  126. run_once: true
  127. set_stats:
  128. data:
  129. installer_phase_logging:
  130. status: "Complete"
  131. end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"