Browse Source

Adding support for ES 5.x tech preview opt in

Eric Wolinetz cách đây 7 năm
mục cha
commit
d5879135f0
54 tập tin đã thay đổi với 1689 bổ sung và 84 xóa
  1. 36 5
      playbooks/openshift-logging/private/config.yml
  2. 2 0
      roles/openshift_logging/defaults/main.yml
  3. 8 0
      roles/openshift_logging/tasks/install_logging.yaml
  4. 5 0
      roles/openshift_logging/tasks/main.yaml
  5. 0 0
      roles/openshift_logging_curator/files/2.x/curator.yml
  6. 18 0
      roles/openshift_logging_curator/files/5.x/curator.yml
  7. 2 2
      roles/openshift_logging_curator/tasks/main.yaml
  8. 0 0
      roles/openshift_logging_curator/templates/2.x/curator.j2
  9. 113 0
      roles/openshift_logging_curator/templates/5.x/curator.j2
  10. 0 9
      roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml
  11. 6 2
      roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
  12. 79 54
      roles/openshift_logging_elasticsearch/tasks/main.yaml
  13. 0 0
      roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch-logging.yml.j2
  14. 0 0
      roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch.yml.j2
  15. 0 0
      roles/openshift_logging_elasticsearch/templates/2.x/es.j2
  16. 0 0
      roles/openshift_logging_elasticsearch/templates/2.x/logging-metrics-role.j2
  17. 0 0
      roles/openshift_logging_elasticsearch/templates/2.x/pvc.j2
  18. 0 0
      roles/openshift_logging_elasticsearch/templates/2.x/rolebinding.j2
  19. 0 0
      roles/openshift_logging_elasticsearch/templates/2.x/route_reencrypt.j2
  20. 74 0
      roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2
  21. 194 0
      roles/openshift_logging_elasticsearch/templates/5.x/es.j2
  22. 78 0
      roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2
  23. 31 0
      roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2
  24. 30 0
      roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2
  25. 14 0
      roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2
  26. 0 0
      roles/openshift_logging_elasticsearch/templates/5.x/route_reencrypt.j2
  27. 0 1
      roles/openshift_logging_elasticsearch/vars/main.yml
  28. 0 0
      roles/openshift_logging_eventrouter/files/2.x/eventrouter-template.yaml
  29. 103 0
      roles/openshift_logging_eventrouter/files/5.x/eventrouter-template.yaml
  30. 1 1
      roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
  31. 0 0
      roles/openshift_logging_eventrouter/templates/5.x/eventrouter-template.j2
  32. 0 0
      roles/openshift_logging_fluentd/files/2.x/fluentd-throttle-config.yaml
  33. 0 0
      roles/openshift_logging_fluentd/files/2.x/secure-forward.conf
  34. 7 0
      roles/openshift_logging_fluentd/files/5.x/fluentd-throttle-config.yaml
  35. 0 0
      roles/openshift_logging_fluentd/files/5.x/secure-forward.conf
  36. 4 4
      roles/openshift_logging_fluentd/tasks/main.yaml
  37. 0 0
      roles/openshift_logging_fluentd/templates/2.x/fluent.conf.j2
  38. 0 0
      roles/openshift_logging_fluentd/templates/2.x/fluentd.j2
  39. 80 0
      roles/openshift_logging_fluentd/templates/5.x/fluent.conf.j2
  40. 249 0
      roles/openshift_logging_fluentd/templates/5.x/fluentd.j2
  41. 3 3
      roles/openshift_logging_kibana/tasks/main.yaml
  42. 0 0
      roles/openshift_logging_kibana/templates/2.x/kibana.j2
  43. 0 0
      roles/openshift_logging_kibana/templates/2.x/oauth-client.j2
  44. 36 0
      roles/openshift_logging_kibana/templates/2.x/route_reencrypt.j2
  45. 170 0
      roles/openshift_logging_kibana/templates/5.x/kibana.j2
  46. 16 0
      roles/openshift_logging_kibana/templates/5.x/oauth-client.j2
  47. 36 0
      roles/openshift_logging_kibana/templates/5.x/route_reencrypt.j2
  48. 0 0
      roles/openshift_logging_mux/files/2.x/fluent.conf
  49. 26 0
      roles/openshift_logging_mux/files/2.x/secure-forward.conf
  50. 37 0
      roles/openshift_logging_mux/files/5.x/fluent.conf
  51. 26 0
      roles/openshift_logging_mux/files/5.x/secure-forward.conf
  52. 3 3
      roles/openshift_logging_mux/tasks/main.yaml
  53. 0 0
      roles/openshift_logging_mux/templates/2.x/mux.j2
  54. 202 0
      roles/openshift_logging_mux/templates/5.x/mux.j2

+ 36 - 5
playbooks/openshift-logging/private/config.yml

@@ -11,6 +11,38 @@
           status: "In Progress"
           start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
 
+- name: Update vm.max_map_count for ES 5.x
+  hosts: all
+  gather_facts: false
+  tasks:
+  - when:
+    - openshift_logging_es5_techpreview | default(false) | bool
+    - openshift_deployment_type in ['origin']
+    block:
+    - name: Checking vm max_map_count value
+      command:
+        cat /proc/sys/vm/max_map_count
+      register: _vm_max_map_count
+
+    - stat:
+        path: /etc/sysctl.d/99-elasticsearch.conf
+      register: _99_es_conf
+
+    - name: Check for current value of vm.max_map_count in 99-elasticsearch.conf
+      command: >
+        sed /etc/sysctl.d/99-elasticsearch.conf -e 's/vm.max_map_count=\(.*\)/\1/'
+      register: _curr_vm_max_map_count
+      when: _99_es_conf.stat.exists
+
+    - name: Updating vm.max_map_count value
+      sysctl:
+        name: vm.max_map_count
+        value: 262144
+        sysctl_file: "/etc/sysctl.d/99-elasticsearch.conf"
+        reload: yes
+      when:
+      - _vm_max_map_count.stdout | default(0) | int < 262144 | int or _curr_vm_max_map_count.stdout | default(0) | int < 262144
+
 - name: OpenShift Aggregated Logging
   hosts: oo_first_master
   roles:
@@ -20,11 +52,10 @@
 - name: Update Master configs
   hosts: oo_masters:!oo_first_master
   tasks:
-  - block:
-    - import_role:
-        name: openshift_logging
-        tasks_from: update_master_config
-      when: not openshift.common.version_gte_3_9
+  - include_role:
+      name: openshift_logging
+      tasks_from: update_master_config
+    when: not openshift.common.version_gte_3_9
 
 - name: Logging Install Checkpoint End
   hosts: all

+ 2 - 0
roles/openshift_logging/defaults/main.yml

@@ -12,6 +12,8 @@ openshift_logging_install_logging: False
 openshift_logging_purge_logging: False
 openshift_logging_image_pull_secret: ""
 
+openshift_logging_es5_techpreview: False
+
 openshift_logging_curator_default_days: 30
 openshift_logging_curator_run_hour: 0
 openshift_logging_curator_run_minute: 0

+ 8 - 0
roles/openshift_logging/tasks/install_logging.yaml

@@ -59,6 +59,14 @@
   vars:
     generated_certs_dir: "{{openshift.common.config_base}}/logging"
 
+- set_fact:
+    __base_file_dir: "{{ '5.x' if openshift_logging_es5_techpreview | bool else '2.x' }}"
+    __es_version: "{{ '5.x' if openshift_logging_es5_techpreview | bool else '2.x' }}"
+
+- set_fact:
+    openshift_logging_image_version: "techpreview"
+  when: openshift_logging_es5_techpreview | bool
+
 ## Elasticsearch
 
 - set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }}

+ 5 - 0
roles/openshift_logging/tasks/main.yaml

@@ -3,6 +3,11 @@
     msg: Only one Fluentd nodeselector key pair should be provided
   when: openshift_logging_fluentd_nodeselector.keys() | count > 1
 
+- assert:
+    that: openshift_deployment_type in ['origin']
+    msg: "Only 'origin' deployments are allowed with openshift_logging_es5_techpreview set to true"
+  when: openshift_logging_es5_techpreview | bool
+
 - name: Create temp directory for doing work in
   command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
   register: mktemp

roles/openshift_logging_curator/files/curator.yml → roles/openshift_logging_curator/files/2.x/curator.yml


+ 18 - 0
roles/openshift_logging_curator/files/5.x/curator.yml

@@ -0,0 +1,18 @@
+# Logging example curator config file
+
+# uncomment and use this to override the defaults from env vars
+#.defaults:
+#  delete:
+#    days: 30
+#  runhour: 0
+#  runminute: 0
+
+# to keep ops logs for a different duration:
+#.operations:
+#  delete:
+#    weeks: 8
+
+# example for a normal project
+#myapp:
+#  delete:
+#    weeks: 1

+ 2 - 2
roles/openshift_logging_curator/tasks/main.yaml

@@ -52,7 +52,7 @@
 
 # configmap
 - copy:
-    src: curator.yml
+    src: "{{ __base_file_dir }}/curator.yml"
     dest: "{{ tempdir }}/curator.yml"
   changed_when: no
 
@@ -96,7 +96,7 @@
 # TODO: scale should not exceed 1
 - name: Generate Curator deploymentconfig
   template:
-    src: curator.j2
+    src: "{{ __base_file_dir }}/curator.j2"
     dest: "{{ tempdir }}/templates/curator-dc.yaml"
   vars:
     component: "{{ curator_component }}"

roles/openshift_logging_curator/templates/curator.j2 → roles/openshift_logging_curator/templates/2.x/curator.j2


+ 113 - 0
roles/openshift_logging_curator/templates/5.x/curator.j2

@@ -0,0 +1,113 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+  name: "{{deploy_name}}"
+  labels:
+    provider: openshift
+    component: "{{component}}"
+    logging-infra: "{{logging_component}}"
+spec:
+  replicas: {{curator_replicas|default(1)}}
+  selector:
+    provider: openshift
+    component: "{{component}}"
+    logging-infra: "{{logging_component}}"
+  strategy:
+    rollingParams:
+      intervalSeconds: 1
+      timeoutSeconds: 600
+      updatePeriodSeconds: 1
+    type: Recreate
+  template:
+    metadata:
+      name: "{{deploy_name}}"
+      labels:
+        logging-infra: "{{logging_component}}"
+        provider: openshift
+        component: "{{component}}"
+    spec:
+      terminationGracePeriod: 600
+      serviceAccountName: aggregated-logging-curator
+{% if curator_node_selector is iterable and curator_node_selector | length > 0 %}
+      nodeSelector:
+{% for key, value in curator_node_selector.items() %}
+        {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+      containers:
+        -
+          name: "curator"
+          image: {{image}}
+          imagePullPolicy: IfNotPresent
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %}
+          resources:
+{%   if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") %}
+            limits:
+{%     if curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "" %}
+              cpu: "{{curator_cpu_limit}}"
+{%     endif %}
+{%     if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
+              memory: "{{curator_memory_limit}}"
+{%     endif %}
+{%   endif %}
+{%   if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %}
+            requests:
+{%     if curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "" %}
+              cpu: "{{curator_cpu_request}}"
+{%     endif %}
+{%     if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
+              memory: "{{curator_memory_limit}}"
+{%     endif %}
+{%   endif %}
+{% endif %}
+          env:
+            -
+              name: "K8S_HOST_URL"
+              value: "{{openshift_logging_curator_master_url}}"
+            -
+              name: "ES_HOST"
+              value: "{{es_host}}"
+            -
+              name: "ES_PORT"
+              value: "{{es_port}}"
+            -
+              name: "ES_CLIENT_CERT"
+              value: "/etc/curator/keys/cert"
+            -
+              name: "ES_CLIENT_KEY"
+              value: "/etc/curator/keys/key"
+            -
+              name: "ES_CA"
+              value: "/etc/curator/keys/ca"
+            -
+              name: "CURATOR_DEFAULT_DAYS"
+              value: "{{openshift_logging_curator_default_days}}"
+            -
+              name: "CURATOR_RUN_HOUR"
+              value: "{{openshift_logging_curator_run_hour}}"
+            -
+              name: "CURATOR_RUN_MINUTE"
+              value: "{{openshift_logging_curator_run_minute}}"
+            -
+              name: "CURATOR_RUN_TIMEZONE"
+              value: "{{openshift_logging_curator_run_timezone}}"
+            -
+              name: "CURATOR_SCRIPT_LOG_LEVEL"
+              value: "{{openshift_logging_curator_script_log_level}}"
+            -
+              name: "CURATOR_LOG_LEVEL"
+              value: "{{openshift_logging_curator_log_level}}"
+          volumeMounts:
+            - name: certs
+              mountPath: /etc/curator/keys
+              readOnly: true
+            - name: config
+              mountPath: /etc/curator/settings
+              readOnly: true
+      volumes:
+        - name: certs
+          secret:
+            secretName: logging-curator
+        - name: config
+          configMap:
+            name: logging-curator

+ 0 - 9
roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml

@@ -1,9 +0,0 @@
-apiVersion: v1
-kind: ClusterRole
-metadata:
-  name: rolebinding-reader
-rules:
-- resources:
-    - clusterrolebindings
-  verbs:
-    - get

+ 6 - 2
roles/openshift_logging_elasticsearch/tasks/determine_version.yaml

@@ -10,10 +10,14 @@
 
 # should we just assume that we will have the correct major version?
 - set_fact: es_version="{{ openshift_logging_elasticsearch_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
-  when: openshift_logging_elasticsearch_image_version != 'latest'
+  when:
+  - openshift_logging_elasticsearch_image_version != 'latest'
+  - not openshift_logging_es5_techpreview | default(false) | bool
 
 - fail:
     msg: Invalid version specified for Elasticsearch
-  when: es_version not in __allowed_es_versions
+  when:
+  - es_version not in __allowed_es_versions
+  - not openshift_logging_es5_techpreview | default(false) | bool
 
 - include_tasks: get_es_version.yml

+ 79 - 54
roles/openshift_logging_elasticsearch/tasks/main.yaml

@@ -64,7 +64,6 @@
 # we want to make sure we have all the necessary components here
 
 # service account
-
 - name: Create ES service account
   oc_serviceaccount:
     state: present
@@ -82,19 +81,14 @@
     - openshift_logging_image_pull_secret == ''
 
 # rolebinding reader
-- copy:
-    src: rolebinding-reader.yml
-    dest: "{{ tempdir }}/rolebinding-reader.yml"
-
 - name: Create rolebinding-reader role
-  oc_obj:
+  oc_clusterrole:
     state: present
-    name: "rolebinding-reader"
-    kind: clusterrole
-    namespace: "{{ openshift_logging_elasticsearch_namespace }}"
-    files:
-      - "{{ tempdir }}/rolebinding-reader.yml"
-    delete_after: true
+    name: rolebinding-reader
+    rules:
+      - apiGroups: [""]
+        resources: ["clusterrolebindings"]
+        verbs: ["get"]
 
 # SA roles
 - name: Set rolebinding-reader permissions for ES
@@ -114,7 +108,7 @@
 
 # logging-metrics-reader role
 - template:
-    src: logging-metrics-role.j2
+    src: "{{ __base_file_dir }}/logging-metrics-role.j2"
     dest: "{{mktemp.stdout}}/templates/logging-metrics-role.yml"
   vars:
     namespace: "{{ openshift_logging_elasticsearch_namespace }}"
@@ -150,7 +144,7 @@
 # View role and binding
 - name: Generate logging-elasticsearch-view-role
   template:
-    src: rolebinding.j2
+    src: "{{ __base_file_dir }}/rolebinding.j2"
     dest: "{{mktemp.stdout}}/logging-elasticsearch-view-role.yaml"
   vars:
     obj_name: logging-elasticsearch-view-role
@@ -183,51 +177,80 @@
     msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}"
 
 - template:
-    src: elasticsearch-logging.yml.j2
-    dest: "{{ tempdir }}/elasticsearch-logging.yml"
-  vars:
-    root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}"
-  when: es_logging_contents is undefined
-  changed_when: no
-
-- template:
-    src: elasticsearch.yml.j2
+    src: "{{ __base_file_dir }}/elasticsearch.yml.j2"
     dest: "{{ tempdir }}/elasticsearch.yml"
   vars:
     allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
     es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
     es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}"
     es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}"
-
-  when: es_config_contents is undefined
   changed_when: no
 
 # create diff between current configmap files and our current files
-# NOTE: include_role must be used instead of import_role because
-# this task file is looped over from another role.
-- include_role:
-    name: openshift_logging
-    tasks_from: patch_configmap_files.yaml
-  vars:
-    configmap_name: "logging-elasticsearch"
-    configmap_namespace: "logging"
-    configmap_file_names:
-      - current_file: "elasticsearch.yml"
-        new_file: "{{ tempdir }}/elasticsearch.yml"
-        protected_lines: ["number_of_shards", "number_of_replicas"]
-      - current_file: "logging.yml"
-        new_file: "{{ tempdir }}/elasticsearch-logging.yml"
-
-- name: Set ES configmap
-  oc_configmap:
-    state: present
-    name: "{{ elasticsearch_name }}"
-    namespace: "{{ openshift_logging_elasticsearch_namespace }}"
-    from_file:
-      elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
-      logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
-  register: es_config_creation
-  notify: "restart elasticsearch"
+- when: not openshift_logging_es5_techpreview
+  block:
+    - template:
+        src: "{{ __base_file_dir }}/elasticsearch-logging.yml.j2"
+        dest: "{{ tempdir }}/elasticsearch-logging.yml"
+      vars:
+        root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}"
+      changed_when: no
+
+    - include_role:
+        name: openshift_logging
+        tasks_from: patch_configmap_files.yaml
+      vars:
+        configmap_name: "logging-elasticsearch"
+        configmap_namespace: "logging"
+        configmap_file_names:
+          - current_file: "elasticsearch.yml"
+            new_file: "{{ tempdir }}/elasticsearch.yml"
+            protected_lines: ["number_of_shards", "number_of_replicas"]
+          - current_file: "logging.yml"
+            new_file: "{{ tempdir }}/elasticsearch-logging.yml"
+
+    - name: Set ES configmap
+      oc_configmap:
+        state: present
+        name: "{{ elasticsearch_name }}"
+        namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+        from_file:
+          elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
+          logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
+      register: es_config_creation
+      notify: "restart elasticsearch"
+
+- when: openshift_logging_es5_techpreview | bool
+  block:
+    - template:
+        src: "{{ __base_file_dir }}/log4j2.properties.j2"
+        dest: "{{ tempdir }}/log4j2.properties"
+      vars:
+        root_logger: "{{ openshift_logging_es_log_appenders | list }}"
+      changed_when: no
+
+    - include_role:
+        name: openshift_logging
+        tasks_from: patch_configmap_files.yaml
+      vars:
+        configmap_name: "logging-elasticsearch"
+        configmap_namespace: "logging"
+        configmap_file_names:
+          - current_file: "elasticsearch.yml"
+            new_file: "{{ tempdir }}/elasticsearch.yml"
+          - current_file: "log4j2.properties"
+            new_file: "{{ tempdir }}/log4j2.properties"
+
+    - name: Set ES configmap
+      oc_configmap:
+        state: present
+        name: "{{ elasticsearch_name }}"
+        namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+        from_file:
+          elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
+          log4j2.properties: "{{ tempdir }}/log4j2.properties"
+      register: es_config_creation
+      notify: "restart elasticsearch"
 
 - when: es_config_creation.changed | bool
   block:
@@ -341,7 +364,7 @@
     # storageclasses with the storageClassName set to "" in pvc.j2
     - name: Creating ES storage template - static
       template:
-        src: pvc.j2
+        src: "{{ __base_file_dir }}/pvc.j2"
         dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
       vars:
         obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
@@ -355,7 +378,7 @@
     # Storageclasses are used by default if configured
     - name: Creating ES storage template - dynamic
       template:
-        src: pvc.j2
+        src: "{{ __base_file_dir }}/pvc.j2"
         dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
       vars:
         obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
@@ -386,7 +409,7 @@
 # DC
 - name: Set ES dc templates
   template:
-    src: es.j2
+    src: "{{ __base_file_dir }}/es.j2"
     dest: "{{ tempdir }}/templates/logging-es-dc.yml"
   vars:
     es_cluster_name: "{{ es_component }}"
@@ -404,6 +427,8 @@
     deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
     es_replicas: 1
     basic_auth_passwd: "{{ _logging_metrics_proxy_passwd | b64decode }}"
+    es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
+    es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas| default(0) }}"
 
 - name: Set ES dc
   oc_obj:
@@ -462,7 +487,7 @@
 
 - name: Generating Elasticsearch {{ es_component }} route template
   template:
-    src: route_reencrypt.j2
+    src: "{{ __base_file_dir }}/route_reencrypt.j2"
     dest: "{{mktemp.stdout}}/templates/logging-{{ es_component }}-route.yaml"
   vars:
     obj_name: "logging-{{ es_component }}"

roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 → roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch-logging.yml.j2


roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 → roles/openshift_logging_elasticsearch/templates/2.x/elasticsearch.yml.j2


roles/openshift_logging_elasticsearch/templates/es.j2 → roles/openshift_logging_elasticsearch/templates/2.x/es.j2


roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 → roles/openshift_logging_elasticsearch/templates/2.x/logging-metrics-role.j2


roles/openshift_logging_elasticsearch/templates/pvc.j2 → roles/openshift_logging_elasticsearch/templates/2.x/pvc.j2


roles/openshift_logging_elasticsearch/templates/rolebinding.j2 → roles/openshift_logging_elasticsearch/templates/2.x/rolebinding.j2


roles/openshift_logging_kibana/templates/route_reencrypt.j2 → roles/openshift_logging_elasticsearch/templates/2.x/route_reencrypt.j2


+ 74 - 0
roles/openshift_logging_elasticsearch/templates/5.x/elasticsearch.yml.j2

@@ -0,0 +1,74 @@
+cluster:
+  name: ${CLUSTER_NAME}
+
+script:
+  inline: true
+  stored: true
+
+node:
+  name: ${DC_NAME}
+  master: ${IS_MASTER}
+  data: ${HAS_DATA}
+  max_local_storage_nodes: 1
+
+network:
+  host: 0.0.0.0
+
+cloud:
+  kubernetes:
+    service: ${SERVICE_DNS}
+    namespace: ${NAMESPACE}
+
+discovery.zen:
+  hosts_provider: kubernetes
+  minimum_master_nodes: ${NODE_QUORUM}
+
+gateway:
+  recover_after_nodes: ${NODE_QUORUM}
+  expected_nodes: ${RECOVER_EXPECTED_NODES}
+  recover_after_time: ${RECOVER_AFTER_TIME}
+
+io.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+io.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+
+openshift.config:
+  use_common_data_model: true
+  project_index_prefix: "project"
+  time_field_name: "@timestamp"
+
+openshift.searchguard:
+  keystore.path: /etc/elasticsearch/secret/admin.jks
+  truststore.path: /etc/elasticsearch/secret/searchguard.truststore
+
+openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default (false)}}
+
+openshift.kibana.index.mode: {{es_kibana_index_mode | default('unique')}}
+
+path:
+  data: /elasticsearch/persistent/${CLUSTER_NAME}/data
+  logs: /elasticsearch/${CLUSTER_NAME}/logs
+
+searchguard:
+  authcz.admin_dn:
+  - CN=system.admin,OU=OpenShift,O=Logging
+  config_index_name: ".searchguard.${DC_NAME}"
+  ssl:
+    transport:
+      enabled: true
+      enforce_hostname_verification: false
+      keystore_type: JKS
+      keystore_filepath: /etc/elasticsearch/secret/searchguard.key
+      keystore_password: kspass
+      truststore_type: JKS
+      truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore
+      truststore_password: tspass
+    http:
+      enabled: true
+      keystore_type: JKS
+      keystore_filepath: /etc/elasticsearch/secret/key
+      keystore_password: kspass
+      clientauth_mode: OPTIONAL
+      truststore_type: JKS
+      truststore_filepath: /etc/elasticsearch/secret/truststore
+      truststore_password: tspass

+ 194 - 0
roles/openshift_logging_elasticsearch/templates/5.x/es.j2

@@ -0,0 +1,194 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+  name: "{{deploy_name}}"
+  labels:
+    provider: openshift
+    component: "{{component}}"
+    deployment: "{{deploy_name}}"
+    logging-infra: "{{logging_component}}"
+spec:
+  replicas: {{es_replicas|default(1)}}
+  revisionHistoryLimit: 0
+  selector:
+    provider: openshift
+    component: "{{component}}"
+    deployment: "{{deploy_name}}"
+    logging-infra: "{{logging_component}}"
+  strategy:
+    type: Recreate
+  triggers: []
+  template:
+    metadata:
+      name: "{{deploy_name}}"
+      labels:
+        logging-infra: "{{logging_component}}"
+        provider: openshift
+        component: "{{component}}"
+        deployment: "{{deploy_name}}"
+    spec:
+      terminationGracePeriod: 600
+      serviceAccountName: aggregated-logging-elasticsearch
+      securityContext:
+        supplementalGroups:
+{% for group in es_storage_groups %}
+        - {{group}}
+{% endfor %}
+{% if es_node_selector is iterable and es_node_selector | length > 0 %}
+      nodeSelector:
+{% for key, value in es_node_selector.items() %}
+        {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+      containers:
+        - name: proxy
+          image: {{ proxy_image }}
+          imagePullPolicy: IfNotPresent
+          args:
+           - --upstream-ca=/etc/elasticsearch/secret/admin-ca
+           - --https-address=:4443
+           - -provider=openshift
+           - -client-id={{openshift_logging_elasticsearch_prometheus_sa}}
+           - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+           - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }}
+           - -upstream=https://localhost:9200
+           - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'
+           - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}'
+           - --tls-cert=/etc/tls/private/tls.crt
+           - --tls-key=/etc/tls/private/tls.key
+           - -pass-access-token
+           - -pass-user-headers
+          ports:
+          - containerPort: 4443
+            name: proxy
+            protocol: TCP
+          volumeMounts:
+          - mountPath: /etc/tls/private
+            name: proxy-tls
+            readOnly: true
+          - mountPath: /etc/elasticsearch/secret
+            name: elasticsearch
+            readOnly: true
+          resources:
+            limits:
+              memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
+            requests:
+              cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}"
+              memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
+        -
+          name: "elasticsearch"
+          image: {{image}}
+          imagePullPolicy: IfNotPresent
+          resources:
+            limits:
+{% if es_cpu_limit is defined and es_cpu_limit is not none and es_cpu_limit != '' %}
+              cpu: "{{es_cpu_limit}}"
+{% endif %}
+              memory: "{{es_memory_limit}}"
+            requests:
+              cpu: "{{es_cpu_request}}"
+              memory: "{{es_memory_limit}}"
+{% if es_container_security_context %}
+          securityContext: {{ es_container_security_context | to_yaml }}
+{% endif %}
+          ports:
+            -
+              containerPort: 9200
+              name: "restapi"
+            -
+              containerPort: 9300
+              name: "cluster"
+          env:
+            -
+              name: "DC_NAME"
+              value: "{{deploy_name}}"
+            -
+              name: "NAMESPACE"
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+            -
+              name: "KUBERNETES_TRUST_CERT"
+              value: "true"
+            -
+              name: "SERVICE_DNS"
+              value: "logging-{{es_cluster_name}}-cluster"
+            -
+              name: "CLUSTER_NAME"
+              value: "logging-{{es_cluster_name}}"
+            -
+              name: "INSTANCE_RAM"
+              value: "{{openshift_logging_elasticsearch_memory_limit}}"
+            -
+              name: "HEAP_DUMP_LOCATION"
+              value: "/elasticsearch/persistent/heapdump.hprof"
+            -
+              name: "NODE_QUORUM"
+              value: "{{es_node_quorum | int}}"
+            -
+              name: "RECOVER_EXPECTED_NODES"
+              value: "{{es_recover_expected_nodes}}"
+            -
+              name: "RECOVER_AFTER_TIME"
+              value: "{{openshift_logging_elasticsearch_recover_after_time}}"
+            -
+              name: "READINESS_PROBE_TIMEOUT"
+              value: "30"
+            -
+              name: "POD_LABEL"
+              value: "component={{component}}"
+            -
+              name: "IS_MASTER"
+              value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
+
+            -
+              name: "HAS_DATA"
+              value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}"
+            -
+              name: "PROMETHEUS_USER"
+              value: "{{openshift_logging_elasticsearch_prometheus_sa}}"
+
+            -
+             name: "PRIMARY_SHARDS"
+             value: "{{ es_number_of_shards | default ('1') }}"
+
+            -
+             name: "REPLICA_SHARDS"
+             value: "{{ es_number_of_replicas | default ('0') }}"
+
+          volumeMounts:
+            - name: elasticsearch
+              mountPath: /etc/elasticsearch/secret
+              readOnly: true
+            - name: elasticsearch-config
+              mountPath: /usr/share/java/elasticsearch/config
+              readOnly: true
+            - name: elasticsearch-storage
+              mountPath: /elasticsearch/persistent
+          readinessProbe:
+            exec:
+              command:
+              - "/usr/share/elasticsearch/probe/readiness.sh"
+            initialDelaySeconds: 10
+            timeoutSeconds: 30
+            periodSeconds: 5
+      volumes:
+        - name: proxy-tls
+          secret:
+            secretName: prometheus-tls
+        - name: elasticsearch
+          secret:
+            secretName: logging-elasticsearch
+        - name: elasticsearch-config
+          configMap:
+            name: logging-elasticsearch
+        - name: elasticsearch-storage
+{% if openshift_logging_elasticsearch_storage_type == 'pvc' %}
+          persistentVolumeClaim:
+            claimName: {{ openshift_logging_elasticsearch_pvc_name }}
+{% elif openshift_logging_elasticsearch_storage_type == 'hostmount' %}
+          hostPath:
+            path: {{ openshift_logging_elasticsearch_hostmount_path }}
+{% else %}
+          emptydir: {}
+{% endif %}

+ 78 - 0
roles/openshift_logging_elasticsearch/templates/5.x/log4j2.properties.j2

@@ -0,0 +1,78 @@
+status = error
+
+# log action execution errors for easier debugging
+logger.action.name = org.elasticsearch.action
+logger.action.level = debug
+
+appender.console.type = Console
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
+
+appender.rolling.type = RollingFile
+appender.rolling.name = rolling
+appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
+appender.rolling.layout.type = PatternLayout
+appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
+appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log
+appender.rolling.policies.type = Policies
+appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.rolling.policies.time.interval = 1
+appender.rolling.policies.time.modulate = true
+
+rootLogger.level = info
+{% if 'console' in root_logger  %}
+rootLogger.appenderRef.console.ref = console
+{% endif %}
+{% if 'file' in root_logger %}
+rootLogger.appenderRef.rolling.ref = rolling
+{% endif %}
+
+appender.deprecation_rolling.type = RollingFile
+appender.deprecation_rolling.name = deprecation_rolling
+appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.log
+appender.deprecation_rolling.layout.type = PatternLayout
+appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.-10000m%n
+appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.log.gz
+appender.deprecation_rolling.policies.type = Policies
+appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
+appender.deprecation_rolling.policies.size.size = 1GB
+appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
+appender.deprecation_rolling.strategy.max = 4
+
+logger.deprecation.name = org.elasticsearch.deprecation
+logger.deprecation.level = warn
+logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
+logger.deprecation.additivity = false
+
+appender.index_search_slowlog_rolling.type = RollingFile
+appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
+appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog.log
+appender.index_search_slowlog_rolling.layout.type = PatternLayout
+appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
+appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_search_slowlog-%d{yyyy-MM-dd}.log
+appender.index_search_slowlog_rolling.policies.type = Policies
+appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_search_slowlog_rolling.policies.time.interval = 1
+appender.index_search_slowlog_rolling.policies.time.modulate = true
+
+logger.index_search_slowlog_rolling.name = index.search.slowlog
+logger.index_search_slowlog_rolling.level = trace
+logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
+logger.index_search_slowlog_rolling.additivity = false
+
+appender.index_indexing_slowlog_rolling.type = RollingFile
+appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
+appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog.log
+appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
+appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.-10000m%n
+appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
+appender.index_indexing_slowlog_rolling.policies.type = Policies
+appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
+appender.index_indexing_slowlog_rolling.policies.time.interval = 1
+appender.index_indexing_slowlog_rolling.policies.time.modulate = true
+
+logger.index_indexing_slowlog.name = index.indexing.slowlog.index
+logger.index_indexing_slowlog.level = trace
+logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
+logger.index_indexing_slowlog.additivity = false

+ 31 - 0
roles/openshift_logging_elasticsearch/templates/5.x/logging-metrics-role.j2

@@ -0,0 +1,31 @@
+---
+apiVersion: v1
+kind: List
+items:
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+  kind: Role
+  metadata:
+    annotations:
+      rbac.authorization.kubernetes.io/autoupdate: "true"
+    name: prometheus-metrics-viewer
+    namespace: {{ namespace }}
+  rules:
+  - apiGroups:
+    - metrics.openshift.io
+    resources:
+    - prometheus
+    verbs:
+    - view
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+  kind: RoleBinding
+  metadata:
+    name: prometheus-metrics-viewer
+    namespace: {{ namespace }}
+  roleRef:
+    apiGroup: rbac.authorization.k8s.io
+    kind: Role
+    name: prometheus-metrics-viewer
+  subjects:
+  - kind: ServiceAccount
+    namespace: {{ role_namespace }}
+    name: {{ role_user }}

+ 30 - 0
roles/openshift_logging_elasticsearch/templates/5.x/pvc.j2

@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{obj_name}}
+  labels:
+    logging-infra: support
+{% if annotations is defined %}
+  annotations:
+{% for key,value in annotations.items() %}
+    {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+  selector:
+    matchLabels:
+{% for key,value in pv_selector.items() %}
+      {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+  accessModes:
+{% for mode in access_modes %}
+    - {{ mode }}
+{% endfor %}
+  resources:
+    requests:
+      storage: {{size}}
+{% if storage_class_name is defined %}
+  storageClassName: {{ storage_class_name }}
+{% endif %}

+ 14 - 0
roles/openshift_logging_elasticsearch/templates/5.x/rolebinding.j2

@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: RoleBinding
+metadata:
+  name: {{obj_name}}
+roleRef:
+{% if roleRef.kind is defined %}
+  kind: {{ roleRef.kind }}
+{% endif %}
+  name: {{ roleRef.name }}
+subjects:
+{% for sub in subjects %}
+  - kind: {{ sub.kind }}
+    name: {{ sub.name }}
+{% endfor %}

roles/openshift_logging_elasticsearch/templates/route_reencrypt.j2 → roles/openshift_logging_elasticsearch/templates/5.x/route_reencrypt.j2


+ 0 - 1
roles/openshift_logging_elasticsearch/vars/main.yml

@@ -4,7 +4,6 @@ __allowed_es_versions: ["3_5", "3_6", "3_7", "3_8", "3_9"]
 __allowed_es_types: ["data-master", "data-client", "master", "client"]
 __es_log_appenders: ['file', 'console']
 __kibana_index_modes: ["unique", "shared_ops"]
-__es_version: "2.4.4"
 
 __es_local_curl: "curl -s --cacert /etc/elasticsearch/secret/admin-ca --cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key"
 

roles/openshift_logging_eventrouter/files/eventrouter-template.yaml → roles/openshift_logging_eventrouter/files/2.x/eventrouter-template.yaml


+ 103 - 0
roles/openshift_logging_eventrouter/files/5.x/eventrouter-template.yaml

@@ -0,0 +1,103 @@
+# this openshift template should match (except nodeSelector) jinja2 template in
+# ../templates/eventrouter-template.j2
+kind: Template
+apiVersion: v1
+metadata:
+  name: eventrouter-template
+  annotations:
+    description: "A pod forwarding kubernetes events to EFK aggregated logging stack."
+    tags: "events,EFK,logging"
+objects:
+  - kind: ServiceAccount
+    apiVersion: v1
+    metadata:
+      name: aggregated-logging-eventrouter
+  - kind: ClusterRole
+    apiVersion: v1
+    metadata:
+      name: event-reader
+    rules:
+    - apiGroups: [""]
+      resources: ["events"]
+      verbs: ["get", "watch", "list"]
+  - kind: ConfigMap
+    apiVersion: v1
+    metadata:
+      name: logging-eventrouter
+    data:
+      config.json: |-
+        {
+          "sink": "${SINK}"
+        }
+  - kind: DeploymentConfig
+    apiVersion: v1
+    metadata:
+      name: logging-eventrouter
+      labels:
+        component: eventrouter
+        logging-infra: eventrouter
+        provider: openshift
+    spec:
+      selector:
+        component: eventrouter
+        logging-infra: eventrouter
+        provider: openshift
+      replicas: "${{REPLICAS}}"
+      template:
+        metadata:
+          labels:
+            component: eventrouter
+            logging-infra: eventrouter
+            provider: openshift
+          name: logging-eventrouter
+        spec:
+          serviceAccount: aggregated-logging-eventrouter
+          serviceAccountName: aggregated-logging-eventrouter
+          containers:
+            - name: kube-eventrouter
+              image: ${IMAGE}
+              imagePullPolicy: IfNotPresent
+              resources:
+                limits:
+                  memory: ${MEMORY}
+                  cpu: ${CPU}
+                requests:
+                  memory: ${MEMORY}
+              volumeMounts:
+              - name: config-volume
+                mountPath: /etc/eventrouter
+          volumes:
+            - name: config-volume
+              configMap:
+                name: logging-eventrouter
+  - kind: ClusterRoleBinding
+    apiVersion: v1
+    metadata:
+      name: event-reader-binding
+    subjects:
+    - kind: ServiceAccount
+      name: aggregated-logging-eventrouter
+      namespace: ${NAMESPACE}
+    roleRef:
+      kind: ClusterRole
+      name: event-reader
+
+parameters:
+  - name: SINK
+    displayName: Sink
+    value: stdout
+  - name: REPLICAS
+    displayName: Replicas
+    value: "1"
+  - name: IMAGE
+    displayName: Image
+    value: "docker.io/openshift/origin-logging-eventrouter:latest"
+  - name: MEMORY
+    displayName: Memory
+    value: "128Mi"
+  - name: CPU
+    displayName: CPU
+    value: "100m"
+  - name: NAMESPACE
+    displayName: Namespace
+    value: default

+ 1 - 1
roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml

@@ -23,7 +23,7 @@
 # create EventRouter deployment config
 - name: Generate EventRouter template
   template:
-    src: eventrouter-template.j2
+    src: "{{ __base_file_dir }}/eventrouter-template.j2"
     dest: "{{ tempdir }}/templates/eventrouter-template.yaml"
   vars:
     node_selector: "{{ openshift_logging_eventrouter_nodeselector | default({}) }}"

roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 → roles/openshift_logging_eventrouter/templates/5.x/eventrouter-template.j2


roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml → roles/openshift_logging_fluentd/files/2.x/fluentd-throttle-config.yaml


roles/openshift_logging_mux/files/secure-forward.conf → roles/openshift_logging_fluentd/files/2.x/secure-forward.conf


+ 7 - 0
roles/openshift_logging_fluentd/files/5.x/fluentd-throttle-config.yaml

@@ -0,0 +1,7 @@
+# Logging example fluentd throttling config file
+
+#example-project:
+#  read_lines_limit: 10
+#
+#.operations:
+#  read_lines_limit: 100

roles/openshift_logging_fluentd/files/secure-forward.conf → roles/openshift_logging_fluentd/files/5.x/secure-forward.conf


+ 4 - 4
roles/openshift_logging_fluentd/tasks/main.yaml

@@ -104,17 +104,17 @@
 
 # create Fluentd configmap
 - template:
-    src: fluent.conf.j2
+    src: "{{ __base_file_dir }}/fluent.conf.j2"
     dest: "{{ tempdir }}/fluent.conf"
   vars:
     deploy_type: "{{ openshift_logging_fluentd_deployment_type }}"
 
 - copy:
-    src: fluentd-throttle-config.yaml
+    src: "{{ __base_file_dir }}/fluentd-throttle-config.yaml"
     dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
 
 - copy:
-    src: secure-forward.conf
+    src: "{{ __base_file_dir }}/secure-forward.conf"
     dest: "{{ tempdir }}/secure-forward.conf"
 
 - import_role:
@@ -161,7 +161,7 @@
 # TODO: pass in aggregation configurations
 - name: Generate logging-fluentd daemonset definition
   template:
-    src: fluentd.j2
+    src: "{{ __base_file_dir }}/fluentd.j2"
     dest: "{{ tempdir }}/templates/logging-fluentd.yaml"
   vars:
     daemonset_name: logging-fluentd

roles/openshift_logging_fluentd/templates/fluent.conf.j2 → roles/openshift_logging_fluentd/templates/2.x/fluent.conf.j2


roles/openshift_logging_fluentd/templates/fluentd.j2 → roles/openshift_logging_fluentd/templates/2.x/fluentd.j2


+ 80 - 0
roles/openshift_logging_fluentd/templates/5.x/fluent.conf.j2

@@ -0,0 +1,80 @@
+# This file is the fluentd configuration entrypoint. Edit with care.
+
+@include configs.d/openshift/system.conf
+
+# In each section below, pre- and post- includes don't include anything initially;
+# they exist to enable future additions to openshift conf as needed.
+
+## sources
+{% if deploy_type in ['hosted', 'secure-aggregator'] %}
+## ordered so that syslog always runs last...
+@include configs.d/openshift/input-pre-*.conf
+@include configs.d/dynamic/input-docker-*.conf
+@include configs.d/dynamic/input-syslog-*.conf
+@include configs.d/openshift/input-post-*.conf
+##
+{% else %}
+<source>
+  @type secure_forward
+  @label @INGRESS
+
+  self_hostname ${HOSTNAME}
+  bind 0.0.0.0
+  port {{openshift_logging_fluentd_aggregating_port}}
+
+  shared_key {{openshift_logging_fluentd_shared_key}}
+
+  secure {{openshift_logging_fluentd_aggregating_secure}}
+  enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}}
+  ca_cert_path        {{openshift_logging_fluentd_aggregating_cert_path}}
+  ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}}
+  ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}}
+
+  <client>
+    host {{openshift_logging_fluentd_aggregating_host}}
+  </client>
+</source>
+{% endif %}
+
+<label @INGRESS>
+{% if deploy_type in ['hosted', 'secure-host'] %}
+## filters
+  @include configs.d/openshift/filter-pre-*.conf
+  @include configs.d/openshift/filter-retag-journal.conf
+  @include configs.d/openshift/filter-k8s-meta.conf
+  @include configs.d/openshift/filter-kibana-transform.conf
+  @include configs.d/openshift/filter-k8s-flatten-hash.conf
+  @include configs.d/openshift/filter-k8s-record-transform.conf
+  @include configs.d/openshift/filter-syslog-record-transform.conf
+  @include configs.d/openshift/filter-viaq-data-model.conf
+  @include configs.d/openshift/filter-post-*.conf
+##
+</label>
+
+<label @OUTPUT>
+## matches
+  @include configs.d/openshift/output-pre-*.conf
+  @include configs.d/openshift/output-operations.conf
+  @include configs.d/openshift/output-applications.conf
+  # no post - applications.conf matches everything left
+##
+{% else %}
+  <match **>
+    @type secure_forward
+
+    self_hostname ${HOSTNAME}
+    shared_key {{openshift_logging_fluentd_shared_key}}
+
+    secure {{openshift_logging_fluentd_aggregating_secure}}
+    enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}}
+    ca_cert_path        {{openshift_logging_fluentd_aggregating_cert_path}}
+    ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}}
+    ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}}
+
+    <server>
+      host {{openshift_logging_fluentd_aggregating_host}}
+      port {{openshift_logging_fluentd_aggregating_port}}
+    </server>
+  </match>
+{% endif %}
+</label>

+ 249 - 0
roles/openshift_logging_fluentd/templates/5.x/fluentd.j2

@@ -0,0 +1,249 @@
+apiVersion: extensions/v1beta1
+kind: "DaemonSet"
+metadata:
+  name: "{{ daemonset_name }}"
+  labels:
+    provider: openshift
+    component: "{{ daemonset_component }}"
+    logging-infra: "{{ daemonset_component }}"
+spec:
+  selector:
+    matchLabels:
+      provider: openshift
+      component: "{{ daemonset_component }}"
+  minReadySeconds: 600
+  updateStrategy:
+    type: RollingUpdate
+  template:
+    metadata:
+      name: "{{ daemonset_container_name }}"
+      labels:
+        logging-infra: "{{ daemonset_component }}"
+        provider: openshift
+        component: "{{ daemonset_component }}"
+    spec:
+      serviceAccountName: "{{ daemonset_serviceAccount }}"
+      nodeSelector:
+        {{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}"
+      containers:
+      - name: "{{ daemonset_container_name }}"
+        image: "{{ openshift_logging_fluentd_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_fluentd_image_version }}"
+        imagePullPolicy: IfNotPresent
+        securityContext:
+          privileged: true
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %}
+        resources:
+{%   if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) %}
+          limits:
+{%     if fluentd_cpu_limit is not none %}
+            cpu: "{{fluentd_cpu_limit}}"
+{%     endif %}
+{%     if fluentd_memory_limit is not none %}
+            memory: "{{fluentd_memory_limit}}"
+{%     endif %}
+{%   endif %}
+{%   if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %}
+          requests:
+{%     if fluentd_cpu_request is not none %}
+            cpu: "{{fluentd_cpu_request}}"
+{%     endif %}
+{%     if fluentd_memory_limit is not none %}
+            memory: "{{fluentd_memory_limit}}"
+{%     endif %}
+{%   endif %}
+{% endif %}
+        volumeMounts:
+        - name: runlogjournal
+          mountPath: /run/log/journal
+        - name: varlog
+          mountPath: /var/log
+        - name: varlibdockercontainers
+          mountPath: /var/lib/docker/containers
+          readOnly: true
+        - name: config
+          mountPath: /etc/fluent/configs.d/user
+          readOnly: true
+        - name: certs
+          mountPath: /etc/fluent/keys
+          readOnly: true
+        - name: dockerhostname
+          mountPath: /etc/docker-hostname
+          readOnly: true
+        - name: localtime
+          mountPath: /etc/localtime
+          readOnly: true
+        - name: dockercfg
+          mountPath: /etc/sysconfig/docker
+          readOnly: true
+        - name: dockerdaemoncfg
+          mountPath: /etc/docker
+          readOnly: true
+        - name: filebufferstorage
+          mountPath: /var/lib/fluentd
+{% if openshift_logging_mux_client_mode is defined and
+     ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
+      (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
+        - name: muxcerts
+          mountPath: /etc/fluent/muxkeys
+          readOnly: true
+{% endif %}
+        env:
+        - name: "K8S_HOST_URL"
+          value: "{{ openshift_logging_fluentd_master_url }}"
+        - name: "ES_HOST"
+          value: "{{ app_host }}"
+        - name: "ES_PORT"
+          value: "{{ app_port }}"
+        - name: "ES_CLIENT_CERT"
+          value: "{{ openshift_logging_fluentd_app_client_cert }}"
+        - name: "ES_CLIENT_KEY"
+          value: "{{ openshift_logging_fluentd_app_client_key }}"
+        - name: "ES_CA"
+          value: "{{ openshift_logging_fluentd_app_ca }}"
+        - name: "OPS_HOST"
+          value: "{{ ops_host }}"
+        - name: "OPS_PORT"
+          value: "{{ ops_port }}"
+        - name: "OPS_CLIENT_CERT"
+          value: "{{ openshift_logging_fluentd_ops_client_cert }}"
+        - name: "OPS_CLIENT_KEY"
+          value: "{{ openshift_logging_fluentd_ops_client_key }}"
+        - name: "OPS_CA"
+          value: "{{ openshift_logging_fluentd_ops_ca }}"
+        - name: "JOURNAL_SOURCE"
+          value: "{{ openshift_logging_fluentd_journal_source | default('') }}"
+        - name: "JOURNAL_READ_FROM_HEAD"
+          value: "{{ openshift_logging_fluentd_journal_read_from_head | lower }}"
+        - name: "BUFFER_QUEUE_LIMIT"
+          value: "{{ openshift_logging_fluentd_buffer_queue_limit }}"
+        - name: "BUFFER_SIZE_LIMIT"
+          value: "{{ openshift_logging_fluentd_buffer_size_limit }}"
+        - name: "FLUENTD_CPU_LIMIT"
+          valueFrom:
+            resourceFieldRef:
+              containerName: "{{ daemonset_container_name }}"
+              resource: limits.cpu
+        - name: "FLUENTD_MEMORY_LIMIT"
+          valueFrom:
+            resourceFieldRef:
+              containerName: "{{ daemonset_container_name }}"
+              resource: limits.memory
+        - name: "FILE_BUFFER_LIMIT"
+          value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256Mi') }}"
+{% if openshift_logging_mux_client_mode is defined and
+     ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
+      (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
+        - name: "MUX_CLIENT_MODE"
+          value: "{{ openshift_logging_mux_client_mode }}"
+{% endif %}
+{% if openshift_logging_install_eventrouter is defined and openshift_logging_install_eventrouter %}
+        - name: "TRANSFORM_EVENTS"
+          value: "true"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog is defined and openshift_logging_fluentd_remote_syslog %}
+        - name: USE_REMOTE_SYSLOG
+          value: "true"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_host is defined %}
+        - name: REMOTE_SYSLOG_HOST
+          value: "{{ openshift_logging_fluentd_remote_syslog_host }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_port is defined %}
+        - name: REMOTE_SYSLOG_PORT
+          value: "{{ openshift_logging_fluentd_remote_syslog_port }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_severity is defined %}
+        - name: REMOTE_SYSLOG_SEVERITY
+          value: "{{ openshift_logging_fluentd_remote_syslog_severity }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_facility is defined %}
+        - name: REMOTE_SYSLOG_FACILITY
+          value: "{{ openshift_logging_fluentd_remote_syslog_facility }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_remove_tag_prefix is defined %}
+        - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX
+          value: "{{ openshift_logging_fluentd_remote_syslog_remove_tag_prefix }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_tag_key is defined %}
+        - name: REMOTE_SYSLOG_TAG_KEY
+          value: "{{ openshift_logging_fluentd_remote_syslog_tag_key }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_use_record is defined %}
+        - name: REMOTE_SYSLOG_USE_RECORD
+          value: "{{ openshift_logging_fluentd_remote_syslog_use_record }}"
+{% endif %}
+
+{% if openshift_logging_fluentd_remote_syslog_payload_key is defined %}
+        - name: REMOTE_SYSLOG_PAYLOAD_KEY
+          value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}"
+{% endif %}
+
+{% if audit_container_engine %}
+        - name: "AUDIT_CONTAINER_ENGINE"
+          value: "{{ audit_container_engine | lower }}"
+{% endif %}
+
+{% if audit_container_engine %}
+        - name: "NODE_NAME"
+          valueFrom:
+            fieldRef:
+              fieldPath: spec.nodeName
+{% endif %}
+
+{% if audit_log_file != '' %}
+        - name: AUDIT_FILE
+          value: "{{ audit_log_file }}"
+{% endif %}
+
+{% if audit_pos_log_file != '' %}
+        - name: AUDIT_POS_FILE
+          value: "{{ audit_pos_log_file }}"
+{% endif %}
+
+      volumes:
+      - name: runlogjournal
+        hostPath:
+          path: /run/log/journal
+      - name: varlog
+        hostPath:
+          path: /var/log
+      - name: varlibdockercontainers
+        hostPath:
+          path: /var/lib/docker/containers
+      - name: config
+        configMap:
+          name: logging-fluentd
+      - name: certs
+        secret:
+          secretName: logging-fluentd
+      - name: dockerhostname
+        hostPath:
+          path: /etc/hostname
+      - name: localtime
+        hostPath:
+          path: /etc/localtime
+      - name: dockercfg
+        hostPath:
+          path: /etc/sysconfig/docker
+      - name: dockerdaemoncfg
+        hostPath:
+          path: /etc/docker
+{% if openshift_logging_mux_client_mode is defined and
+     ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
+      (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
+      - name: muxcerts
+        secret:
+          secretName: logging-mux
+{% endif %}
+      - name: filebufferstorage
+        hostPath:
+          path: "/var/lib/fluentd"

+ 3 - 3
roles/openshift_logging_kibana/tasks/main.yaml

@@ -133,7 +133,7 @@
 
 - name: Generating Kibana route template
   template:
-    src: route_reencrypt.j2
+    src: "{{ __base_file_dir }}/route_reencrypt.j2"
     dest: "{{ tempdir }}/templates/kibana-route.yaml"
   vars:
     obj_name: "{{ kibana_name }}"
@@ -174,7 +174,7 @@
 # create oauth client
 - name: Create oauth-client template
   template:
-    src: oauth-client.j2
+    src: "{{ __base_file_dir }}/oauth-client.j2"
     dest: "{{ tempdir }}/templates/oauth-client.yml"
   vars:
     kibana_hostnames: "{{ proxy_hostnames | unique }}"
@@ -233,7 +233,7 @@
 # create Kibana DC
 - name: Generate Kibana DC template
   template:
-    src: kibana.j2
+    src: "{{ __base_file_dir }}/kibana.j2"
     dest: "{{ tempdir }}/templates/kibana-dc.yaml"
   vars:
     component: "{{ kibana_component }}"

roles/openshift_logging_kibana/templates/kibana.j2 → roles/openshift_logging_kibana/templates/2.x/kibana.j2


roles/openshift_logging_kibana/templates/oauth-client.j2 → roles/openshift_logging_kibana/templates/2.x/oauth-client.j2


+ 36 - 0
roles/openshift_logging_kibana/templates/2.x/route_reencrypt.j2

@@ -0,0 +1,36 @@
+apiVersion: "v1"
+kind: "Route"
+metadata:
+  name: "{{obj_name}}"
+{% if labels is defined %}
+  labels:
+{% for key, value in labels.items() %}
+    {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+  host: {{ route_host }}
+  tls:
+{% if tls_key is defined and tls_key | length > 0 %}
+    key: |
+{{ tls_key|indent(6, true) }}
+{% if tls_cert is defined and tls_cert | length > 0 %}
+    certificate: |
+{{ tls_cert|indent(6, true) }}
+{% endif %}
+{% endif %}
+    caCertificate: |
+{% for line in tls_ca_cert.split('\n') %}
+      {{ line }}
+{% endfor %}
+    destinationCACertificate: |
+{% for line in tls_dest_ca_cert.split('\n') %}
+      {{ line }}
+{% endfor %}
+    termination: reencrypt
+{% if edge_term_policy is defined and edge_term_policy | length > 0 %}
+    insecureEdgeTerminationPolicy: {{ edge_term_policy }}
+{% endif %}
+  to:
+    kind: Service
+    name: {{ service_name }}

+ 170 - 0
roles/openshift_logging_kibana/templates/5.x/kibana.j2

@@ -0,0 +1,170 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+  name: "{{ deploy_name }}"
+  labels:
+    provider: openshift
+    component: "{{ component }}"
+    logging-infra: "{{ logging_component }}"
+spec:
+  replicas: {{ kibana_replicas | default(1) }}
+  selector:
+    provider: openshift
+    component: "{{ component }}"
+    logging-infra: "{{ logging_component }}"
+  strategy:
+    rollingParams:
+      intervalSeconds: 1
+      timeoutSeconds: 600
+      updatePeriodSeconds: 1
+    type: Rolling
+  template:
+    metadata:
+      name: "{{ deploy_name }}"
+      labels:
+        logging-infra: "{{ logging_component }}"
+        provider: openshift
+        component: "{{ component }}"
+    spec:
+      serviceAccountName: aggregated-logging-kibana
+{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %}
+      nodeSelector:
+{% for key, value in kibana_node_selector.items() %}
+        {{ key }}: "{{ value }}"
+{% endfor %}
+{% endif %}
+      containers:
+        -
+          name: "kibana"
+          image: {{ image }}
+          imagePullPolicy: IfNotPresent
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %}
+          resources:
+{%   if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}
+            limits:
+{%     if kibana_cpu_limit is not none and kibana_cpu_limit != "" %}
+              cpu: "{{ kibana_cpu_limit }}"
+{%     endif %}
+{%     if kibana_memory_limit is not none and kibana_memory_limit != "" %}
+              memory: "{{ kibana_memory_limit }}"
+{%     endif %}
+{%   endif %}
+{%   if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %}
+            requests:
+{%     if kibana_cpu_request is not none and kibana_cpu_request != "" %}
+              cpu: "{{ kibana_cpu_request }}"
+{%     endif %}
+{%     if kibana_memory_limit is not none and kibana_memory_limit != "" %}
+              memory: "{{ kibana_memory_limit }}"
+{%     endif %}
+{%   endif %}
+{% endif %}
+          env:
+            - name: "ES_URL"
+              value: "https://{{ es_host }}:{{ es_port }}"
+            -
+              name: "KIBANA_MEMORY_LIMIT"
+              valueFrom:
+                resourceFieldRef:
+                  containerName: kibana
+                  resource: limits.memory
+          volumeMounts:
+            - name: kibana
+              mountPath: /etc/kibana/keys
+              readOnly: true
+          readinessProbe:
+            exec:
+              command:
+              - "/usr/share/kibana/probe/readiness.sh"
+            initialDelaySeconds: 5
+            timeoutSeconds: 4
+            periodSeconds: 5
+        -
+          name: "kibana-proxy"
+          image: {{ proxy_image }}
+          imagePullPolicy: IfNotPresent
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %}
+          resources:
+{%   if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}
+            limits:
+{%     if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %}
+              cpu: "{{ kibana_proxy_cpu_limit }}"
+{%     endif %}
+{%     if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
+              memory: "{{ kibana_proxy_memory_limit }}"
+{%     endif %}
+{%   endif %}
+{%   if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %}
+            requests:
+{%     if kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "" %}
+              cpu: "{{ kibana_proxy_cpu_request }}"
+{%     endif %}
+{%     if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
+              memory: "{{ kibana_proxy_memory_limit }}"
+{%     endif %}
+{%   endif %}
+{% endif %}
+          ports:
+            -
+              name: "oaproxy"
+              containerPort: 3000
+          env:
+            -
+             name: "OAP_BACKEND_URL"
+             value: "http://localhost:5601"
+            -
+             name: "OAP_AUTH_MODE"
+             value: "oauth2"
+            -
+             name: "OAP_TRANSFORM"
+             value: "user_header,token_header"
+            -
+             name: "OAP_OAUTH_ID"
+             value: kibana-proxy
+            -
+             name: "OAP_MASTER_URL"
+             value: {{ openshift_logging_kibana_master_url }}
+            -
+             name: "OAP_PUBLIC_MASTER_URL"
+             value: {{ openshift_logging_kibana_master_public_url }}
+            -
+             name: "OAP_LOGOUT_REDIRECT"
+             value: {{ openshift_logging_kibana_master_public_url }}/console/logout
+            -
+             name: "OAP_MASTER_CA_FILE"
+             value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+            -
+             name: "OAP_DEBUG"
+             value: "{{ openshift_logging_kibana_proxy_debug }}"
+            -
+             name: "OAP_OAUTH_SECRET_FILE"
+             value: "/secret/oauth-secret"
+            -
+             name: "OAP_SERVER_CERT_FILE"
+             value: "/secret/server-cert"
+            -
+             name: "OAP_SERVER_KEY_FILE"
+             value: "/secret/server-key"
+            -
+             name: "OAP_SERVER_TLS_FILE"
+             value: "/secret/server-tls.json"
+            -
+             name: "OAP_SESSION_SECRET_FILE"
+             value: "/secret/session-secret"
+            -
+             name: "OCP_AUTH_PROXY_MEMORY_LIMIT"
+             valueFrom:
+               resourceFieldRef:
+                 containerName: kibana-proxy
+                 resource: limits.memory
+          volumeMounts:
+            - name: kibana-proxy
+              mountPath: /secret
+              readOnly: true
+      volumes:
+        - name: kibana
+          secret:
+            secretName: logging-kibana
+        - name: kibana-proxy
+          secret:
+            secretName: logging-kibana-proxy

+ 16 - 0
roles/openshift_logging_kibana/templates/5.x/oauth-client.j2

@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: OAuthClient
+metadata:
+  name: kibana-proxy
+  labels:
+    logging-infra: support
+secret: {{ secret }}
+redirectURIs:
+{% for host in kibana_hostnames %}
+- {{ host }}
+{% endfor %}
+scopeRestrictions:
+- literals:
+  - user:info
+  - user:check-access
+  - user:list-projects

+ 36 - 0
roles/openshift_logging_kibana/templates/5.x/route_reencrypt.j2

@@ -0,0 +1,36 @@
+apiVersion: "v1"
+kind: "Route"
+metadata:
+  name: "{{obj_name}}"
+{% if labels is defined%}
+  labels:
+{% for key, value in labels.items() %}
+    {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+  host: {{ route_host }}
+  tls:
+{% if tls_key is defined and tls_key | length > 0 %}
+    key: |
+{{ tls_key|indent(6, true) }}
+{% if tls_cert is defined and tls_cert | length > 0 %}
+    certificate: |
+{{ tls_cert|indent(6, true) }}
+{% endif %}
+{% endif %}
+    caCertificate: |
+{% for line in tls_ca_cert.split('\n') %}
+      {{ line }}
+{% endfor %}
+    destinationCACertificate: |
+{% for line in tls_dest_ca_cert.split('\n') %}
+      {{ line }}
+{% endfor %}
+    termination: reencrypt
+{% if edge_term_policy is defined and edge_term_policy | length > 0 %}
+    insecureEdgeTerminationPolicy: {{ edge_term_policy }}
+{% endif %}
+  to:
+    kind: Service
+    name: {{ service_name }}

roles/openshift_logging_mux/files/fluent.conf → roles/openshift_logging_mux/files/2.x/fluent.conf


+ 26 - 0
roles/openshift_logging_mux/files/2.x/secure-forward.conf

@@ -0,0 +1,26 @@
+# <store>
+# @type secure_forward
+
+# self_hostname ${HOSTNAME}
+# shared_key <SECRET_STRING>
+
+# secure yes
+# enable_strict_verification yes
+
+# ca_cert_path /etc/fluent/keys/your_ca_cert
+# ca_private_key_path /etc/fluent/keys/your_private_key
+  # for private CA secret key
+# ca_private_key_passphrase passphrase
+
+# <server>
+  # or IP
+#   host server.fqdn.example.com
+#   port 24284
+# </server>
+# <server>
+  # ip address to connect
+#   host 203.0.113.8
+  # specify hostlabel for FQDN verification if ipaddress is used for host
+#   hostlabel server.fqdn.example.com
+# </server>
+# </store>

+ 37 - 0
roles/openshift_logging_mux/files/5.x/fluent.conf

@@ -0,0 +1,37 @@
+# This file is the fluentd configuration entrypoint. Edit with care.
+
+@include configs.d/openshift/system.conf
+
+# In each section below, pre- and post- includes don't include anything initially;
+# they exist to enable future additions to openshift conf as needed.
+
+## sources
+## ordered so that syslog always runs last...
+@include configs.d/openshift/input-pre-*.conf
+@include configs.d/dynamic/input-docker-*.conf
+@include configs.d/dynamic/input-syslog-*.conf
+@include configs.d/openshift/input-post-*.conf
+##
+
+<label @INGRESS>
+## filters
+  @include configs.d/openshift/filter-pre-*.conf
+  @include configs.d/openshift/filter-retag-journal.conf
+  @include configs.d/openshift/filter-k8s-meta.conf
+  @include configs.d/openshift/filter-kibana-transform.conf
+  @include configs.d/openshift/filter-k8s-flatten-hash.conf
+  @include configs.d/openshift/filter-k8s-record-transform.conf
+  @include configs.d/openshift/filter-syslog-record-transform.conf
+  @include configs.d/openshift/filter-viaq-data-model.conf
+  @include configs.d/openshift/filter-post-*.conf
+##
+</label>
+
+<label @OUTPUT>
+## matches
+  @include configs.d/openshift/output-pre-*.conf
+  @include configs.d/openshift/output-operations.conf
+  @include configs.d/openshift/output-applications.conf
+  # no post - applications.conf matches everything left
+##
+</label>

+ 26 - 0
roles/openshift_logging_mux/files/5.x/secure-forward.conf

@@ -0,0 +1,26 @@
+# <store>
+# @type secure_forward
+
+# self_hostname ${HOSTNAME}
+# shared_key <SECRET_STRING>
+
+# secure yes
+# enable_strict_verification yes
+
+# ca_cert_path /etc/fluent/keys/your_ca_cert
+# ca_private_key_path /etc/fluent/keys/your_private_key
+  # for private CA secret key
+# ca_private_key_passphrase passphrase
+
+# <server>
+  # or IP
+#   host server.fqdn.example.com
+#   port 24284
+# </server>
+# <server>
+  # ip address to connect
+#   host 203.0.113.8
+  # specify hostlabel for FQDN verification if ipaddress is used for host
+#   hostlabel server.fqdn.example.com
+# </server>
+# </store>

+ 3 - 3
roles/openshift_logging_mux/tasks/main.yaml

@@ -86,12 +86,12 @@
 
 # create Mux configmap
 - copy:
-    src: fluent.conf
+    src: "{{ __base_file_dir }}/fluent.conf"
     dest: "{{mktemp.stdout}}/fluent-mux.conf"
   changed_when: no
 
 - copy:
-    src: secure-forward.conf
+    src: "{{ __base_file_dir }}/secure-forward.conf"
     dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
   changed_when: no
 
@@ -170,7 +170,7 @@
 # create Mux DC
 - name: Generating mux deploymentconfig
   template:
-    src: mux.j2
+    src: "{{ __base_file_dir }}/mux.j2"
     dest: "{{mktemp.stdout}}/templates/logging-mux-dc.yaml"
   vars:
     component: mux

roles/openshift_logging_mux/templates/mux.j2 → roles/openshift_logging_mux/templates/2.x/mux.j2


+ 202 - 0
roles/openshift_logging_mux/templates/5.x/mux.j2

@@ -0,0 +1,202 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+  name: "{{deploy_name}}"
+  labels:
+    provider: openshift
+    component: "{{component}}"
+    logging-infra: "{{logging_component}}"
+spec:
+  replicas: {{mux_replicas|default(1)}}
+  selector:
+    provider: openshift
+    component: "{{component}}"
+    logging-infra: "{{logging_component}}"
+  strategy:
+    rollingParams:
+      intervalSeconds: 1
+      timeoutSeconds: 600
+      updatePeriodSeconds: 1
+    type: Rolling
+  template:
+    metadata:
+      name: "{{deploy_name}}"
+      labels:
+        logging-infra: "{{logging_component}}"
+        provider: openshift
+        component: "{{component}}"
+    spec:
+      serviceAccountName: aggregated-logging-mux
+{% if mux_node_selector is iterable and mux_node_selector | length > 0 %}
+      nodeSelector:
+{% for key, value in mux_node_selector.items() %}
+        {{key}}: "{{value}}"
+{% endfor %}
+{% endif %}
+      containers:
+      - name: "mux"
+        image: {{image}}
+        imagePullPolicy: IfNotPresent
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %}
+        resources:
+{%   if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
+          limits:
+{%     if mux_cpu_limit is not none %}
+            cpu: "{{mux_cpu_limit}}"
+{%     endif %}
+{%     if mux_memory_limit is not none %}
+            memory: "{{mux_memory_limit}}"
+{%     endif %}
+{%   endif %}
+{%   if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %}
+          requests:
+{%     if mux_cpu_request is not none %}
+            cpu: "{{mux_cpu_request}}"
+{%     endif %}
+{%     if mux_memory_limit is not none %}
+            memory: "{{mux_memory_limit}}"
+{%     endif %}
+{%   endif %}
+{% endif %}
+        ports:
+        - containerPort: {{ openshift_logging_mux_port }}
+          name: mux-forward
+        volumeMounts:
+        - name: config
+          mountPath: /etc/fluent/configs.d/user
+          readOnly: true
+        - name: certs
+          mountPath: /etc/fluent/keys
+          readOnly: true
+        - name: dockerhostname
+          mountPath: /etc/docker-hostname
+          readOnly: true
+        - name: localtime
+          mountPath: /etc/localtime
+          readOnly: true
+        - name: muxcerts
+          mountPath: /etc/fluent/muxkeys
+          readOnly: true
+        - name: filebufferstorage
+          mountPath: /var/lib/fluentd
+        env:
+        - name: "K8S_HOST_URL"
+          value: "{{openshift_logging_mux_master_url}}"
+        - name: "ES_HOST"
+          value: "{{openshift_logging_mux_app_host}}"
+        - name: "ES_PORT"
+          value: "{{openshift_logging_mux_app_port}}"
+        - name: "ES_CLIENT_CERT"
+          value: "{{openshift_logging_mux_app_client_cert}}"
+        - name: "ES_CLIENT_KEY"
+          value: "{{openshift_logging_mux_app_client_key}}"
+        - name: "ES_CA"
+          value: "{{openshift_logging_mux_app_ca}}"
+        - name: "OPS_HOST"
+          value: "{{openshift_logging_mux_ops_host}}"
+        - name: "OPS_PORT"
+          value: "{{openshift_logging_mux_ops_port}}"
+        - name: "OPS_CLIENT_CERT"
+          value: "{{openshift_logging_mux_ops_client_cert}}"
+        - name: "OPS_CLIENT_KEY"
+          value: "{{openshift_logging_mux_ops_client_key}}"
+        - name: "OPS_CA"
+          value: "{{openshift_logging_mux_ops_ca}}"
+        - name: "JOURNAL_SOURCE"
+          value: "{{openshift_logging_mux_journal_source | default('')}}"
+        - name: "JOURNAL_READ_FROM_HEAD"
+          value: "{{openshift_logging_mux_journal_read_from_head|lower}}"
+        - name: FORWARD_LISTEN_HOST
+          value: "{{ openshift_logging_mux_hostname }}"
+        - name: FORWARD_LISTEN_PORT
+          value: "{{ openshift_logging_mux_port }}"
+        - name: USE_MUX
+          value: "true"
+        - name: "BUFFER_QUEUE_LIMIT"
+          value: "{{ openshift_logging_mux_buffer_queue_limit }}"
+        - name: "BUFFER_SIZE_LIMIT"
+          value: "{{ openshift_logging_mux_buffer_size_limit }}"
+        - name: "MUX_CPU_LIMIT"
+          valueFrom:
+            resourceFieldRef:
+              containerName: "mux"
+              resource: limits.cpu
+        - name: "MUX_MEMORY_LIMIT"
+          valueFrom:
+            resourceFieldRef:
+              containerName: "mux"
+              resource: limits.memory
+        - name: "FILE_BUFFER_LIMIT"
+          value: "{{ openshift_logging_mux_file_buffer_limit | default('2Gi') }}"
+
+{% if openshift_logging_mux_remote_syslog is defined and openshift_logging_mux_remote_syslog %}
+        - name: USE_REMOTE_SYSLOG
+          value: "true"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_host is defined %}
+        - name: REMOTE_SYSLOG_HOST
+          value: "{{ openshift_logging_mux_remote_syslog_host }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_port is defined %}
+        - name: REMOTE_SYSLOG_PORT
+          value: "{{ openshift_logging_mux_remote_syslog_port }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_severity is defined %}
+        - name: REMOTE_SYSLOG_SEVERITY
+          value: "{{ openshift_logging_mux_remote_syslog_severity }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_facility is defined %}
+        - name: REMOTE_SYSLOG_FACILITY
+          value: "{{ openshift_logging_mux_remote_syslog_facility }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_remove_tag_prefix is defined %}
+        - name: REMOTE_SYSLOG_REMOVE_TAG_PREFIX
+          value: "{{ openshift_logging_mux_remote_syslog_remove_tag_prefix }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_tag_key is defined %}
+        - name: REMOTE_SYSLOG_TAG_KEY
+          value: "{{ openshift_logging_mux_remote_syslog_tag_key }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_use_record is defined %}
+        - name: REMOTE_SYSLOG_USE_RECORD
+          value: "{{ openshift_logging_mux_remote_syslog_use_record }}"
+{% endif %}
+
+{% if openshift_logging_mux_remote_syslog_payload_key is defined %}
+        - name: REMOTE_SYSLOG_PAYLOAD_KEY
+          value: "{{ openshift_logging_mux_remote_syslog_payload_key }}"
+{% endif %}
+
+      volumes:
+      - name: config
+        configMap:
+          name: logging-mux
+      - name: certs
+        secret:
+          secretName: logging-fluentd
+      - name: dockerhostname
+        hostPath:
+          path: /etc/hostname
+      - name: localtime
+        hostPath:
+          path: /etc/localtime
+      - name: muxcerts
+        secret:
+          secretName: logging-mux
+      - name: filebufferstorage
+{% if openshift_logging_mux_file_buffer_storage_type == 'pvc' %}
+        persistentVolumeClaim:
+          claimName: {{ openshift_logging_mux_file_buffer_pvc_name }}
+{% elif openshift_logging_mux_file_buffer_storage_type == 'hostmount' %}
+        hostPath:
+          path: "/var/log/fluentd"
+{% else %}
+        emptydir: {}
+{% endif %}