Browse Source

Merge pull request #1638 from JayKayy/logging

Adding openshift_efk role
Brenton Leanhardt 9 years ago
parent
commit
1436e46e0b

+ 6 - 0
playbooks/adhoc/openshift_hosted_logging_efk.yaml

@@ -0,0 +1,6 @@
+---
+- hosts: masters[0]
+  roles:
+  - role: openshift_hosted_logging
+    openshift_hosted_logging_cleanup: false
+

+ 10 - 0
roles/openshift_hosted_logging/README.md

@@ -0,0 +1,10 @@
+### Required vars:
+
+- openshift_hosted_logging_hostname: kibana.example.com
+- openshift_hosted_logging_elasticsearch_cluster_size: 1
+- openshift_hosted_logging_master_public_url: https://localhost:8443
+
+### Optional vars:
+- openshift_hosted_logging_secret_vars: (defaults to nothing=/dev/null) kibana.crt=/etc/origin/master/ca.crt kibana.key=/etc/origin/master/ca.key ca.crt=/etc/origin/master/ca.crt ca.key=/etc/origin/master/ca.key
+- openshift_hosted_logging_fluentd_replicas: (defaults to 1) 3
+- openshift_hosted_logging_cleanup: (defaults to no) Set this to 'yes' in order to cleanup logging components instead of deploying.

+ 6 - 0
roles/openshift_hosted_logging/files/logging-deployer-sa.yaml

@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: logging-deployer
+secrets:
+- name: logging-deployer

+ 3 - 0
roles/openshift_hosted_logging/meta/main.yaml

@@ -0,0 +1,3 @@
+---
+dependencies:
+  - { role: openshift_common }

+ 59 - 0
roles/openshift_hosted_logging/tasks/cleanup_logging.yaml

@@ -0,0 +1,59 @@
+---
+  - name: Create temp directory for kubeconfig
+    command: mktemp -d /tmp/openshift-ansible-XXXXXX
+    register: mktemp
+    changed_when: False
+
+  - name: Copy the admin client config(s)
+    command: >
+      cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+    changed_when: False
+
+  - name: "Checking for logging project"
+    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project logging"
+    register: logging_project
+    failed_when: "'FAILED' in logging_project.stderr"
+
+  - name: "Changing projects"
+    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
+
+
+  - name: "Cleanup any previous logging infrastructure"
+    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all --selector logging-infra={{ item }}"
+    with_items:
+      - kibana
+      - fluentd
+      - elasticsearch
+    ignore_errors: yes
+
+  - name: "Cleanup existing support infrastructure"
+    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete --ignore-not-found all,sa,oauthclient --selector logging-infra=support"
+    ignore_errors: yes
+
+  - name: "Cleanup existing secrets"
+    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete secret logging-fluentd logging-elasticsearch logging-es-proxy logging-kibana logging-kibana-proxy logging-kibana-ops-proxy"
+    ignore_errors: yes
+    register: clean_result
+    failed_when: clean_result.rc == 1 and 'not found' not in clean_result.stderr
+
+  - name: "Cleanup existing logging deployers"
+    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete pods --all"
+
+
+  - name: "Cleanup logging project"
+    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete project logging"
+
+
+  - name: "Remove deployer template"
+    command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete template logging-deployer-template -n openshift"
+    register: delete_output
+    failed_when: delete_output.rc == 1 and 'not found' not in delete_output.stderr
+
+
+  - name: Delete temp directory
+    file:
+      name: "{{ mktemp.stdout }}"
+      state: absent
+    changed_when: False
+
+  - debug: msg="Success!"

+ 105 - 0
roles/openshift_hosted_logging/tasks/deploy_logging.yaml

@@ -0,0 +1,105 @@
+---
+  - fail: msg="This role requires the following vars to be defined. openshift_hosted_logging_master_public_url, openshift_hosted_logging_hostname, openshift_hosted_logging_elasticsearch_cluster_size"
+    when: "openshift_hosted_logging_hostname is not defined or
+          openshift_hosted_logging_elasticsearch_cluster_size is not defined or
+          openshift_hosted_logging_master_public_url is not defined"
+
+  - name: Create temp directory for kubeconfig
+    command: mktemp -d /tmp/openshift-ansible-XXXXXX
+    register: mktemp
+    changed_when: False
+
+  - name: Copy the admin client config(s)
+    command: >
+      cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+    changed_when: False
+
+  - name: "Create logging project"
+    command: "{{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project logging"
+
+  - name: "Changing projects"
+    command:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig project logging"
+
+  - name: "Creating logging deployer secret"
+    command: " {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new logging-deployer {{ openshift_hosted_logging_secret_vars | default('nothing=/dev/null') }}"
+    register: secret_output
+    failed_when: "secret_output.rc == 1 and 'exists' not in secret_output.stderr"
+
+  - name: "Copy serviceAccount file"
+    copy: dest=/tmp/logging-deployer-sa.yaml
+          src={{role_path}}/files/logging-deployer-sa.yaml
+          force=yes
+
+  - name: "Create logging-deployer service account"
+    command:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f  /tmp/logging-deployer-sa.yaml"
+    register: deployer_output
+    failed_when: "deployer_output.rc == 1 and 'exists' not in deployer_output.stderr"
+
+  - name: "Set permissions for logging-deployer service account"
+    command:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-role-to-user edit system:serviceaccount:logging:logging-deployer"
+    register: permiss_output
+    failed_when: "permiss_output.rc == 1 and 'exists' not in permiss_output.stderr"
+
+  - name: "Set permissions for fluentd"
+    command: "{{ openshift.common.admin_binary }} policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd"
+    register: fluentd_output
+    failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+
+  - name: "Set additional permissions for fluentd"
+    command: "{{ openshift.common.admin_binary }} policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd"
+    register: fluentd2_output
+    failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+
+  - name: "Create deployer template"
+    command:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f /usr/share/openshift/examples/infrastructure-templates/enterprise/logging-deployer.yaml -n openshift"
+    register: template_output
+    failed_when: "template_output.rc == 1 and 'exists' not in template_output.stderr"
+
+  - name: "Process the deployer template"
+    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig process logging-deployer-template -n openshift -v {{ oc_process_values }} |  {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -"
+
+  - name: "Wait for image pull and deployer pod"
+    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get pods | grep logging-deployer.*Completed"
+    register: result
+    until: result.rc == 0
+    retries: 15
+    delay: 10
+
+  - name: "Process support template"
+    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig process logging-support-template |  {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f -"
+
+  - name: "Set insecured registry"
+    command:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig annotate is --all  openshift.io/image.insecureRepository=true --overwrite"
+    when: "target_registry is defined and insecure_registry == 'true'"
+
+  - name: "Wait for imagestreams to become available"
+    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get is | grep logging-fluentd"
+    register: result
+    until: result.rc == 0
+    failed_when: result.rc == 1 and 'not found' not in result.stderr
+    retries: 20
+    delay: 10
+
+  - name: "Wait for replication controllers to become available"
+    shell:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get rc | grep logging-fluentd-1"
+    register: result
+    until: result.rc == 0
+    failed_when: result.rc == 1 and 'not found' not in result.stderr
+    retries: 20
+    delay: 10
+
+
+  - name: "Scale fluentd deployment config"
+    command:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale dc/logging-fluentd --replicas={{ openshift_hosted_logging_fluentd_replicas | default('1') }}"
+
+
+  - name: "Scale fluentd replication controller"
+    command:  "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale rc/logging-fluentd-1 --replicas={{ openshift_hosted_logging_fluentd_replicas | default('1') }}"
+
+  - debug: msg="Logging components deployed. Note persistent volume for Elasticsearch must be set up manually"
+
+  - name: Delete temp directory
+    file:
+      name: "{{ mktemp.stdout }}"
+      state: absent
+    changed_when: False

+ 8 - 0
roles/openshift_hosted_logging/tasks/main.yaml

@@ -0,0 +1,8 @@
+---
+- name: Cleanup logging deployment
+  include: "{{ role_path }}/tasks/cleanup_logging.yaml"
+  when: openshift_hosted_logging_cleanup | default(false) | bool
+
+- name: Deploy logging
+  include: "{{ role_path }}/tasks/deploy_logging.yaml"
+  when: not openshift_hosted_logging_cleanup | default(false) | bool

+ 6 - 0
roles/openshift_hosted_logging/vars/main.yaml

@@ -0,0 +1,6 @@
+kh_kv: "KIBANA_HOSTNAME={{ openshift_hosted_logging_hostname | quote }}"
+es_cs_kv: "ES_CLUSTER_SIZE={{ openshift_hosted_logging_elasticsearch_cluster_size | quote }}"
+pmu_kv: "PUBLIC_MASTER_URL={{ openshift_hosted_logging_master_public_url | quote }}"
+ip_kv: "{{ 'IMAGE_PREFIX=' ~ target_registry | quote if target_registry is defined else '' }}"
+oc_process_values: "{{ kh_kv }},{{ es_cs_kv }},{{ pmu_kv }},{{ ip_kv }}"
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"