
Merge pull request #3135 from ewolinetz/fix_logging_jks_gen

Updating to use local_action script to generate jks certs for logging
Scott Dodson 8 years ago
parent
commit
27271874f7

+ 1 - 0
openshift-ansible.spec

@@ -18,6 +18,7 @@ Requires:      python2
 Requires:      python-six
 Requires:      tar
 Requires:      openshift-ansible-docs = %{version}-%{release}
+Requires:      java-1.8.0-openjdk-headless
 Requires:      httpd-tools
 
 %description

+ 3 - 0
roles/openshift_logging/README.md

@@ -6,6 +6,9 @@ This role is used for installing the Aggregated Logging stack. It should be run
 a single host, it will create any missing certificates and API objects that the current
 [logging deployer](https://github.com/openshift/origin-aggregated-logging/tree/master/deployer) does.
 
+This role requires that the control host it is run on has Java installed as part of keystore
+generation for Elasticsearch (it uses JKS) as well as openssl to sign certificates.
+
 As part of the installation, it is recommended that you add the Fluentd node selector label
 to the list of persisted [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels).
 
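The README paragraph added above documents a control-host prerequisite: Java (for keytool/JKS generation) and openssl (for signing). A minimal, illustrative pre-flight check one could run on the control host before invoking the role; this sketch is not part of the PR and the message text is an assumption:

    #!/bin/sh
    # Illustrative pre-flight check for the control host (not part of this PR).
    # The JKS generation shells out to keytool (from a JRE) and to openssl.
    for tool in keytool openssl; do
      if ! command -v "$tool" >/dev/null 2>&1; then
        echo "ERROR: $tool not found on the control host" 1>&2
        exit 1
      fi
    done
    echo "Control host has keytool and openssl available."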

+ 11 - 1
roles/openshift_logging/files/generate-jks.sh

@@ -1,6 +1,10 @@
 #! /bin/sh
 set -ex
 
+function usage() {
+  echo Usage: `basename $0` cert_directory [logging_namespace] 1>&2
+}
+
 function generate_JKS_chain() {
     dir=${SCRATCH_DIR:-_output}
     ADD_OID=$1
@@ -147,8 +151,14 @@ function createTruststore() {
     -noprompt -alias sig-ca
 }
 
-dir="$CERT_DIR"
+if [ $# -lt 1 ]; then
+  usage
+  exit 1
+fi
+
+dir=$1
 SCRATCH_DIR=$dir
+PROJECT=${2:-logging}
 
 if [[ ! -f $dir/system.admin.jks || -z "$(keytool -list -keystore $dir/system.admin.jks -storepass kspass | grep sig-ca)" ]]; then
   generate_JKS_client_cert "system.admin"
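With this hunk, the script takes the certificate directory as a positional argument and the namespace as an optional second argument (defaulting to "logging"), instead of reading CERT_DIR from the environment. A hypothetical manual invocation, assuming the certificates live under /etc/origin/logging:

    # Hypothetical invocation; the directory path is illustrative only.
    sh generate-jks.sh /etc/origin/logging logging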

+ 2 - 76
roles/openshift_logging/tasks/generate_certs.yaml

@@ -85,82 +85,8 @@
   loop_control:
     loop_var: node_name
 
-- name: Check for jks-generator service account
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get serviceaccount/jks-generator --no-headers -n {{openshift_logging_namespace}}
-  register: serviceaccount_result
-  ignore_errors: yes
-  when: not ansible_check_mode
-  changed_when: no
-
-- name: Create jks-generator service account
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create serviceaccount jks-generator -n {{openshift_logging_namespace}}
-  when: not ansible_check_mode and "not found" in serviceaccount_result.stderr
-
-- name: Check for hostmount-anyuid scc entry
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o jsonpath='{.users}'
-  register: scc_result
-  when: not ansible_check_mode
-  changed_when: no
-
-- name: Add to hostmount-anyuid scc
-  command: >
-    {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user hostmount-anyuid -z jks-generator -n {{openshift_logging_namespace}}
-  when:
-    - not ansible_check_mode
-    - scc_result.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:jks-generator") == -1
-
-- name: Copy JKS generation script
-  copy:
-    src: generate-jks.sh
-    dest: "{{generated_certs_dir}}/generate-jks.sh"
-  check_mode: no
-
-- name: Generate JKS pod template
-  template:
-    src: jks_pod.j2
-    dest: "{{mktemp.stdout}}/jks_pod.yaml"
-  check_mode: no
-  changed_when: no
-
-# check if pod generated files exist -- if they all do don't run the pod
-- name: Checking for elasticsearch.jks
-  stat: path="{{generated_certs_dir}}/elasticsearch.jks"
-  register: elasticsearch_jks
-  check_mode: no
-
-- name: Checking for logging-es.jks
-  stat: path="{{generated_certs_dir}}/logging-es.jks"
-  register: logging_es_jks
-  check_mode: no
-
-- name: Checking for system.admin.jks
-  stat: path="{{generated_certs_dir}}/system.admin.jks"
-  register: system_admin_jks
-  check_mode: no
-
-- name: Checking for truststore.jks
-  stat: path="{{generated_certs_dir}}/truststore.jks"
-  register: truststore_jks
-  check_mode: no
-
-- name: create JKS generation pod
-  command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_pod.yaml -n {{openshift_logging_namespace}} -o name
-  register: podoutput
-  check_mode: no
-  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
-
-- command: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{podoutput.stdout}} -o jsonpath='{.status.phase}' -n {{openshift_logging_namespace}}
-  register: result
-  until: result.stdout.find("Succeeded") != -1
-  retries: 5
-  delay: 10
-  changed_when: no
-  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+- name: Creating necessary JKS certs
+  include: generate_jks.yaml
 
 # check for secret/logging-kibana-proxy
 - command: >

+ 111 - 0
roles/openshift_logging/tasks/generate_jks.yaml

@@ -0,0 +1,111 @@
+---
+# check if pod generated files exist -- if they all do don't run the pod
+- name: Checking for elasticsearch.jks
+  stat: path="{{generated_certs_dir}}/elasticsearch.jks"
+  register: elasticsearch_jks
+  check_mode: no
+
+- name: Checking for logging-es.jks
+  stat: path="{{generated_certs_dir}}/logging-es.jks"
+  register: logging_es_jks
+  check_mode: no
+
+- name: Checking for system.admin.jks
+  stat: path="{{generated_certs_dir}}/system.admin.jks"
+  register: system_admin_jks
+  check_mode: no
+
+- name: Checking for truststore.jks
+  stat: path="{{generated_certs_dir}}/truststore.jks"
+  register: truststore_jks
+  check_mode: no
+
+- name: Create temp directory for doing work in
+  local_action: command mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+  register: local_tmp
+  changed_when: False
+  check_mode: no
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+  file:
+    path: "{{local_tmp.stdout}}/elasticsearch.jks"
+    state: touch
+    mode: "u=rw,g=r,o=r"
+  when: elasticsearch_jks.stat.exists
+  changed_when: False
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+  file:
+    path: "{{local_tmp.stdout}}/logging-es.jks"
+    state: touch
+    mode: "u=rw,g=r,o=r"
+  when: logging_es_jks.stat.exists
+  changed_when: False
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+  file:
+    path: "{{local_tmp.stdout}}/system.admin.jks"
+    state: touch
+    mode: "u=rw,g=r,o=r"
+  when: system_admin_jks.stat.exists
+  changed_when: False
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+  file:
+    path: "{{local_tmp.stdout}}/truststore.jks"
+    state: touch
+    mode: "u=rw,g=r,o=r"
+  when: truststore_jks.stat.exists
+  changed_when: False
+
+- name: pulling down signing items from host
+  fetch:
+    src: "{{generated_certs_dir}}/{{item}}"
+    dest: "{{local_tmp.stdout}}/{{item}}"
+    flat: yes
+  with_items:
+    - ca.crt
+    - ca.key
+    - ca.serial.txt
+    - ca.crl.srl
+    - ca.db
+
+- local_action: template src=signing.conf.j2 dest={{local_tmp.stdout}}/signing.conf
+  vars:
+    - top_dir: "{{local_tmp.stdout}}"
+
+- name: Run JKS generation script
+  local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}}
+  check_mode: no
+  become: yes
+  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+  copy:
+    src: "{{local_tmp.stdout}}/elasticsearch.jks"
+    dest: "{{generated_certs_dir}}/elasticsearch.jks"
+  when: not elasticsearch_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+  copy:
+    src: "{{local_tmp.stdout}}/logging-es.jks"
+    dest: "{{generated_certs_dir}}/logging-es.jks"
+  when: not logging_es_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+  copy:
+    src: "{{local_tmp.stdout}}/system.admin.jks"
+    dest: "{{generated_certs_dir}}/system.admin.jks"
+  when: not system_admin_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+  copy:
+    src: "{{local_tmp.stdout}}/truststore.jks"
+    dest: "{{generated_certs_dir}}/truststore.jks"
+  when: not truststore_jks.stat.exists
+
+- name: Cleaning up temp dir
+  file:
+    path: "{{local_tmp.stdout}}"
+    state: absent
+  changed_when: False
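Taken together, these tasks fetch the CA signing material to a temporary directory on the control host, run generate-jks.sh there via local_action, and push any newly generated keystores back to the remote host. A rough shell equivalent of that flow, for illustration only; the hostname and certificate path are assumptions, not values from this PR:

    # Illustration of the playbook's local-generation flow (not part of this PR).
    # Assumes the remote certs live in /etc/origin/logging on host "master1".
    tmp=$(mktemp -d /tmp/openshift-logging-ansible-XXXXXX)
    for f in ca.crt ca.key ca.serial.txt ca.crl.srl ca.db; do
      scp "master1:/etc/origin/logging/$f" "$tmp/"        # fetch signing items
    done
    sh generate-jks.sh "$tmp" logging                      # generate keystores locally
    for jks in elasticsearch logging-es system.admin truststore; do
      scp "$tmp/$jks.jks" "master1:/etc/origin/logging/"   # push results back
    done
    rm -rf "$tmp"                                          # clean up temp dir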

+ 0 - 1
roles/openshift_logging/tasks/main.yaml

@@ -3,7 +3,6 @@
     msg: Only one Fluentd nodeselector key pair should be provided
   when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
 
-
 - name: Create temp directory for doing work in
   command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
   register: mktemp