Browse Source

Removing shell module calls and cleaning up changed_when reporting

ewolinetz 8 years ago
parent
commit
55ddb4f4b9

+ 37 - 12
roles/openshift_logging/tasks/generate_certs.yaml

@@ -91,6 +91,7 @@
   register: serviceaccount_result
   ignore_errors: yes
   when: not ansible_check_mode
+  changed_when: no
 
 - name: Create jks-generator service account
   command: >
@@ -98,35 +99,59 @@
   when: not ansible_check_mode and "not found" in serviceaccount_result.stderr
 
 - name: Check for hostmount-anyuid scc entry
-  shell: >
-    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o jsonpath='{.users}' |
-    grep system:serviceaccount:{{openshift_logging_namespace | quote}}:jks-generator
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get scc hostmount-anyuid -o jsonpath='{.users}'
   register: scc_result
-  ignore_errors: yes
   when: not ansible_check_mode
+  changed_when: no
 
 - name: Add to hostmount-anyuid scc
   command: >
     {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig policy add-scc-to-user hostmount-anyuid -z jks-generator -n {{openshift_logging_namespace}}
-  when: not ansible_check_mode and scc_result.rc == 1
+  when:
+    - not ansible_check_mode
+    - scc_result.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:jks-generator") == -1
 
-- name: Copy jks script
+- name: Copy JKS generation script
   copy:
     src: generate-jks.sh
     dest: "{{generated_certs_dir}}/generate-jks.sh"
   check_mode: no
 
-- name: Generate JKS chains
+- name: Generate JKS pod template
   template:
     src: jks_pod.j2
     dest: "{{mktemp.stdout}}/jks_pod.yaml"
   check_mode: no
+  changed_when: no
+
+# Check whether the pod-generated JKS files already exist; if they all do, skip running the pod
+- name: Checking for elasticsearch.jks
+  stat: path="{{generated_certs_dir}}/elasticsearch.jks"
+  register: elasticsearch_jks
+  check_mode: no
+
+- name: Checking for logging-es.jks
+  stat: path="{{generated_certs_dir}}/logging-es.jks"
+  register: logging_es_jks
+  check_mode: no
+
+- name: Checking for system.admin.jks
+  stat: path="{{generated_certs_dir}}/system.admin.jks"
+  register: system_admin_jks
+  check_mode: no
+
+- name: Checking for truststore.jks
+  stat: path="{{generated_certs_dir}}/truststore.jks"
+  register: truststore_jks
+  check_mode: no
 
-- name: create pod
+- name: create JKS generation pod
   command: >
     {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{mktemp.stdout}}/jks_pod.yaml -n {{openshift_logging_namespace}} -o name
   register: podoutput
   check_mode: no
+  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
 
 - command: >
     {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{podoutput.stdout}} -o jsonpath='{.status.phase}' -n {{openshift_logging_namespace}}
@@ -134,13 +159,13 @@
   until: result.stdout.find("Succeeded") != -1
   retries: 5
   delay: 10
+  changed_when: no
+  when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
 
 - name: Generate proxy session
-  command: echo {{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}}
-  register: session_secret
+  set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}}
   check_mode: no
 
 - name: Generate oauth client secret
-  command: echo {{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}
-  register: oauth_secret
+  set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}
   check_mode: no

+ 1 - 0
roles/openshift_logging/tasks/generate_clusterrolebindings.yaml

@@ -10,3 +10,4 @@
         name: "{{acct_name}}"
         namespace: "{{openshift_logging_namespace}}"
   check_mode: no
+  changed_when: no

+ 1 - 0
roles/openshift_logging/tasks/generate_clusterroles.yaml

@@ -8,3 +8,4 @@
         verbs:
           - get
   check_mode: no
+  changed_when: no

+ 6 - 0
roles/openshift_logging/tasks/generate_deploymentconfigs.yaml

@@ -9,6 +9,7 @@
     es_host: logging-es
     es_port: "{{openshift_logging_es_port}}"
   check_mode: no
+  changed_when: no
 
 - name: Generate OPS kibana deploymentconfig
   template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-ops-dc.yaml
@@ -20,6 +21,7 @@
     es_host: logging-es-ops
     es_port: "{{openshift_logging_es_ops_port}}"
   check_mode: no
+  changed_when: no
 
 - name: Generate elasticsearch deploymentconfig
   template: src=es.j2 dest={{mktemp.stdout}}/logging-es-dc.yaml
@@ -30,6 +32,7 @@
     image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
     es_cluster_name: "{{component}}"
   check_mode: no
+  changed_when: no
 
 - name: Generate OPS elasticsearch deploymentconfig
   template: src=es.j2 dest={{mktemp.stdout}}/logging-es-ops-dc.yaml
@@ -40,6 +43,7 @@
     image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
     es_cluster_name: "{{component}}"
   check_mode: no
+  changed_when: no
 
 - name: Generate curator deploymentconfig
   template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-dc.yaml
@@ -48,6 +52,7 @@
     deploy_name: "logging-{{component}}"
     image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
   check_mode: no
+  changed_when: no
 
 - name: Generate OPS curator deploymentconfig
   template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-ops-dc.yaml
@@ -57,3 +62,4 @@
     image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
     openshift_logging_es_host: logging-es-ops
   check_mode: no
+  changed_when: no

+ 2 - 0
roles/openshift_logging/tasks/generate_pvcs.yaml

@@ -26,6 +26,7 @@
     - not openshift_logging_es_pvc_dynamic
     - es_pvc_pool is defined
   check_mode: no
+  changed_when: no
 
 - name: Generating PersistentVolumeClaims - Dynamic
   template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
@@ -45,3 +46,4 @@
     - openshift_logging_es_pvc_dynamic
     - es_pvc_pool is defined
   check_mode: no
+  changed_when: no

+ 1 - 0
roles/openshift_logging/tasks/generate_rolebindings.yaml

@@ -9,3 +9,4 @@
       - kind: ServiceAccount
         name: aggregated-logging-elasticsearch
   check_mode: no
+  changed_when: no

+ 1 - 0
roles/openshift_logging/tasks/generate_routes.yaml

@@ -18,3 +18,4 @@
   loop_control:
     loop_var: route_info
   when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops) or route_info.name == 'logging-kibana'
+  changed_when: no

+ 11 - 7
roles/openshift_logging/tasks/generate_secrets.yaml

@@ -9,10 +9,10 @@
     - { name: "curator_key", file: "system.logging.curator.key"}
     - { name: "curator_cert", file: "system.logging.curator.crt"}
     - { name: "fluentd_key", file: "system.logging.fluentd.key"}
-    - { name: "fluentd_cert", file: "system.logging.fluentd.crt"}    
-    - { name: "kibana_internal_key", file: "kibana-internal.key"}    
-    - { name: "kibana_internal_cert", file: "kibana-internal.crt"}    
-    - { name: "server_tls", file: "server-tls.json"}    
+    - { name: "fluentd_cert", file: "system.logging.fluentd.crt"}
+    - { name: "kibana_internal_key", file: "kibana-internal.key"}
+    - { name: "kibana_internal_cert", file: "kibana-internal.crt"}
+    - { name: "server_tls", file: "server-tls.json"}
 
 - name: Generating secrets for logging components
   template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
@@ -34,14 +34,15 @@
   when: secret_name not in openshift_logging_facts.{{component}}.secrets or
         secret_keys | difference(openshift_logging_facts.{{component}}.secrets["{{secret_name}}"]["keys"]) | length != 0
   check_mode: no
+  changed_when: no
 
 - name: Generating secrets for kibana proxy
   template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
   vars:
     secret_name: logging-kibana-proxy
-    secrets: 
-     - {key: oauth-secret, value: "{{oauth_secret.stdout}}"}
-     - {key: session-secret, value: "{{session_secret.stdout}}"}
+    secrets:
+     - {key: oauth-secret, value: "{{oauth_secret}}"}
+     - {key: session-secret, value: "{{session_secret}}"}
      - {key: server-key, value: "{{kibana_key_file}}"}
      - {key: server-cert, value: "{{kibana_cert_file}}"}
      - {key: server-tls, value: "{{server_tls_file}}"}
@@ -52,6 +53,7 @@
   when: secret_name not in openshift_logging_facts.kibana.secrets or
         secret_keys | difference(openshift_logging_facts.kibana.secrets["{{secret_name}}"]["keys"]) | length != 0
   check_mode: no
+  changed_when: no
 
 - name: Generating secrets for elasticsearch
   command: >
@@ -67,7 +69,9 @@
   when: secret_name not in openshift_logging_facts.elasticsearch.secrets or
         secret_keys | difference(openshift_logging_facts.elasticsearch.secrets["{{secret_name}}"]["keys"]) | length != 0
   check_mode: no
+  changed_when: no
 
 - copy: content="{{logging_es_secret.stdout}}" dest={{mktemp.stdout}}/templates/logging-elasticsearch-secret.yaml
   when: logging_es_secret.stdout is defined
   check_mode: no
+  changed_when: no

+ 1 - 0
roles/openshift_logging/tasks/generate_serviceaccounts.yaml

@@ -11,3 +11,4 @@
   loop_control:
     loop_var: component
   check_mode: no
+  changed_when: no

+ 6 - 0
roles/openshift_logging/tasks/generate_services.yaml

@@ -11,6 +11,7 @@
       provider: openshift
       component: es
   check_mode: no
+  changed_when: no
 
 - name: Generating logging-es-cluster service
   template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-cluster-svc.yaml
@@ -24,6 +25,7 @@
       provider: openshift
       component: es
   check_mode: no
+  changed_when: no
 
 - name: Generating logging-kibana service
   template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-svc.yaml
@@ -37,6 +39,7 @@
       provider: openshift
       component: kibana
   check_mode: no
+  changed_when: no
 
 - name: Generating logging-es-ops service
   template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-svc.yaml
@@ -51,6 +54,7 @@
       component: es-ops
   when: openshift_logging_use_ops
   check_mode: no
+  changed_when: no
 
 - name: Generating logging-es-ops-cluster service
   template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-cluster-svc.yaml
@@ -65,6 +69,7 @@
       component: es-ops
   when: openshift_logging_use_ops
   check_mode: no
+  changed_when: no
 
 - name: Generating logging-kibana-ops service
   template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-svc.yaml
@@ -79,3 +84,4 @@
       component: kibana-ops
   when: openshift_logging_use_ops
   check_mode: no
+  changed_when: no

+ 20 - 0
roles/openshift_logging/tasks/install_curator.yaml

@@ -1,4 +1,20 @@
 ---
+- command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator
+    -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+  register: curator_replica_count
+  when: not ansible_check_mode
+  ignore_errors: yes
+  changed_when: no
+
+- command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator-ops
+    -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+  register: curator_ops_replica_count
+  when: not ansible_check_mode
+  ignore_errors: yes
+  changed_when: no
+
 - name: Generate curator deploymentconfig
   template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-dc.yaml
   vars:
@@ -10,7 +26,9 @@
     es_port: "{{openshift_logging_es_port}}"
     curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}"
     curator_memory_limit: "{{openshift_logging_curator_memory_limit }}"
+    replicas: "{{curator_replica_count.stdout | default (1)}}"
   check_mode: no
+  changed_when: no
 
 - name: Generate OPS curator deploymentconfig
   template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-ops-dc.yaml
@@ -23,5 +41,7 @@
     es_port: "{{openshift_logging_es_ops_port}}"
     curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}"
     curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}"
+    replicas: "{{curator_ops_replica_count.stdout | default (1)}}"
   when: openshift_logging_use_ops
   check_mode: no
+  changed_when: no

+ 2 - 0
roles/openshift_logging/tasks/install_elasticsearch.yaml

@@ -38,6 +38,7 @@
   check_mode: no
   when:
     - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+  changed_when: no
 
 # --------- Tasks for Operation clusters ---------
 
@@ -103,3 +104,4 @@
     - openshift_logging_use_ops
     - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
   check_mode: no
+  changed_when: no

+ 27 - 22
roles/openshift_logging/tasks/install_fluentd.yaml

@@ -1,22 +1,8 @@
 ---
-- command: >
-    echo "{{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}"
-  register: fluentd_ops_host
+- set_fact: fluentd_ops_host={{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
   check_mode: no
 
-- command: >
-    echo "{{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}"
-  register: fluentd_ops_port
-  check_mode: no
-
-- command: >
-    echo "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
-  register: openshift_logging_fluentd_nodeselector_key
-  check_mode: no
-
-- command: >
-    echo "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
-  register: openshift_logging_fluentd_nodeselector_value
+- set_fact: fluentd_ops_port={{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
   check_mode: no
 
 - name: Generating Fluentd daemonset
@@ -26,24 +12,43 @@
     daemonset_component: fluentd
     daemonset_container_name: fluentd-elasticsearch
     daemonset_serviceAccount: aggregated-logging-fluentd
-    ops_host: "{{ fluentd_ops_host.stdout }}"
-    ops_port: "{{ fluentd_ops_port.stdout }}"
-    fluentd_nodeselector_key: "{{openshift_logging_fluentd_nodeselector_key.stdout}}"
-    fluentd_nodeselector_value: "{{openshift_logging_fluentd_nodeselector_value.stdout}}"
+    ops_host: "{{ fluentd_ops_host }}"
+    ops_port: "{{ fluentd_ops_port }}"
+    fluentd_nodeselector_key: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
+    fluentd_nodeselector_value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
+  check_mode: no
+  changed_when: no
+
+- name: "Check fluentd privileged permissions"
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    get scc/privileged -o jsonpath='{.users}'
+  register: fluentd_privileged
   check_mode: no
+  changed_when: no
 
-- name: "Set permissions for fluentd"
+- name: "Set privileged permissions for fluentd"
   command: >
     {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
     add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
   register: fluentd_output
   failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
   check_mode: no
+  when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
+
+- name: "Check fluentd cluster-reader permissions"
+  command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+    get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
+  register: fluentd_cluster_reader
+  check_mode: no
+  changed_when: no
 
-- name: "Set additional permissions for fluentd"
+- name: "Set cluster-reader permissions for fluentd"
   command: >
     {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
     add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
   register: fluentd2_output
   failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
   check_mode: no
+  when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1

+ 23 - 0
roles/openshift_logging/tasks/install_kibana.yaml

@@ -1,4 +1,23 @@
 ---
+- command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana
+    -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+  register: kibana_replica_count
+  when: not ansible_check_mode
+  ignore_errors: yes
+  changed_when: no
+
+- command: >
+    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana-ops
+    -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+  register: kibana_ops_replica_count
+  when:
+    - not ansible_check_mode
+    - openshift_logging_use_ops
+  ignore_errors: yes
+  changed_when: no
+
+
 - name: Generate kibana deploymentconfig
   template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-dc.yaml
   vars:
@@ -13,7 +32,9 @@
     kibana_memory_limit: "{{openshift_logging_kibana_memory_limit }}"
     kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}"
     kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}"
+    replicas: "{{kibana_replica_count.stdout | default (0)}}"
   check_mode: no
+  changed_when: no
 
 - name: Generate OPS kibana deploymentconfig
   template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-dc.yaml
@@ -29,5 +50,7 @@
     kibana_memory_limit: "{{openshift_logging_kibana_ops_memory_limit }}"
     kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}"
     kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"
+    replicas: "{{kibana_ops_replica_count.stdout | default (0)}}"
   when: openshift_logging_use_ops
   check_mode: no
+  changed_when: no

+ 1 - 0
roles/openshift_logging/tasks/install_logging.yaml

@@ -27,6 +27,7 @@
   command: ls -1 {{mktemp.stdout}}/templates/
   register: logging_objects
   check_mode: no
+  changed_when: no
 
 - name: Creating API objects from generated templates
   command: >

+ 4 - 2
roles/openshift_logging/tasks/install_support.yaml

@@ -6,6 +6,7 @@
   register: logging_project_result
   ignore_errors: yes
   when: not ansible_check_mode
+  changed_when: no
 
 - name: "Create logging project"
   command: >
@@ -37,9 +38,10 @@
 - name: Generate kibana-proxy oauth client
   template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml
   vars:
-    secret: "{{oauth_secret.stdout}}"
-  when: oauth_secret.stdout is defined
+    secret: "{{oauth_secret}}"
+  when: oauth_secret is defined
   check_mode: no
+  changed_when: no
 
 - include: generate_clusterroles.yaml
 

+ 1 - 0
roles/openshift_logging/tasks/label_node.yaml

@@ -5,6 +5,7 @@
   register: label_value
   failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr
   when: not ansible_check_mode
+  changed_when: no
 
 - command: >
     {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite

+ 2 - 4
roles/openshift_logging/tasks/procure_server_certs.yaml

@@ -10,14 +10,12 @@
   check_mode: no
 
 - name: Trying to discover server cert variable name for {{ cert_info.procure_component }}
-  command: echo "{{ lookup('env', '{{cert_info.procure_component}}' + '_crt') }}"
-  register: procure_component_crt
+  set_fact: procure_component_crt={{ lookup('env', '{{cert_info.procure_component}}' + '_crt') }}
   when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
   check_mode: no
 
 - name: Trying to discover the server key variable name for {{ cert_info.procure_component }}
-  command: echo "{{ lookup('env', '{{cert_info.procure_component}}' + '_key') }}"
-  register: procure_component_key
+  set_fact: procure_component_key={{ lookup('env', '{{cert_info.procure_component}}' + '_key') }}
   when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
   check_mode: no
 

+ 2 - 0
roles/openshift_logging/tasks/scale.yaml

@@ -5,6 +5,7 @@
   register: replica_count
   failed_when: replica_count.rc == 1 and 'exists' not in replica_count.stderr
   when: not ansible_check_mode
+  changed_when: no
 
 - command: >
     {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}}
@@ -24,3 +25,4 @@
   when:
     - not ansible_check_mode
     - replica_count.stdout|int != desired
+  changed_when: no

+ 9 - 12
roles/openshift_logging/tasks/start_cluster.yaml

@@ -1,26 +1,17 @@
 ---
 - command: >
-    echo "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
-  register: openshift_logging_fluentd_nodeselector_key
-  check_mode: no
-
-- command: >
-    echo "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
-  register: openshift_logging_fluentd_nodeselector_value
-  check_mode: no
-
-- command: >
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}'
   register: fluentd_hosts
   when: "'--all' in openshift_logging_fluentd_hosts"
   check_mode: no
+  changed_when: no
 
 - name: start fluentd
   include: label_node.yaml
   vars:
     host: "{{fluentd_host}}"
-    label: "{{openshift_logging_fluentd_nodeselector_key.stdout}}"
-    value: "{{openshift_logging_fluentd_nodeselector_value.stdout}}"
+    label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
+    value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
   with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
   loop_control:
     loop_var: fluentd_host
@@ -29,6 +20,7 @@
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
   register: es_dc
   check_mode: no
+  changed_when: no
 
 - name: start elasticsearch
   include: scale.yaml
@@ -42,6 +34,7 @@
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
   register: kibana_dc
   check_mode: no
+  changed_when: no
 
 - name: start kibana
   include: scale.yaml
@@ -55,6 +48,7 @@
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
   register: curator_dc
   check_mode: no
+  changed_when: no
 
 - name: start curator
   include: scale.yaml
@@ -68,6 +62,7 @@
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
   register: es_dc
   check_mode: no
+  changed_when: no
 
 - name: start elasticsearch-ops
   include: scale.yaml
@@ -82,6 +77,7 @@
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
   register: kibana_dc
   check_mode: no
+  changed_when: no
 
 - name: start kibana-ops
   include: scale.yaml
@@ -96,6 +92,7 @@
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
   register: curator_dc
   check_mode: no
+  changed_when: no
 
 - name: start curator-ops
   include: scale.yaml

+ 8 - 9
roles/openshift_logging/tasks/stop_cluster.yaml

@@ -1,22 +1,15 @@
 ---
 - command: >
-    echo "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
-  register: openshift_logging_fluentd_nodeselector_key
-
-- command: >
-    echo "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
-  register: openshift_logging_fluentd_nodeselector_value
-
-- command: >
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}'
   register: fluentd_hosts
   when: "'--all' in openshift_logging_fluentd_hosts"
+  changed_when: no
 
 - name: stop fluentd
   include: label_node.yaml
   vars:
     host: "{{fluentd_host}}"
-    label: "{{openshift_logging_fluentd_nodeselector_key.stdout}}"
+    label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
     unlabel: True
   with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
   loop_control:
@@ -25,6 +18,7 @@
 - command: >
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
   register: es_dc
+  changed_when: no
 
 - name: stop elasticsearch
   include: scale.yaml
@@ -37,6 +31,7 @@
 - command: >
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
   register: kibana_dc
+  changed_when: no
 
 - name: stop kibana
   include: scale.yaml
@@ -49,6 +44,7 @@
 - command: >
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
   register: curator_dc
+  changed_when: no
 
 - name: stop curator
   include: scale.yaml
@@ -61,6 +57,7 @@
 - command: >
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
   register: es_dc
+  changed_when: no
 
 - name: stop elasticsearch-ops
   include: scale.yaml
@@ -74,6 +71,7 @@
 - command: >
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
   register: kibana_dc
+  changed_when: no
 
 - name: stop kibana-ops
   include: scale.yaml
@@ -87,6 +85,7 @@
 - command: >
     {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
   register: curator_dc
+  changed_when: no
 
 - name: stop curator-ops
   include: scale.yaml

+ 1 - 1
roles/openshift_logging/templates/curator.j2

@@ -7,7 +7,7 @@ metadata:
     component: "{{component}}"
     logging-infra: "{{logging_component}}"
 spec:
-  replicas: 0
+  replicas: {{replicas|default(0)}}
   selector:
     provider: openshift
     component: "{{component}}"

+ 1 - 1
roles/openshift_logging/templates/es.j2

@@ -8,7 +8,7 @@ metadata:
     deployment: "{{deploy_name}}"
     logging-infra: "{{logging_component}}"
 spec:
-  replicas: 0
+  replicas: {{replicas|default(0)}}
   selector:
     provider: openshift
     component: "{{component}}"

+ 1 - 1
roles/openshift_logging/templates/kibana.j2

@@ -7,7 +7,7 @@ metadata:
     component: "{{component}}"
     logging-infra: "{{logging_component}}"
 spec:
-  replicas: 0
+  replicas: {{replicas|default(0)}}
   selector:
     provider: openshift
     component: "{{component}}"