Browse Source

Create logging deployments with non-zero replica counts

When we currently create the set of logging `DeploymentConfig`s, we
create them with zero desired replicas. This causes the deployment to
immediately succeed as there is no work to be done. This inhibits our
ability to use nice CLI UX features like `oc rollout status` to monitor
the logging stack deployments. Instead, we can create the configs
with the correct number of replicas in the first place and stop using
`oc scale` to bring them up after the fact.

Signed-off-by: Steve Kuznetsov <skuznets@redhat.com>
Steve Kuznetsov 7 years ago
parent
commit
108a42cbbd

+ 1 - 10
roles/openshift_logging_curator/tasks/main.yaml

@@ -91,7 +91,7 @@
     es_port: "{{ openshift_logging_curator_es_port }}"
     curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
     curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"
-    replicas: "{{ openshift_logging_curator_replicas | default (0)}}"
+    replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
     curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
   check_mode: no
   changed_when: no
@@ -106,15 +106,6 @@
     - "{{ tempdir }}/templates/curator-dc.yaml"
     delete_after: true
 
-# scale up
-- name: Start Curator
-  oc_scale:
-    kind: dc
-    name: "{{ curator_name }}"
-    namespace: "{{ openshift_logging_namespace }}"
-    replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
-
-
 - name: Delete temp directory
   file:
     name: "{{ tempdir }}"

+ 1 - 1
roles/openshift_logging_curator/templates/curator.j2

@@ -7,7 +7,7 @@ metadata:
     component: "{{component}}"
     logging-infra: "{{logging_component}}"
 spec:
-  replicas: {{replicas|default(0)}}
+  replicas: {{replicas|default(1)}}
   selector:
     provider: openshift
     component: "{{component}}"

+ 1 - 8
roles/openshift_logging_elasticsearch/tasks/main.yaml

@@ -256,6 +256,7 @@
     es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
     es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
     deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
+    replicas: 1
 
 - name: Set ES dc
   oc_obj:
@@ -267,14 +268,6 @@
     - "{{ tempdir }}/templates/logging-es-dc.yml"
     delete_after: true
 
-# scale up
-- name: Start Elasticsearch
-  oc_scale:
-    kind: dc
-    name: "{{ es_deploy_name }}"
-    namespace: "{{ openshift_logging_elasticsearch_namespace }}"
-    replicas: 1
-
 ## Placeholder for migration when necessary ##
 
 - name: Delete temp directory

+ 1 - 1
roles/openshift_logging_elasticsearch/templates/es.j2

@@ -8,7 +8,7 @@ metadata:
     deployment: "{{deploy_name}}"
     logging-infra: "{{logging_component}}"
 spec:
-  replicas: {{replicas|default(0)}}
+  replicas: {{replicas|default(1)}}
   selector:
     provider: openshift
     component: "{{component}}"

+ 1 - 9
roles/openshift_logging_kibana/tasks/main.yaml

@@ -210,7 +210,7 @@
     kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
     kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
     kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
-    replicas: "{{ openshift_logging_kibana_replicas | default (0) }}"
+    replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
     kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
 
 - name: Set Kibana DC
@@ -223,14 +223,6 @@
     - "{{ tempdir }}/templates/kibana-dc.yaml"
     delete_after: true
 
-# Scale up Kibana -- is this really necessary?
-- name: Start Kibana
-  oc_scale:
-    kind: dc
-    name: "{{ kibana_name }}"
-    namespace: "{{ openshift_logging_namespace }}"
-    replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
-
 # update master configs?
 
 - name: Delete temp directory

+ 1 - 1
roles/openshift_logging_kibana/templates/kibana.j2

@@ -7,7 +7,7 @@ metadata:
     component: "{{ component }}"
     logging-infra: "{{ logging_component }}"
 spec:
-  replicas: {{ replicas | default(0) }}
+  replicas: {{ replicas | default(1) }}
   selector:
     provider: openshift
     component: "{{ component }}"

+ 1 - 9
roles/openshift_logging_mux/tasks/main.yaml

@@ -172,7 +172,7 @@
     ops_port: "{{ openshift_logging_mux_ops_port }}"
     mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}"
     mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"
-    replicas: "{{ openshift_logging_mux_replicas | default(0) }}"
+    replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
     mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}"
   check_mode: no
   changed_when: no
@@ -187,14 +187,6 @@
     - "{{ tempdir }}/templates/logging-mux-dc.yaml"
     delete_after: true
 
-# Scale up Mux
-- name: Start Mux
-  oc_scale:
-    kind: dc
-    name: "logging-mux"
-    namespace: "{{ openshift_logging_mux_namespace }}"
-    replicas: "{{ openshift_logging_mux_replicas | default (1) }}"
-
 - name: Delete temp directory
   file:
     name: "{{ tempdir }}"

+ 1 - 1
roles/openshift_logging_mux/templates/mux.j2

@@ -7,7 +7,7 @@ metadata:
     component: "{{component}}"
     logging-infra: "{{logging_component}}"
 spec:
-  replicas: {{replicas|default(0)}}
+  replicas: {{replicas|default(1)}}
   selector:
     provider: openshift
     component: "{{component}}"