
Bug 1506073: Lower CPU request for logging when it exceeds the limit

Jeff Cantrill 7 years ago
commit 205d03455c

+ 2 - 0
roles/openshift_logging/README.md

@@ -297,6 +297,8 @@ oc delete pod --selector=<ds_selector>
 
 Changelog
 ---------
+Thu Oct 26, 2017
+- Make the CPU request equal to the CPU limit when the request is greater than the limit
 
 Tue Oct 10, 2017
 - Default imagePullPolicy changed from Always to IfNotPresent 

+ 27 - 0
roles/openshift_logging/filter_plugins/openshift_logging.py

@@ -3,6 +3,7 @@
 '''
 
 import random
+import re
 
 
 def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
@@ -17,6 +18,31 @@ def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
     return dict(kind='emptydir')
 
 
+def min_cpu(left, right):
+    '''Return the minimum cpu value of the two values given'''
+    message = "Unable to evaluate whether the {} cpu value '{}' is specified correctly. Expected a whole number, a decimal, or a number followed by 'M' or 'm'"
+    # Accept a whole or decimal number with an optional 'M'/'m' millicore suffix.
+    pattern = re.compile(r"^(\d+(?:\.\d*)?|\.\d+)([Mm])?$")
+    millis_per_core = 1000
+    if not right:
+        return left
+    m_left = pattern.match(left)
+    if not m_left:
+        raise RuntimeError(message.format("left", left))
+    m_right = pattern.match(right)
+    if not m_right:
+        raise RuntimeError(message.format("right", right))
+    left_value = float(m_left.group(1))
+    right_value = float(m_right.group(1))
+    # A bare number is whole cores; normalize it to millicores for comparison.
+    if m_left.group(2) not in ["M", "m"]:
+        left_value = left_value * millis_per_core
+    if m_right.group(2) not in ["M", "m"]:
+        right_value = right_value * millis_per_core
+    # Return whichever original string represents the smaller quantity.
+    return left if left_value <= right_value else right
+
+
 def walk(source, path, default, delimiter='.'):
     '''Walk the source hash given the path and return the value or default if not found'''
     if not isinstance(source, dict):
@@ -87,6 +113,7 @@ class FilterModule(object):
             'random_word': random_word,
             'entry_from_named_pair': entry_from_named_pair,
             'map_from_pairs': map_from_pairs,
+            'min_cpu': min_cpu,
             'es_storage': es_storage,
             'serviceaccount_name': serviceaccount_name,
             'serviceaccount_namespace': serviceaccount_namespace,

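The new filter normalizes both quantities to millicores (a bare number means whole cores; an 'M'/'m' suffix marks millicores) and returns whichever original string represents the smaller quantity, so a request can never exceed its limit. A minimal sketch of the behavior, with illustrative values:

    from openshift_logging import min_cpu

    min_cpu("500m", "200m")   # -> "200m"  (request lowered to the limit)
    min_cpu("100m", "1")      # -> "100m"  (100 millicores < 1 core; request kept)
    min_cpu("1000M", None)    # -> "1000M" (no limit given; request unchanged)

Kubernetes itself spells millicores with a lowercase 'm'; the filter tolerates both cases.
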
+ 13 - 0
roles/openshift_logging/filter_plugins/test

@@ -1,7 +1,20 @@
 import unittest
 from openshift_logging import walk
+from openshift_logging import min_cpu
 
 class TestFilterMethods(unittest.TestCase):
+
+    def test_min_cpu_for_none(self):
+        source = "1000M"
+        self.assertEqual(min_cpu(source, None), "1000M")
+
+    def test_min_cpu_for_millis(self):
+        source = "1"
+        self.assertEqual(min_cpu(source, "0.1"), "0.1")
+
+    def test_min_cpu_for_whole(self):
+        source = "120M"
+        self.assertEqual(min_cpu(source, "2"), "120M")
 
 
     def test_walk_find_key(self):

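A hypothetical extra case (not part of this commit) that would exercise min_cpu's validation path; "100Mi" is a memory-style suffix the filter's pattern rejects:

    import unittest

    from openshift_logging import min_cpu

    class TestMinCpuValidation(unittest.TestCase):
        def test_rejects_unknown_suffix(self):
            with self.assertRaises(RuntimeError):
                min_cpu("100Mi", "1")

    if __name__ == '__main__':
        unittest.main()
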
+ 1 - 1
roles/openshift_logging_curator/tasks/main.yaml

@@ -90,7 +90,7 @@
     es_host: "{{ openshift_logging_curator_es_host }}"
     es_port: "{{ openshift_logging_curator_es_port }}"
     curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
-    curator_cpu_request: "{{ openshift_logging_curator_cpu_request }}"
+    curator_cpu_request: "{{ openshift_logging_curator_cpu_request | min_cpu(openshift_logging_curator_cpu_limit | default(none)) }}"
     curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"
     curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
     curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"

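A rough sketch of how the templated expression above resolves, using plain Jinja2 with stand-in variable names (request, limit) and illustrative values rather than the real Ansible templar:

    from jinja2 import Environment

    from openshift_logging import min_cpu

    env = Environment()
    env.filters["min_cpu"] = min_cpu

    template = env.from_string("{{ request | min_cpu(limit | default(none)) }}")

    print(template.render(request="500m", limit="200m"))  # -> 200m (request capped at limit)
    print(template.render(request="100m"))                # -> 100m (limit undefined, request kept)
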
+ 1 - 1
roles/openshift_logging_elasticsearch/tasks/main.yaml

@@ -354,7 +354,7 @@
     image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"
     proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}"
     es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit | default('') }}"
-    es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request }}"
+    es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request | min_cpu(openshift_logging_elasticsearch_cpu_limit | default(none)) }}"
     es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
     es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
     es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}"

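One subtlety here: es_cpu_limit falls back to an empty string while the filter argument falls back to none, but min_cpu's falsy guard treats both the same way and passes the request through unchanged. A quick illustrative check:

    from openshift_logging import min_cpu

    # A falsy limit (empty string or None) leaves the request untouched.
    assert min_cpu("600m", "") == "600m"
    assert min_cpu("600m", None) == "600m"
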
+ 1 - 2
roles/openshift_logging_fluentd/tasks/main.yaml

@@ -154,7 +154,6 @@
       path: "{{ generated_certs_dir }}/system.logging.fluentd.crt"
 
 # create Fluentd daemonset
-
 # this should change based on the type of fluentd deployment to be done...
 # TODO: pass in aggregation configurations
 - name: Generate logging-fluentd daemonset definition
@@ -173,7 +172,7 @@
     fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
     fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
     fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}"
-    fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request }}"
+    fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}"
     fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}"
     audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}"
     audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}"

+ 2 - 2
roles/openshift_logging_kibana/tasks/main.yaml

@@ -230,10 +230,10 @@
     es_host: "{{ openshift_logging_kibana_es_host }}"
     es_port: "{{ openshift_logging_kibana_es_port }}"
     kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}"
-    kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request }}"
+    kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request | min_cpu(openshift_logging_kibana_cpu_limit | default(none)) }}"
     kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
     kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
-    kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request }}"
+    kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request | min_cpu(openshift_logging_kibana_proxy_cpu_limit | default(none)) }}"
     kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
     kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
     kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"

+ 1 - 1
roles/openshift_logging_mux/tasks/main.yaml

@@ -171,7 +171,7 @@
     ops_host: "{{ openshift_logging_mux_ops_host }}"
     ops_port: "{{ openshift_logging_mux_ops_port }}"
     mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}"
-    mux_cpu_request: "{{ openshift_logging_mux_cpu_request }}"
+    mux_cpu_request: "{{ openshift_logging_mux_cpu_request | min_cpu(openshift_logging_mux_cpu_limit | default(none)) }}"
     mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"
     mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
     mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}"