Przeglądaj źródła

Fix broken debug_level

Currently, debug_level is documented as a way to change
the debug output level for both masters and nodes.

debug_level does not currently have any effect.

This commit removes debug_level from openshift_facts
and properly sets openshift_master_debug_level and
openshift_node_debug_level to the value of debug_level
specified in the inventory.

This commit also reorganizes some set_fact tasks
needed during master upgrades, putting all work-around
set_fact tasks for undefined variables in one place to allow
for easier cleanup in the future. This includes an
entry for openshift_master_debug_level.

Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1500164
Michael Gugino 7 lat temu
rodzic
commit
bb1d5f4525

+ 0 - 1
playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml

@@ -31,7 +31,6 @@
       role: master
       local_facts:
         embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
-        debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"
 
 - name: Upgrade and backup etcd
   include: ./etcd/main.yml

+ 0 - 1
roles/openshift_facts/library/openshift_facts.py

@@ -1907,7 +1907,6 @@ class OpenShiftFacts(object):
                                   portal_net='172.30.0.0/16',
                                   client_binary='oc', admin_binary='oadm',
                                   dns_domain='cluster.local',
-                                  debug_level=2,
                                   config_base='/etc/origin')
 
         if 'master' in roles:

+ 8 - 0
roles/openshift_master/defaults/main.yml

@@ -1,4 +1,9 @@
 ---
+# openshift_master_defaults_in_use is a workaround to detect if we are consuming
+# the plays from the role or outside of the role.
+openshift_master_defaults_in_use: True
+openshift_master_debug_level: "{{ debug_level | default(2) }}"
+
 r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
 r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 
@@ -26,6 +31,9 @@ oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
 oreg_auth_credentials_replace: False
 l_bind_docker_reg_auth: False
 
+containerized_svc_dir: "/usr/lib/systemd/system"
+ha_svc_template_path: "native-cluster"
+
 # NOTE
 # r_openshift_master_*_default may be defined external to this role.
 # openshift_use_*, if defined, may affect other roles or play behavior.

+ 0 - 10
roles/openshift_master/tasks/registry_auth.yml

@@ -1,14 +1,4 @@
 ---
-# We need to setup some variables as this play might be called directly
-# from outside of the role.
-- set_fact:
-    oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
-  when: oreg_auth_credentials_path is not defined
-
-- set_fact:
-    oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
-  when: oreg_host is not defined
-
 - name: Check for credentials file for registry auth
   stat:
     path: "{{ oreg_auth_credentials_path }}"

+ 7 - 27
roles/openshift_master/tasks/systemd_units.yml

@@ -1,31 +1,6 @@
 ---
-# This file is included both in the openshift_master role and in the upgrade
-# playbooks.  For that reason the ha_svc variables are use set_fact instead of
-# the vars directory on the role.
-
-# This play may be consumed outside the role, we need to ensure that
-# openshift_master_config_dir is set.
-- name: Set openshift_master_config_dir if unset
-  set_fact:
-    openshift_master_config_dir: '/etc/origin/master'
-  when: openshift_master_config_dir is not defined
-
-# This play may be consumed outside the role, we need to ensure that
-# r_openshift_master_data_dir is set.
-- name: Set r_openshift_master_data_dir if unset
-  set_fact:
-    r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
-  when: r_openshift_master_data_dir is not defined
-
-- include: registry_auth.yml
-
-- name: Remove the legacy master service if it exists
-  include: clean_systemd_units.yml
-
-- name: Init HA Service Info
-  set_fact:
-    containerized_svc_dir: "/usr/lib/systemd/system"
-    ha_svc_template_path: "native-cluster"
+- include: upgrade_facts.yml
+  when: openshift_master_defaults_in_use is not defined
 
 - name: Set HA Service Info for containerized installs
   set_fact:
@@ -34,6 +9,11 @@
   when:
   - openshift.common.is_containerized | bool
 
+- include: registry_auth.yml
+
+- name: Remove the legacy master service if it exists
+  include: clean_systemd_units.yml
+
 # This is the image used for both HA and non-HA clusters:
 - name: Pre-pull master image
   command: >

+ 33 - 0
roles/openshift_master/tasks/upgrade_facts.yml

@@ -0,0 +1,33 @@
+---
+# This file exists because we call systemd_units.yml from outside of the role
+# during upgrades.  When we remove this pattern, we can probably
+# eliminate most of these set_fact items.
+
+- name: Set openshift_master_config_dir if unset
+  set_fact:
+    openshift_master_config_dir: '/etc/origin/master'
+  when: openshift_master_config_dir is not defined
+
+- name: Set r_openshift_master_data_dir if unset
+  set_fact:
+    r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+  when: r_openshift_master_data_dir is not defined
+
+- set_fact:
+    oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
+  when: oreg_auth_credentials_path is not defined
+
+- set_fact:
+    oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+  when: oreg_host is not defined
+
+- name: Set openshift_master_debug_level
+  set_fact:
+    openshift_master_debug_level: "{{ debug_level | default(2) }}"
+  when:
+  - openshift_master_debug_level is not defined
+
+- name: Init HA Service Info
+  set_fact:
+    containerized_svc_dir: "{{ containerized_svc_dir | default('/usr/lib/systemd/system') }}"
+    ha_svc_template_path: "{{ ha_svc_template_path | default('native-cluster') }}"

+ 1 - 1
roles/openshift_master/templates/atomic-openshift-master.j2

@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level | default(2) }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }}
 CONFIG_FILE={{ openshift_master_config_file }}
 {# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
 {% if openshift_master_is_scaleup_host %}

+ 1 - 1
roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2

@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
 CONFIG_FILE={{ openshift_master_config_file }}
 {# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
 {% if openshift_master_is_scaleup_host %}

+ 1 - 1
roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2

@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
 CONFIG_FILE={{ openshift_master_config_file }}
 {# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
 {% if openshift_master_is_scaleup_host %}

+ 0 - 1
roles/openshift_master_facts/tasks/main.yml

@@ -34,7 +34,6 @@
       cluster_method: "{{ openshift_master_cluster_method | default('native') }}"
       cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
       cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
-      debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
       api_port: "{{ openshift_master_api_port | default(None) }}"
       api_url: "{{ openshift_master_api_url | default(None) }}"
       api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"

+ 2 - 0
roles/openshift_node/defaults/main.yml

@@ -1,4 +1,6 @@
 ---
+openshift_node_debug_level: "{{ debug_level | default(2) }}"
+
 r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
 r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
 

+ 1 - 1
roles/openshift_node/tasks/config/configure-node-settings.yml

@@ -7,7 +7,7 @@
     create: true
   with_items:
   - regex: '^OPTIONS='
-    line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+    line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
   - regex: '^CONFIG_FILE='
     line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
   - regex: '^IMAGE_VERSION='

+ 0 - 1
roles/openshift_node_facts/tasks/main.yml

@@ -11,7 +11,6 @@
   - role: node
     local_facts:
       annotations: "{{ openshift_node_annotations | default(none) }}"
-      debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
       iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
       kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
       labels: "{{ openshift_node_labels | default(None) }}"

+ 0 - 1
roles/openshift_node_upgrade/README.md

@@ -49,7 +49,6 @@ From openshift.node:
 
 | Name                               |  Default Value      |                     |
 |------------------------------------|---------------------|---------------------|
-| openshift.node.debug_level         |---------------------|---------------------|
 | openshift.node.node_image          |---------------------|---------------------|
 | openshift.node.ovs_image           |---------------------|---------------------|
 

+ 2 - 0
roles/openshift_node_upgrade/defaults/main.yml

@@ -1,4 +1,6 @@
 ---
+openshift_node_debug_level: "{{ debug_level | default(2) }}"
+
 openshift_use_openshift_sdn: True
 os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
 

+ 1 - 1
roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml

@@ -7,7 +7,7 @@
     create: true
   with_items:
   - regex: '^OPTIONS='
-    line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+    line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
   - regex: '^CONFIG_FILE='
     line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
   - regex: '^IMAGE_VERSION='

+ 1 - 1
roles/openshift_node_upgrade/tasks/systemd_units.yml

@@ -6,7 +6,7 @@
 # - openshift.node.ovs_image
 # - openshift_use_openshift_sdn
 # - openshift.common.service_type
-# - openshift.node.debug_level
+# - openshift_node_debug_level
 # - openshift.common.config_base
 # - openshift.common.http_proxy
 # - openshift.common.portal_net