
openshift-prometheus: remove deprecated prometheus stack install

The older OpenShift Prometheus stack has been replaced by the openshift-monitoring stack.
Bug: https://bugzilla.redhat.com/show_bug.cgi?id=1629716
Paul Gier, 6 years ago
Commit 24b68f3646
46 files changed, 10 insertions(+), 27199 deletions(-)
  1. inventory/hosts.example (+0 -19)
  2. playbooks/cluster-operator/aws/components/openshift-prometheus.yml (+0 -25)
  3. playbooks/common/private/components.yml (+0 -8)
  4. playbooks/openshift-grafana/config.yml (+0 -8)
  5. playbooks/openshift-grafana/private/config.yml (+0 -31)
  6. playbooks/openshift-grafana/private/filter_plugins (+0 -1)
  7. playbooks/openshift-grafana/private/lookup_plugins (+0 -1)
  8. playbooks/openshift-grafana/private/roles (+0 -1)
  9. playbooks/openshift-grafana/private/uninstall.yml (+0 -10)
  10. playbooks/openshift-grafana/uninstall.yml (+0 -2)
  11. playbooks/openshift-prometheus/OWNERS (+0 -16)
  12. playbooks/openshift-prometheus/config.yml (+0 -9)
  13. playbooks/openshift-prometheus/private/config.yml (+0 -31)
  14. playbooks/openshift-prometheus/private/roles (+0 -1)
  15. playbooks/openshift-prometheus/private/uninstall.yml (+0 -8)
  16. playbooks/openshift-prometheus/uninstall.yml (+0 -2)
  17. roles/openshift_grafana/README.md (+0 -78)
  18. roles/openshift_grafana/defaults/main.yaml (+0 -60)
  19. roles/openshift_grafana/files/dashboards/node-exporter-full-dashboard.json (+0 -19478)
  20. roles/openshift_grafana/files/dashboards/openshift-cluster-monitoring.json (+0 -5057)
  21. roles/openshift_grafana/meta/main.yml (+0 -21)
  22. roles/openshift_grafana/tasks/facts.yaml (+0 -31)
  23. roles/openshift_grafana/tasks/install_grafana.yaml (+0 -284)
  24. roles/openshift_grafana/tasks/main.yaml (+0 -20)
  25. roles/openshift_grafana/tasks/uninstall_grafana.yaml (+0 -7)
  26. roles/openshift_grafana/templates/grafana-config.yml.j2 (+0 -387)
  27. roles/openshift_grafana/templates/grafana.yml.j2 (+0 -116)
  28. roles/openshift_prometheus/OWNERS (+0 -10)
  29. roles/openshift_prometheus/README.md (+0 -109)
  30. roles/openshift_prometheus/defaults/main.yaml (+0 -111)
  31. roles/openshift_prometheus/files/node-exporter-template.yaml (+0 -94)
  32. roles/openshift_prometheus/meta/main.yaml (+0 -21)
  33. roles/openshift_prometheus/tasks/facts.yaml (+0 -10)
  34. roles/openshift_prometheus/tasks/install_node_exporter.yaml (+0 -55)
  35. roles/openshift_prometheus/tasks/install_prometheus.yaml (+0 -271)
  36. roles/openshift_prometheus/tasks/main.yaml (+0 -33)
  37. roles/openshift_prometheus/tasks/uninstall_prometheus.yaml (+0 -107)
  38. roles/openshift_prometheus/templates/alertmanager.yml.j2 (+0 -20)
  39. roles/openshift_prometheus/templates/prometheus.j2 (+0 -309)
  40. roles/openshift_prometheus/templates/prometheus.rules.j2 (+0 -4)
  41. roles/openshift_prometheus/templates/prometheus.yml.j2 (+0 -323)
  42. roles/openshift_prometheus/tests/inventory (+0 -2)
  43. roles/openshift_prometheus/tests/test.yaml (+0 -5)
  44. roles/openshift_prometheus/vars/main.yml (+0 -2)
  45. roles/openshift_sanitize_inventory/tasks/unsupported.yml (+10 -0)
  46. test/ci/inventory/group_vars/OSEv3/vars.yml (+0 -1)

+ 0 - 19
inventory/hosts.example

@@ -701,25 +701,6 @@ debug_level=2
 # this value must be 1
 #openshift_logging_es_cluster_size=1
 
-# Prometheus deployment
-#
-# Currently prometheus deployment is disabled by default, enable it by setting this
-#openshift_hosted_prometheus_deploy=true
-#
-# Prometheus storage config
-# By default prometheus uses emptydir storage, if you want to persist you should
-# configure it to use pvc storage type. Each volume must be ReadWriteOnce.
-#openshift_prometheus_storage_type=emptydir
-#openshift_prometheus_alertmanager_storage_type=emptydir
-#openshift_prometheus_alertbuffer_storage_type=emptydir
-# Use PVCs for persistence
-#openshift_prometheus_storage_type=pvc
-#openshift_prometheus_alertmanager_storage_type=pvc
-#openshift_prometheus_alertbuffer_storage_type=pvc
-
-# Grafana deployment, requires Prometheus
-#openshift_hosted_grafana_deploy=true
-
 # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
 # os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
 

+ 0 - 25
playbooks/cluster-operator/aws/components/openshift-prometheus.yml

@@ -1,25 +0,0 @@
----
-- name: Alert user to variables needed
-  hosts: localhost
-  tasks:
-  - name: Alert user to variables needed - clusterid
-    debug:
-      msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
-
-  - name: Alert user to variables needed - region
-    debug:
-      msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-
-- name: Setup the master node group
-  hosts: localhost
-  tasks:
-  - import_role:
-      name: openshift_aws
-      tasks_from: setup_master_group.yml
-
-- name: run the init
-  import_playbook: ../../../init/main.yml
-
-- name: Run prometheus playbook
-  import_playbook: ../../../openshift-prometheus/private/config.yml
-  when: openshift_hosted_prometheus_deploy | default(false) | bool

+ 0 - 8
playbooks/common/private/components.yml

@@ -41,14 +41,6 @@
 - import_playbook: ../../openshift-logging/private/config.yml
   when: openshift_logging_install_logging | default(false) | bool
 
-- import_playbook: ../../openshift-prometheus/private/config.yml
-  when: openshift_hosted_prometheus_deploy | default(false) | bool
-
-- import_playbook: ../../openshift-grafana/private/config.yml
-  when:
-  - openshift_hosted_prometheus_deploy | default(false) | bool
-  - openshift_hosted_grafana_deploy | default(false) | bool
-
 - import_playbook: ../../openshift-monitor-availability/private/config.yml
   when: openshift_monitor_availability_install | default(false) | bool
 

+ 0 - 8
playbooks/openshift-grafana/config.yml

@@ -1,8 +0,0 @@
----
-- import_playbook: ../init/main.yml
-  vars:
-    l_init_fact_hosts: "oo_masters_to_config"
-    l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
-    l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
-
-- import_playbook: private/config.yml

+ 0 - 31
playbooks/openshift-grafana/private/config.yml

@@ -1,31 +0,0 @@
----
-- name: Grafana Install Checkpoint Start
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Grafana install 'In Progress'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_grafana:
-          title: "Grafana Install"
-          playbook: "playbooks/openshift-grafana/config.yml"
-          status: "In Progress"
-          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-
-- name: OpenShift Grafana
-  hosts: oo_first_master
-  roles:
-  - role: openshift_grafana
-
-- name: Grafana Install Checkpoint End
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Grafana install 'Complete'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_grafana:
-          status: "Complete"
-          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

+ 0 - 1
playbooks/openshift-grafana/private/filter_plugins

@@ -1 +0,0 @@
-../../../filter_plugins

+ 0 - 1
playbooks/openshift-grafana/private/lookup_plugins

@@ -1 +0,0 @@
-../../../lookup_plugins

+ 0 - 1
playbooks/openshift-grafana/private/roles

@@ -1 +0,0 @@
-../../../roles/

+ 0 - 10
playbooks/openshift-grafana/private/uninstall.yml

@@ -1,10 +0,0 @@
----
-- name: Uninstall Grafana
-  hosts: masters[0]
-  vars:
-    openshift_grafana_state: absent
-  tasks:
-  - name: Run the Grafana Uninstall Role Tasks
-    include_role:
-      name: openshift_grafana
-      tasks_from: uninstall_grafana.yaml

+ 0 - 2
playbooks/openshift-grafana/uninstall.yml

@@ -1,2 +0,0 @@
----
-- import_playbook: private/uninstall.yml

+ 0 - 16
playbooks/openshift-prometheus/OWNERS

@@ -1,16 +0,0 @@
-# approval == this is a good idea /approve
-approvers:
-  - zgalor
-  - pgier
-  - michaelgugino
-  - mtnbikenc
-  - sdodson
-  - vrutkovs
-# review == this code is good /lgtm
-reviewers:
-  - zgalor
-  - pgier
-  - michaelgugino
-  - mtnbikenc
-  - sdodson
-  - vrutkovs

+ 0 - 9
playbooks/openshift-prometheus/config.yml

@@ -1,9 +0,0 @@
----
-- import_playbook: ../init/main.yml
-  vars:
-    l_init_fact_hosts: "oo_masters_to_config"
-    l_openshift_version_set_hosts: "oo_masters_to_config:!oo_first_master"
-    l_sanity_check_hosts: "{{ groups['oo_masters_to_config'] }}"
-
-
-- import_playbook: private/config.yml

+ 0 - 31
playbooks/openshift-prometheus/private/config.yml

@@ -1,31 +0,0 @@
----
-- name: Prometheus Install Checkpoint Start
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Prometheus install 'In Progress'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_prometheus:
-          title: "Prometheus Install"
-          playbook: "playbooks/openshift-prometheus/config.yml"
-          status: "In Progress"
-          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-
-- name: OpenShift Prometheus
-  hosts: oo_first_master
-  roles:
-  - role: openshift_prometheus
-
-- name: Prometheus Install Checkpoint End
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Prometheus install 'Complete'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_prometheus:
-          status: "Complete"
-          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"

+ 0 - 1
playbooks/openshift-prometheus/private/roles

@@ -1 +0,0 @@
-../../../roles

+ 0 - 8
playbooks/openshift-prometheus/private/uninstall.yml

@@ -1,8 +0,0 @@
----
-- name: Uninstall Prometheus
-  hosts: masters[0]
-  tasks:
-  - name: Run the Prometheus Uninstall Role Tasks
-    include_role:
-      name: openshift_prometheus
-      tasks_from: uninstall_prometheus.yaml

+ 0 - 2
playbooks/openshift-prometheus/uninstall.yml

@@ -1,2 +0,0 @@
----
-- import_playbook: private/uninstall.yml

+ 0 - 78
roles/openshift_grafana/README.md

@@ -1,78 +0,0 @@
-OpenShift Grafana Playbooks
-===========================
-
-OpenShift Grafana Configuration.
-
-NOTE: Grafana is not yet supported by Red hat. This is community version of playbooks and grafana.
-
-This role handles the configuration of Grafana dashboard with Prometheus.
-
-Requirements
-------------
-
-* Ansible 2.2
-
-
-Host Variables
---------------
-
-For configuring new clusters, the following role variables are available.
-
-Each host in either of the above groups must have the following variable
-defined:
-
-| Name                                         | Default value     | Description                                  |
-|----------------------------------------------|-------------------|----------------------------------------------|
-| openshift_grafana_namespace                  | openshift-grafana | Default grafana namespace                    |
-| openshift_grafana_timeout                    | 300               | Default pod wait timeout                     |
-| openshift_grafana_prometheus_namespace       | openshift-metrics | Default prometheus namespace                 |
-| openshift_grafana_prometheus_serviceaccount  | prometheus        | Prometheus service account                   |
-| openshift_grafana_serviceaccount_name        | grafana           | Grafana service account name                 |
-| openshift_grafana_datasource_name            | prometheus        | Default datasource name                      |
-| openshift_grafana_node_exporter              | false             | Do we want to deploy node exported dashboard |
-| openshift_grafana_graph_granularity          | 2m                | Default dashboard granularity                |
-| openshift_grafana_node_selector              | {"region":"infra"}| Default node selector                        |
-| openshift_grafana_serviceaccount_annotations | empty             | Additional service account annotation list   |
-| openshift_grafana_dashboards                 | (check defaults)  | Additional list of dashboards to deploy      |
-| openshift_grafana_hostname                   | grafana           | Grafana route hostname                       |
-| openshift_grafana_service_name               | grafana           | Grafana Service name                         |
-| openshift_grafana_service_port               | 443               | Grafana service port                         |
-| openshift_grafana_service_targetport         | 8443              | Grafana TargetPort to auth proxy             |
-| openshift_grafana_container_port             | 3000              | Grafana container port                       |
-| openshift_grafana_oauth_proxy_memory_requests| nil               | OAuthProxy memory request                    |
-| openshift_grafana_oauth_proxy_cpu_requests   | nil               | OAuthProxy CPY request                       |
-| openshift_grafana_oauth_proxy_memory_limit   | nil               | OAuthProxy Memory Limit                      |
-| openshift_grafana_oauth_proxy_cpu_limit      | nil               | OAuthProxy CPY limit                         |
-| openshift_grafana_storage_type               | emptydir          | Default grafana storage type [emptydir, pvc] |
-| openshift_grafana_pvc_name                   | grafana           | Grafana Storage Claim name                   |
-| openshift_grafana_pvc_access_modes           | ReadWriteOnce     | Grafana Storage Claim mode                   |
-| openshift_grafana_pvc_pv_selector            | {}                | Grafana PV Selector                          |
-| openshift_grafana_sc_name                    | None              | StorageClass name to use                     |
-
-Dependencies
-------------
-
-* openshift_hosted_facts
-* openshift_repos
-* lib_openshift
-
-Example Playbook
-----------------
-
-```
-- name: Configure Grafana
-  hosts: oo_first_master
-  roles:
-  - role: openshift_grafana
-```
-
-License
--------
-
-Apache License, Version 2.0
-
-Author Information
-------------------
-
-Mangirdas Judeikis (mudeiki@redhat.com)
-Eldad Marciano

+ 0 - 60
roles/openshift_grafana/defaults/main.yaml

@@ -1,60 +0,0 @@
----
-# We should probably use something more official here.
-openshift_grafana_image: "docker.io/grafana/grafana:master"
-openshift_grafana_proxy_image: "{{ l2_os_logging_proxy_image }}"
-
-openshift_grafana_state: present
-openshift_grafana_namespace: openshift-grafana
-openshift_grafana_pod_timeout: 300
-openshift_grafana_prometheus_namespace: "openshift-monitoring"
-openshift_grafana_prometheus_serviceaccount: "prometheus-k8s"
-openshift_grafana_prometheus_route: "prometheus-k8s"
-openshift_grafana_serviceaccount_name: grafana
-openshift_grafana_serviceaccount_annotations: []
-l_openshift_grafana_serviceaccount_annotations:
-  - serviceaccounts.openshift.io/oauth-redirectreference.primary='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"grafana"}}'
-openshift_grafana_datasource_name: "prometheus"
-openshift_grafana_node_exporter: false
-openshift_grafana_graph_granularity: "2m"
-openshift_grafana_node_selector: {"node-role.kubernetes.io/infra":"true"}
-openshift_grafana_hostname: grafana-{{openshift_grafana_namespace}}.{{openshift_master_default_subdomain}}
-openshift_grafana_service_name: grafana
-openshift_grafana_service_port: 443
-openshift_grafana_service_targetport: 8443
-openshift_grafana_container_port: 3000
-
-openshift_grafana_storage_type: "emptydir"
-openshift_grafana_pvc_name: grafana
-openshift_grafana_pvc_size: "{{ openshift_grafana_storage_volume_size | default('10Gi') }}"
-openshift_grafana_pvc_access_modes: [ReadWriteOnce]
-openshift_grafana_pvc_pv_selector: "{{ openshift_grafana_storage_labels | default({}) }}"
-openshift_grafana_sc_name: "{{ openshift_grafana_storage_class | default(None) }}"
-
-openshift_grafana_dashboards: []
-l_openshift_grafana_dashboards:
-  - openshift-cluster-monitoring.json
-  - node-exporter-full-dashboard.json
-
-# container resources
-openshift_grafana_cpu_limit: null
-openshift_grafana_memory_limit: null
-openshift_grafana_cpu_requests: null
-openshift_grafana_memory_requests: null
-openshift_grafana_oauth_proxy_cpu_limit: null
-openshift_grafana_oauth_proxy_memory_limit: null
-openshift_grafana_oauth_proxy_cpu_requests: null
-openshift_grafana_oauth_proxy_memory_requests: null
-
-openshift_grafana_datasource_payload:
-  name: grafana_name
-  type: prometheus
-  typeLogoUrl: ''
-  access: proxy
-  url: https://prometheus_url
-  basicAuth: false
-  withCredentials: false
-  jsonData:
-    tlsSkipVerify: true
-    httpHeaderName1: Authorization
-  secureJsonData:
-    httpHeaderValue1: Bearer

File diff suppressed because it is too large
+ 0 - 19478
roles/openshift_grafana/files/dashboards/node-exporter-full-dashboard.json


File diff suppressed because it is too large
+ 0 - 5057
roles/openshift_grafana/files/dashboards/openshift-cluster-monitoring.json


+ 0 - 21
roles/openshift_grafana/meta/main.yml

@@ -1,21 +0,0 @@
----
-galaxy_info:
-  author: OpenShift Development <dev@lists.openshift.redhat.com>
-  description: Deploy OpenShift grafana integration for the cluster
-  company: Red Hat, Inc.
-  license: license (Apache)
-  min_ansible_version: 2.4
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  - name: Fedora
-    versions:
-    - all
-  categories:
-  - openshift
-dependencies:
-- role: lib_openshift
-- role: openshift_facts
-- role: lib_utils
-- role: openshift_logging_defaults

+ 0 - 31
roles/openshift_grafana/tasks/facts.yaml

@@ -1,31 +0,0 @@
----
-- set_fact:
-    grafana_state: "{{ openshift_grafana_state }}"
-    grafana_namespace: "{{ openshift_grafana_namespace }}"
-    grafana_timeout: "{{ openshift_grafana_pod_timeout }}"
-    grafana_prometheus_namespace: "{{ openshift_grafana_prometheus_namespace }}"
-    grafana_prometheus_serviceaccount: "{{ openshift_grafana_prometheus_serviceaccount }}"
-    grafana_prometheus_route: "{{ openshift_grafana_prometheus_route }}"
-    grafana_serviceaccount_name: "{{ openshift_grafana_serviceaccount_name }}"
-    grafana_datasource_name: "{{ openshift_grafana_datasource_name }}"
-    grafana_node_exporter: "{{ openshift_grafana_node_exporter }}"
-    grafana_graph_granularity: "{{ openshift_grafana_graph_granularity }}"
-    grafana_datasource_json: "{{ openshift_grafana_datasource_payload | to_json }}"
-    grafana_node_selector: "{{ openshift_grafana_node_selector }}"
-    grafana_serviceaccount_annotations: "{{ l_openshift_grafana_serviceaccount_annotations + openshift_grafana_serviceaccount_annotations|list }}"
-    grafana_dashboards: "{{ l_openshift_grafana_dashboards + openshift_grafana_dashboards|list }}"
-    grafana_hostname: "{{ openshift_grafana_hostname }}"
-    grafana_service_name: "{{ openshift_grafana_service_name }}"
-    grafana_service_port: "{{ openshift_grafana_service_port }}"
-    grafana_service_targetport: "{{ openshift_grafana_service_targetport }}"
-    grafana_container_port: "{{ openshift_grafana_container_port }}"
-    grafana_oauth_proxy_memory_requests: "{{ openshift_grafana_oauth_proxy_memory_requests }}"
-    grafana_oauth_proxy_cpu_requests: "{{ openshift_grafana_oauth_proxy_cpu_requests }}"
-    grafana_oauth_proxy_memory_limit: "{{ openshift_grafana_oauth_proxy_memory_limit }}"
-    grafana_oauth_proxy_cpu_limit: "{{ openshift_grafana_oauth_proxy_cpu_limit }}"
-    grafana_storage_type: "{{ openshift_grafana_storage_type }}"
-    grafana_pvc_name: "{{ openshift_grafana_pvc_name }}"
-    grafana_pvc_size: "{{ openshift_grafana_pvc_size }}"
-    grafana_pvc_access_modes: "{{ openshift_grafana_pvc_access_modes }}"
-    grafana_pvc_pv_selector: "{{ openshift_grafana_pvc_pv_selector }}"
-    grafana_sc_name: "{{ openshift_grafana_sc_name }}"

+ 0 - 284
roles/openshift_grafana/tasks/install_grafana.yaml

@@ -1,284 +0,0 @@
----
-
-- name: Ensure that Grafana has nodes to run on
-  import_role:
-    name: openshift_control_plane
-    tasks_from: ensure_nodes_matching_selector.yml
-  vars:
-    openshift_master_ensure_nodes_selector: "{{ grafana_node_selector | map_to_pairs }}"
-    openshift_master_ensure_nodes_service: Grafana
-
-- name: Create grafana namespace
-  oc_project:
-    state: present
-    name: "{{ grafana_namespace }}"
-    node_selector: "{{ grafana_node_selector | lib_utils_oo_selector_to_string_list() }}"
-    description: Grafana
-
-- name: create grafana_serviceaccount_name serviceaccount
-  oc_serviceaccount:
-    state: present
-    name: "{{ grafana_serviceaccount_name }}"
-    namespace: "{{ grafana_namespace }}"
-  changed_when: no
-
-# TODO remove this when annotations are supported by oc_serviceaccount
-- name: annotate serviceaccount
-  command: >
-    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-    annotate --overwrite -n {{ grafana_namespace }}
-    serviceaccount {{ grafana_serviceaccount_name }} {{ item }}
-  with_items:
-    "{{ grafana_serviceaccount_annotations }}"
-
-# create clusterrolebinding for prometheus serviceaccount
-- name: Set cluster-reader permissions for grafana
-  oc_adm_policy_user:
-    state: present
-    namespace: "{{ grafana_namespace }}"
-    resource_kind: cluster-role
-    resource_name: cluster-reader
-    user: "{{ openshift_grafana_serviceaccount_name }}"
-
-- name: create grafana routes
-  oc_route:
-    state: present
-    name: "{{ item.name }}"
-    host: "{{ item.host }}"
-    namespace: "{{ grafana_namespace }}"
-    service_name: "{{ item.name }}"
-    tls_termination: reencrypt
-  with_items:
-  - name: grafana
-    host: "{{ grafana_hostname }}"
-
-- name: create services for grafana
-  oc_service:
-    name: "{{ grafana_service_name }}"
-    namespace: "{{ grafana_namespace }}"
-    labels:
-      name: grafana
-    annotations:
-      prometheus.io/scrape: "true"
-      prometheus.io/scheme: https
-      prometheus.io/port: "{{ grafana_service_targetport | int }}"
-      service.alpha.openshift.io/serving-cert-secret-name: grafana-tls
-    ports:
-    - name: grafana
-      port: "{{ grafana_service_port }}"
-      targetPort: "{{ grafana_service_targetport }}"
-      protocol: TCP
-    selector:
-      app: grafana
-
-- name: Set grafana secrets
-  oc_secret:
-    state: present
-    name: "{{ item }}-proxy"
-    namespace: "{{ grafana_namespace }}"
-    contents:
-    - path: session_secret
-      data: "{{ 43 | lib_utils_oo_random_word }}="
-  with_items:
-  - grafana
-
-# Storage
-- name: create grafana pvc
-  oc_pvc:
-    namespace: "{{ grafana_namespace }}"
-    name: "{{ grafana_pvc_name }}"
-    access_modes: "{{ grafana_pvc_access_modes }}"
-    volume_capacity: "{{ grafana_pvc_size }}"
-    selector: "{{ grafana_pvc_pv_selector }}"
-    storage_class_name: "{{ grafana_sc_name }}"
-  when: grafana_storage_type == 'pvc'
-
-- name: template grafana components
-  template:
-    src: "{{ item }}.j2"
-    dest: "{{ mktemp.stdout }}/{{ item }}"
-  changed_when: no
-  with_items:
-  - "grafana.yml"
-  - "grafana-config.yml"
-
-- name: Set grafana configmap
-  oc_configmap:
-    state: present
-    name: "grafana-config"
-    namespace: "{{ grafana_namespace }}"
-    from_file:
-      defaults.ini: "{{ mktemp.stdout }}/grafana-config.yml"
-
-- name: Set grafana deployment
-  oc_obj:
-    state: present
-    name: "grafana"
-    namespace: "{{ grafana_namespace }}"
-    kind: deployment
-    files:
-    - "{{ mktemp.stdout }}/grafana.yml"
-
-- name: Copy Grafana files
-  copy:
-    src: "dashboards/{{ item }}"
-    dest: "{{ mktemp.stdout }}/{{ item }}"
-  with_items:
-  - "{{ grafana_dashboards }}"
-
-- name: Wait for grafana pod
-  oc_obj:
-    namespace: "{{ grafana_namespace }}"
-    kind: pod
-    state: list
-    selector: "app=grafana"
-  register: grafana_pod
-  until:
-  - "grafana_pod.results.results[0]['items'] | count > 0"
-  # Pod's 'Ready' status must be True
-  - "grafana_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"
-  delay: 10
-  retries: "{{ (grafana_timeout | int / 10) | int }}"
-
-- name: Get the prometheus SA token
-  shell: oc sa get-token {{ grafana_prometheus_serviceaccount }} -n {{ grafana_prometheus_namespace }}
-  register: prometheus_sa_token
-
-- name: Get the grafana SA token
-  shell: oc sa get-token {{ grafana_serviceaccount_name }} -n {{ grafana_namespace }}
-  register: grafana_sa_token
-
-- name: Get prometheus route
-  oc_route:
-    state: list
-    name: "{{ grafana_prometheus_route }}"
-    namespace: "{{ grafana_prometheus_namespace }}"
-  register: prometheus_route
-
-- name: Get grafana route
-  oc_route:
-    state: list
-    name: grafana
-    namespace: "{{ grafana_namespace }}"
-  register: grafana_route
-
-- name: set facts
-  set_fact:
-    payload_data: "{{ grafana_datasource_json | regex_replace('grafana_name', grafana_datasource_name ) | regex_replace('prometheus_url', prometheus_route.results[0].spec.host ) | regex_replace('Bearer',  'Bearer ' + prometheus_sa_token.stdout) }}"
-    grafana_route: "https://{{ grafana_route.results[0].spec.host }}"
-
-- name: Add new datasource to grafana
-  uri:
-    url: "{{ grafana_route }}/api/datasources"
-    user: "{{ grafana_sa_token.stdout }}"
-    validate_certs: false
-    method: POST
-    body: '{{ payload_data }}'
-    body_format: json
-    status_code:
-    - 200
-    - 409
-    headers:
-      Content-Type: "Content-Type: application/json"
-  register: add_ds
-
-- block:
-  - name: Retrieve current grafana datasource
-    uri:
-      url: "{{ grafana_route }}/api/datasources/name/{{ grafana_datasource_name }}"
-      user: "{{ grafana_sa_token.stdout }}"
-      validate_certs: false
-      method: GET
-      status_code:
-      - 200
-    register: grafana_ds
-  - name: Update grafana datasource
-    uri:
-      url: "{{ grafana_route }}/api/datasources/{{ grafana_ds.json['id'] }}"
-      user: "{{ grafana_sa_token.stdout }}"
-      validate_certs: false
-      method: PUT
-      body: '{{ payload_data }}'
-      body_format: json
-      headers:
-        Content-Type: "Content-Type: application/json"
-      status_code:
-      - 200
-    register: update_ds
-  when: add_ds.status == 409
-
-- name: Regex set data source name for openshift dashboard
-  replace:
-    path: "{{ mktemp.stdout }}/openshift-cluster-monitoring.json"
-    regexp: '{{ item.regexp }}'
-    replace: '{{ item.replace }}'
-    backup: yes
-  with_items:
-  - regexp: '##DS_PR##'
-    replace: '{{ grafana_datasource_name }}'
-  - regexp: 'Xs'
-    replace: '{{ grafana_graph_granularity }}'
-
-- name: Regex set data source name for node exporter
-  replace:
-    path: "{{ mktemp.stdout }}/node-exporter-full-dashboard.json"
-    regexp: '{{ item.regexp }}'
-    replace: '{{ item.replace }}'
-    backup: yes
-  with_items:
-  - regexp: '##DS_PR##'
-    replace: '{{ grafana_datasource_name }}'
-  - regexp: 'Xs'
-    replace: '{{ grafana_graph_granularity }}'
-  when: grafana_node_exporter | default(false) | bool == true
-
-- set_fact:
-    cluster_monitoring_dashboard: "{{ mktemp.stdout }}/openshift-cluster-monitoring.json"
-    node_exporter_dashboard: "{{ mktemp.stdout }}/node-exporter-full-dashboard.json"
-
-- name: Slurp dashboard file
-  slurp:
-    src: "{{ cluster_monitoring_dashboard }}"
-  register: slurpfile
-
-- set_fact:
-    dashboard_data: '{{ slurpfile["content"] | b64decode | from_json | combine({ "dashboard": { "overwrite": true } }, recursive=True) | to_json }}'
-
-- name: Add openshift dashboard
-  uri:
-    url: "{{ grafana_route }}/api/dashboards/db"
-    user: "{{ grafana_sa_token.stdout }}"
-    validate_certs: false
-    method: POST
-    body: '{{ dashboard_data }}'
-    body_format: json
-    status_code:
-    - 200
-    - 412
-    headers:
-      Content-Type: "Content-Type: application/json"
-  register: add_ds
-
-- name: Slurp dashboard file
-  slurp:
-    src: "{{ node_exporter_dashboard }}"
-  register: slurpfile
-
-- set_fact:
-    dashboard_data: '{{ slurpfile["content"] | b64decode | from_json | combine({ "dashboard": { "overwrite": true } }, recursive=True) | to_json }}'
-
-- name: Add node exporter dashboard
-  uri:
-    url: "{{ grafana_route }}/api/dashboards/db"
-    user: "{{ grafana_sa_token.stdout }}"
-    validate_certs: false
-    method: POST
-    body: '{{ dashboard_data }}'
-    body_format: json
-    status_code:
-    - 200
-    - 412
-    headers:
-      Content-Type: "Content-Type: application/json"
-  register: add_ds
-  when: grafana_node_exporter | default(false) | bool == true
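
The datasource registration above follows a create-then-update pattern against Grafana's HTTP API: POST to `/api/datasources`, and on HTTP 409 (datasource already exists) look up its id and PUT the payload back. A condensed sketch of that flow; `grafana_url`, `grafana_token`, and `payload_data` stand in for the values the removed tasks compute:

```
- name: Try to create the datasource (409 means it already exists)
  uri:
    url: "{{ grafana_url }}/api/datasources"
    user: "{{ grafana_token }}"
    method: POST
    body: "{{ payload_data }}"
    body_format: json
    validate_certs: false
    status_code: [200, 409]
  register: add_ds

- name: Look up the id of the existing datasource
  uri:
    url: "{{ grafana_url }}/api/datasources/name/{{ grafana_datasource_name }}"
    user: "{{ grafana_token }}"
    method: GET
    validate_certs: false
  register: grafana_ds
  when: add_ds.status == 409

- name: Replace the existing datasource definition in place
  uri:
    url: "{{ grafana_url }}/api/datasources/{{ grafana_ds.json.id }}"
    user: "{{ grafana_token }}"
    method: PUT
    body: "{{ payload_data }}"
    body_format: json
    validate_certs: false
  when: add_ds.status == 409
```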

+ 0 - 20
roles/openshift_grafana/tasks/main.yaml

@@ -1,20 +0,0 @@
----
-- name: Create temp directory for doing work in
-  command: mktemp -d /tmp/openshift-grafana-ansible-XXXXXX
-  register: mktemp
-  changed_when: False
-  check_mode: no
-
-- include_tasks: facts.yaml
-- include_tasks: install_grafana.yaml
-  when: grafana_state == 'present'
-
-- include_tasks: uninstall_grafana.yaml
-  when: grafana_state == 'absent'
-
-- name: Delete temp directory
-  file:
-    name: "{{ mktemp.stdout }}"
-    state: absent
-  changed_when: False
-  check_mode: no

+ 0 - 7
roles/openshift_grafana/tasks/uninstall_grafana.yaml

@@ -1,7 +0,0 @@
----
-
-# remove namespace - This will delete all the objects inside the namespace
-- name: Remove grafana project
-  oc_project:
-    state: absent
-    name: "{{ openshift_grafana_namespace }}"

+ 0 - 387
roles/openshift_grafana/templates/grafana-config.yml.j2

@@ -1,387 +0,0 @@
-##################### Grafana Configuration Defaults #####################
-#
-# Do not modify this file in grafana installs
-#
-# possible values : production, development
-app_mode = production
-# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty
-instance_name = ${HOSTNAME}
-#################################### Paths ###############################
-[paths]
-# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used)
-#
-data = data
-#
-# Directory where grafana can store logs
-#
-logs = data/log
-#
-# Directory where grafana will automatically scan and look for plugins
-#
-plugins = data/plugins
-#################################### Server ##############################
-[server]
-# Protocol (http, https, socket)
-protocol = http
-# The ip address to bind to, empty will bind to all interfaces
-http_addr =
-# The http port  to use
-http_port = 3000
-# The public facing domain name used to access grafana from a browser
-domain = localhost
-# Redirect to correct domain if host header does not match domain
-# Prevents DNS rebinding attacks
-enforce_domain = false
-# The full public facing url
-root_url = %(protocol)s://%(domain)s:%(http_port)s/
-# Log web requests
-router_logging = false
-# the path relative working path
-static_root_path = public
-# enable gzip
-enable_gzip = false
-# https certs & key file
-cert_file = /etc/tls/private/tls.crt
-cert_key = /etc/tls/private/tls.key
-# Unix socket path
-socket = /tmp/grafana.sock
-#################################### Database ############################
-[database]
-# You can configure the database connection by specifying type, host, name, user and password
-# as separate properties or as on string using the url property.
-# Either "mysql", "postgres" or "sqlite3", it's your choice
-type = sqlite3
-host = 127.0.0.1:3306
-name = grafana
-user = root
-# If the password contains # or ; you have to wrap it with triple quotes. Ex """#password;"""
-password =
-# Use either URL or the previous fields to configure the database
-# Example: mysql://user:secret@host:port/database
-url =
-# Max idle conn setting default is 2
-max_idle_conn = 2
-# Max conn setting default is 0 (mean not set)
-max_open_conn =
-# For "postgres", use either "disable", "require" or "verify-full"
-# For "mysql", use either "true", "false", or "skip-verify".
-ssl_mode = disable
-ca_cert_path =
-client_key_path =
-client_cert_path =
-server_cert_name =
-# For "sqlite3" only, path relative to data_path setting
-path = grafana.db
-#################################### Session #############################
-[session]
-# Either "memory", "file", "redis", "mysql", "postgres", "memcache", default is "file"
-provider = file
-# Provider config options
-# memory: not have any config yet
-# file: session dir path, is relative to grafana data_path
-# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana`
-# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable
-# mysql: go-sql-driver/mysql dsn config string, examples:
-#         `user:password@tcp(127.0.0.1:3306)/database_name`
-#         `user:password@unix(/var/run/mysqld/mysqld.sock)/database_name`
-# memcache: 127.0.0.1:11211
-provider_config = sessions
-# Session cookie name
-cookie_name = grafana_sess
-# If you use session in https only, default is false
-cookie_secure = false
-# Session life time, default is 86400
-session_life_time = 86400
-gc_interval_time = 86400
-#################################### Data proxy ###########################
-[dataproxy]
-# This enables data proxy logging, default is false
-logging = false
-#################################### Analytics ###########################
-[analytics]
-# Server reporting, sends usage counters to stats.grafana.org every 24 hours.
-# No ip addresses are being tracked, only simple counters to track
-# running instances, dashboard and error counts. It is very helpful to us.
-# Change this option to false to disable reporting.
-reporting_enabled = true
-# Set to false to disable all checks to https://grafana.com
-# for new versions (grafana itself and plugins), check is used
-# in some UI views to notify that grafana or plugin update exists
-# This option does not cause any auto updates, nor send any information
-# only a GET request to https://grafana.com to get latest versions
-check_for_updates = true
-# Google Analytics universal tracking code, only enabled if you specify an id here
-google_analytics_ua_id =
-# Google Tag Manager ID, only enabled if you specify an id here
-google_tag_manager_id =
-#################################### Security ############################
-[security]
-# default admin user, created on startup
-admin_user = admin
-# default admin password, can be changed before first start of grafana,  or in profile settings
-admin_password = admin
-# used for signing
-secret_key = SW2YcwTIb9zpOOhoPsMm
-# Auto-login remember days
-login_remember_days = 7
-cookie_username = grafana_user
-cookie_remember_name = grafana_remember
-# disable gravatar profile images
-disable_gravatar = false
-# data source proxy whitelist (ip_or_domain:port separated by spaces)
-data_source_proxy_whitelist =
-[snapshots]
-# snapshot sharing options
-external_enabled = true
-external_snapshot_url = https://snapshots-origin.raintank.io
-external_snapshot_name = Publish to snapshot.raintank.io
-# remove expired snapshot
-snapshot_remove_expired = true
-# remove snapshots after 90 days
-snapshot_TTL_days = 90
-#################################### Users ####################################
-[users]
-# disable user signup / registration
-allow_sign_up = true
-# Allow non admin users to create organizations
-allow_org_create = true
-# Set to true to automatically assign new users to the default organization (id 1)
-auto_assign_org = true
-# Default role new users will be automatically assigned (if auto_assign_org above is set to true)
-auto_assign_org_role = Admin
-# Require email validation before sign up completes
-verify_email_enabled = false
-# Background text for the user field on the login page
-login_hint = email or username
-# Default UI theme ("dark" or "light")
-default_theme = dark
-# External user management
-external_manage_link_url =
-external_manage_link_name =
-external_manage_info =
-[auth]
-# Set to true to disable (hide) the login form, useful if you use OAuth
-disable_login_form = true
-# Set to true to disable the signout link in the side menu. useful if you use auth.proxy
-disable_signout_menu = true
-#################################### Anonymous Auth ######################
-[auth.anonymous]
-# enable anonymous access
-enabled = true
-# specify organization name that should be used for unauthenticated users
-org_name = Main Org.
-# specify role for unauthenticated users
-org_role = Admin
-#################################### Github Auth #########################
-[auth.github]
-enabled = false
-allow_sign_up = true
-client_id = some_id
-client_secret = some_secret
-scopes = user:email
-auth_url = https://github.com/login/oauth/authorize
-token_url = https://github.com/login/oauth/access_token
-api_url = https://api.github.com/user
-team_ids =
-allowed_organizations =
-#################################### Google Auth #########################
-[auth.google]
-enabled = false
-allow_sign_up = true
-client_id = some_client_id
-client_secret = some_client_secret
-scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email
-auth_url = https://accounts.google.com/o/oauth2/auth
-token_url = https://accounts.google.com/o/oauth2/token
-api_url = https://www.googleapis.com/oauth2/v1/userinfo
-allowed_domains =
-hosted_domain =
-#################################### Grafana.com Auth ####################
-# legacy key names (so they work in env variables)
-[auth.grafananet]
-enabled = false
-allow_sign_up = true
-client_id = some_id
-client_secret = some_secret
-scopes = user:email
-allowed_organizations =
-[auth.grafana_com]
-enabled = false
-allow_sign_up = true
-client_id = some_id
-client_secret = some_secret
-scopes = user:email
-allowed_organizations =
-#################################### Generic OAuth #######################
-[auth.generic_oauth]
-name = OAuth
-enabled = false
-allow_sign_up = true
-client_id = some_id
-client_secret = some_secret
-scopes = user:email
-auth_url =
-token_url =
-api_url =
-team_ids =
-allowed_organizations =
-#################################### Basic Auth ##########################
-[auth.basic]
-enabled = false
-#################################### Auth Proxy ##########################
-[auth.proxy]
-enabled = true
-header_name = X-WEBAUTH-USER
-header_property = username
-auto_sign_up = true
-ldap_sync_ttl = 60
-whitelist =
-#################################### Auth LDAP ###########################
-[auth.ldap]
-enabled = false
-config_file = /etc/grafana/ldap.toml
-allow_sign_up = true
-#################################### SMTP / Emailing #####################
-[smtp]
-enabled = false
-host = localhost:25
-user =
-# If the password contains # or ; you have to wrap it with trippel quotes. Ex """#password;"""
-password =
-cert_file =
-key_file =
-skip_verify = false
-from_address = admin@grafana.localhost
-from_name = Grafana
-ehlo_identity =
-[emails]
-welcome_email_on_sign_up = false
-templates_pattern = emails/*.html
-#################################### Logging ##########################
-[log]
-# Either "console", "file", "syslog". Default is console and  file
-# Use space to separate multiple modes, e.g. "console file"
-mode = console file
-# Either "debug", "info", "warn", "error", "critical", default is "info"
-level = error
-# optional settings to set different levels for specific loggers. Ex filters = sqlstore:debug
-filters =
-# For "console" mode only
-[log.console]
-level =
-# log line format, valid options are text, console and json
-format = console
-# For "file" mode only
-[log.file]
-level =
-# log line format, valid options are text, console and json
-format = text
-# This enables automated log rotate(switch of following options), default is true
-log_rotate = true
-# Max line number of single file, default is 1000000
-max_lines = 1000000
-# Max size shift of single file, default is 28 means 1 << 28, 256MB
-max_size_shift = 28
-# Segment log daily, default is true
-daily_rotate = true
-# Expired days of log file(delete after max days), default is 7
-max_days = 7
-[log.syslog]
-level =
-# log line format, valid options are text, console and json
-format = text
-# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used.
-network =
-address =
-# Syslog facility. user, daemon and local0 through local7 are valid.
-facility =
-# Syslog tag. By default, the process' argv[0] is used.
-tag =
-#################################### AMQP Event Publisher ################
-[event_publisher]
-enabled = false
-rabbitmq_url = amqp://localhost/
-exchange = grafana_events
-#################################### Dashboard JSON files ################
-[dashboards.json]
-enabled = false
-path = /var/lib/grafana/dashboards
-#################################### Usage Quotas ########################
-[quota]
-enabled = false
-#### set quotas to -1 to make unlimited. ####
-# limit number of users per Org.
-org_user = 10
-# limit number of dashboards per Org.
-org_dashboard = 100
-# limit number of data_sources per Org.
-org_data_source = 10
-# limit number of api_keys per Org.
-org_api_key = 10
-# limit number of orgs a user can create.
-user_org = 10
-# Global limit of users.
-global_user = -1
-# global limit of orgs.
-global_org = -1
-# global limit of dashboards
-global_dashboard = -1
-# global limit of api_keys
-global_api_key = -1
-# global limit on number of logged in users.
-global_session = -1
-#################################### Alerting ############################
-[alerting]
-# Disable alerting engine & UI features
-enabled = true
-# Makes it possible to turn off alert rule execution but alerting UI is visible
-execute_alerts = true
-#################################### Internal Grafana Metrics ############
-# Metrics available at HTTP API Url /api/metrics
-[metrics]
-enabled           = true
-interval_seconds  = 10
-# Send internal Grafana metrics to graphite
-[metrics.graphite]
-# Enable by setting the address setting (ex localhost:2003)
-address =
-prefix = prod.grafana.%(instance_name)s.
-[grafana_net]
-url = https://grafana.com
-[grafana_com]
-url = https://grafana.com
-#################################### Distributed tracing ############
-[tracing.jaeger]
-# jaeger destination (ex localhost:6831)
-address =
-# tag that will always be included in when creating new spans. ex (tag1:value1,tag2:value2)
-always_included_tag =
-# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote
-sampler_type = const
-# jaeger samplerconfig param
-# for "const" sampler, 0 or 1 for always false/true respectively
-# for "probabilistic" sampler, a probability between 0 and 1
-# for "rateLimiting" sampler, the number of spans per second
-# for "remote" sampler, param is the same as for "probabilistic"
-# and indicates the initial sampling rate before the actual one
-# is received from the mothership
-sampler_param = 1
-#################################### External Image Storage ##############
-[external_image_storage]
-# You can choose between (s3, webdav, gcs)
-provider =
-[external_image_storage.s3]
-bucket_url =
-bucket =
-region =
-path =
-access_key =
-secret_key =
-[external_image_storage.webdav]
-url =
-username =
-password =
-public_url =
-[external_image_storage.gcs]
-key_file =
-bucket =

+ 0 - 116
roles/openshift_grafana/templates/grafana.yml.j2

@@ -1,116 +0,0 @@
----
-apiVersion: extensions/v1beta1
-kind: Deployment
-metadata:
-  labels:
-    app: grafana
-  name: grafana
-  namespace: {{ grafana_namespace }}
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: grafana
-  template:
-    metadata:
-      labels:
-        app: grafana
-      name: grafana
-    spec:
-      serviceAccountName: {{ grafana_serviceaccount_name }}
-{% if grafana_node_selector is iterable and grafana_node_selector | length > 0 %}
-      nodeSelector:
-{% for key, value in grafana_node_selector.items() %}
-        {{ key }}: "{{ value }}"
-{% endfor %}
-{% endif %}
-      containers:
-      - name: oauth-proxy
-        image: "{{ openshift_grafana_proxy_image }}"
-        imagePullPolicy: IfNotPresent
-        resources:
-          requests:
-{% if grafana_oauth_proxy_memory_requests is defined and grafana_oauth_proxy_memory_requests is not none %}
-            memory: "{{ grafana_oauth_proxy_memory_requests }}"
-{% endif %}
-{% if grafana_oauth_proxy_cpu_requests is defined and grafana_oauth_proxy_cpu_requests is not none %}
-            cpu: "{{ grafana_oauth_proxy_cpu_requests }}"
-{% endif %}
-          limits:
-{% if grafana_oauth_proxy_memory_limit is defined and grafana_oauth_proxy_memory_limit is not none %}
-            memory: "{{ grafana_oauth_proxy_memory_limit }}"
-{% endif %}
-{% if grafana_oauth_proxy_cpu_limit is defined and grafana_oauth_proxy_cpu_limit is not none %}
-            cpu: "{{ grafana_oauth_proxy_cpu_limit }}"
-{% endif %}
-        ports:
-        - containerPort: {{ grafana_service_port }}
-          name: web
-        args:
-        - -https-address=:{{ grafana_service_targetport }}
-        - -http-address=
-        - -email-domain=*
-        - -client-id=system:serviceaccount:{{ grafana_namespace }}:{{ grafana_serviceaccount_name }}
-        - -upstream=http://localhost:{{ grafana_container_port }}
-        - -provider=openshift
-#       - '-openshift-delegate-urls={"/api/datasources": {"resource": "namespace", "verb": "get", "resourceName": "{{ grafana_namespace }}", "namespace": "{{ grafana_namespace }}"}}'
-        - '-openshift-sar={"namespace": "{{ grafana_namespace }}", "verb": "list", "resource": "services"}'
-        - -tls-cert=/etc/tls/private/tls.crt
-        - -tls-key=/etc/tls/private/tls.key
-        - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
-        - -cookie-secret-file=/etc/proxy/secrets/session_secret
-        - -skip-auth-regex=^/metrics,/api/datasources,/api/dashboards
-        volumeMounts:
-        - mountPath: /etc/tls/private
-          name: grafana-tls-secret
-        - mountPath: /etc/proxy/secrets
-          name: grafana-proxy-secrets
-
-      - name: grafana
-        image: "{{ openshift_grafana_image }}"
-        imagePullPolicy: IfNotPresent
-        resources:
-          requests:
-{% if openshift_grafana_memory_requests is defined and openshift_grafana_memory_requests is not none %}
-            memory: "{{ openshift_grafana_memory_requests }}"
-{% endif %}
-{% if openshift_grafana_cpu_requests is defined and openshift_grafana_cpu_requests is not none %}
-            cpu: "{{ openshift_grafana_cpu_requests }}"
-{% endif %}
-          limits:
-{% if openshift_grafana_memory_limit is defined and openshift_grafana_memory_limit is not none %}
-            memory: "{{ openshift_grafana_memory_limit }}"
-{% endif %}
-{% if openshift_grafana_cpu_limit is defined and openshift_grafana_cpu_limit is not none %}
-            cpu: "{{ openshift_grafana_cpu_limit }}"
-{% endif %}
-        ports:
-        - name: grafana-http
-          containerPort: {{ grafana_container_port }}
-        volumeMounts:
-        - mountPath: "/root/go/src/github.com/grafana/grafana/data"
-          name: grafana-data
-        - mountPath: "/usr/share/grafana/conf"
-          name: grafana-config
-        - mountPath: /etc/tls/private
-          name: grafana-tls-secret
-        - mountPath: /etc/proxy/secrets
-          name: grafana-proxy-secrets
-
-      volumes:
-      - name: grafana-config
-        configMap:
-          name: grafana-config
-      - name: grafana-proxy-secrets
-        secret:
-          secretName: grafana-proxy
-      - name: grafana-tls-secret
-        secret:
-          secretName: grafana-tls
-      - name: grafana-data
-{% if grafana_storage_type == 'pvc' %}
-        persistentVolumeClaim:
-          claimName: {{ grafana_pvc_name }}
-{% else %}
-        emptydir: {}
-{% endif %}

+ 0 - 10
roles/openshift_prometheus/OWNERS

@@ -1,10 +0,0 @@
-# approval == this is a good idea /approve
-approvers:
-  - pgier
-  - simonpasquier
-  - zgalor
-# review == this code is good /lgtm
-reviewers:
-  - pgier
-  - simonpasquier
-  - zgalor

+ 0 - 109
roles/openshift_prometheus/README.md

@@ -1,109 +0,0 @@
-OpenShift Prometheus
-====================
-
-OpenShift Prometheus Installation
-
-Requirements
-------------
-
-
-Role Variables
---------------
-
-For default values, see [`defaults/main.yaml`](defaults/main.yaml).
-
-- `openshift_prometheus_state`: present - install/update. absent - uninstall.
-
-- `openshift_prometheus_node_exporter_install`: true (default) or false
-
-- `openshift_prometheus_namespace`: project (i.e. namespace) where the components will be
-  deployed.
-
-- `openshift_prometheus_node_selector`: Selector for the nodes prometheus will be deployed on.
-
-- `openshift_prometheus_args`: Modify or add arguments for prometheus application
-
-- `openshift_prometheus_hostname`: specify the hostname for the route to prometheus `prometheus-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}`
-
-- `openshift_prometheus_alerts_hostname`: specify the hostname for the route to prometheus-alerts `prometheus_alerts-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}`
-
-e.g
-```
-openshift_prometheus_args=['--storage.tsdb.retention=6h', '--query.timeout=2m']
-```
-
-## PVC related variables
-Each prometheus component (prometheus, alertmanager, alertbuffer) can set pv claim by setting corresponding role variable:
-```
-openshift_prometheus_<COMPONENT>_storage_type: <VALUE> (pvc, emptydir)
-openshift_prometheus_<COMPONENT>_storage_class: <VALUE>
-openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE>
-```
-e.g
-```
-openshift_prometheus_storage_type: pvc
-openshift_prometheus_storage_class: glusterfs-storage
-openshift_prometheus_alertmanager_pvc_name: alertmanager
-openshift_prometheus_alertbuffer_pvc_size: 10G
-openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
-```
-
-NOTE: Setting `openshift_prometheus_<COMPONENT>_storage_labels` overrides `openshift_prometheus_<COMPONENT>_pvc_pv_selector`
-
-
-## Additional Alert Rules file variable
-An external file with alert rules can be added by setting path to additional rules variable:
-```
-openshift_prometheus_additional_rules_file: <PATH>
-```
-
-File content should be in prometheus alert rules format.
-Following example sets rule to fire an alert when one of the cluster nodes is down:
-
-```
-groups:
-- name: example-rules
-  interval: 30s # defaults to global interval
-  rules:
-  - alert: Node Down
-    expr: up{job="kubernetes-nodes"} == 0
-    annotations:
-      miqTarget: "ContainerNode"
-      severity: "HIGH"
-      message: "{{ '{{' }}{{ '$labels.instance' }}{{ '}}' }} is down"
-```
-
-
-## Additional variables to control resource limits
-Each prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can specify a cpu and memory limits and requests by setting
-the corresponding role variable:
-```
-openshift_prometheus_<COMPONENT>_(memory|cpu)_(limit|requests): <VALUE>
-```
-e.g
-```
-openshift_prometheus_alertmanager_memory_limit: 1Gi
-openshift_prometheus_oath_proxy_cpu_request: 100
-openshift_prometheus_node_exporter_cpu_limit: 200m
-```
-
-Dependencies
-------------
-
-openshift_facts
-
-
-Example Playbook
-----------------
-
-```
-- name: Configure openshift-prometheus
-  hosts: oo_first_master
-  roles:
-  - role: openshift_prometheus
-```
-
-License
--------
-
-Apache License, Version 2.0

+ 0 - 111
roles/openshift_prometheus/defaults/main.yaml

@@ -1,111 +0,0 @@
----
-# defaults file for openshift_prometheus
-openshift_prometheus_state: present
-openshift_prometheus_node_exporter_install: true
-
-openshift_prometheus_namespace: openshift-metrics
-
-# Need to standardise these tags
-l_openshift_prometheus_version_dict:
-  origin:
-    prometheus: 'v2.3.1'
-    alert_manager: 'v0.15.0'
-    alert_buffer: 'v0.0.2'
-    node_exporter: 'v0.16.0'
-  openshift-enterprise:
-    prometheus: "{{ openshift_image_tag }}"
-    alert_manager: "{{ openshift_image_tag }}"
-    alert_buffer: "{{ openshift_image_tag }}"
-    node_exporter: "{{ openshift_image_tag }}"
-
-l_openshift_prometheus_alertmanager_version: "{{ l_openshift_prometheus_version_dict[openshift_deployment_type]['alert_manager'] }}"
-l_openshift_prometheus_alertmanager_image: "{{ l_os_non_standard_reg_url | regex_replace('${version}' | regex_escape, l_openshift_prometheus_alertmanager_version) }}"
-openshift_prometheus_alertmanager_image: "{{ l_openshift_prometheus_alertmanager_image | regex_replace(l_os_logging_non_standard_reg_search | regex_escape, 'prometheus-alertmanager') }}"
-
-l_openshift_prometheus_alertbuffer_version: "{{ l_openshift_prometheus_version_dict[openshift_deployment_type]['alert_buffer'] }}"
-l_openshift_prometheus_alertbuffer_image: "{{ l_os_non_standard_reg_url | regex_replace('${version}' | regex_escape, l_openshift_prometheus_alertbuffer_version) }}"
-openshift_prometheus_alertbuffer_image: "{{ l_openshift_prometheus_alertbuffer_image | regex_replace(l_os_logging_non_standard_reg_search | regex_escape, 'prometheus-alert-buffer') }}"
-
-l_openshift_prometheus_node_exporter_version: "{{ l_openshift_prometheus_version_dict[openshift_deployment_type]['node_exporter'] }}"
-l_openshift_prometheus_node_exporter_image: "{{ l_os_non_standard_reg_url | regex_replace('${version}' | regex_escape, l_openshift_prometheus_node_exporter_version) }}"
-openshift_prometheus_node_exporter_image: "{{ l_openshift_prometheus_node_exporter_image | regex_replace(l_os_logging_non_standard_reg_search | regex_escape, 'prometheus-node-exporter') }}"
-
-l_openshift_prometheus_version: "{{ l_openshift_prometheus_version_dict[openshift_deployment_type]['prometheus'] }}"
-l_openshift_prometheus_image: "{{ l_os_non_standard_reg_url | regex_replace('${version}' | regex_escape, l_openshift_prometheus_version) }}"
-openshift_prometheus_image: "{{ l_openshift_prometheus_image | regex_replace(l_os_logging_non_standard_reg_search | regex_escape, 'prometheus') }}"
-
-openshift_prometheus_proxy_image: "{{ l2_os_logging_proxy_image }}"
-
-# defaults hosts for routes
-openshift_prometheus_hostname: prometheus-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}
-openshift_prometheus_alerts_hostname: alerts-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}
-openshift_prometheus_alertmanager_hostname: alertmanager-{{openshift_prometheus_namespace}}.{{openshift_master_default_subdomain}}
-
-openshift_prometheus_node_selector: "{{ openshift_hosted_infra_selector | default('node-role.kubernetes.io/infra=true') | map_from_pairs }}"
-
-openshift_prometheus_service_port: 443
-openshift_prometheus_service_targetport: 8443
-openshift_prometheus_service_name: prometheus
-openshift_prometheus_reader_serviceaccount_name: prometheus-reader
-openshift_prometheus_alerts_service_targetport: 9443
-openshift_prometheus_alerts_service_name: alerts
-openshift_prometheus_alertmanager_service_targetport: 10443
-openshift_prometheus_alertmanager_service_name: alertmanager
-openshift_prometheus_serviceaccount_annotations: []
-l_openshift_prometheus_serviceaccount_annotations:
-  - serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
-  - serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
-  - serviceaccounts.openshift.io/oauth-redirectreference.alertmanager='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alertmanager"}}'
-
-# additional prometheus rules file
-openshift_prometheus_additional_rules_file: null
-
-#prometheus application arguments
-openshift_prometheus_args: ['--storage.tsdb.retention=3d']
-
-# storage
-# One of ['emptydir', 'pvc']
-openshift_prometheus_storage_type: "emptydir"
-openshift_prometheus_pvc_name: prometheus
-openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}"
-openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}"
-openshift_prometheus_sc_name: "{{ openshift_prometheus_storage_class | default(None) }}"
-
-# One of ['emptydir', 'pvc']
-openshift_prometheus_alertmanager_storage_type: "emptydir"
-openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
-openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}"
-openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}"
-openshift_prometheus_alertmanager_sc_name: "{{ openshift_prometheus_alertmanager_storage_class | default(None) }}"
-
-# One of ['emptydir', 'pvc']
-openshift_prometheus_alertbuffer_storage_type: "emptydir"
-openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
-openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}"
-openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}"
-openshift_prometheus_alertbuffer_sc_name: "{{ openshift_prometheus_alertbuffer_storage_class | default(None) }}"
-
-# container resources
-openshift_prometheus_cpu_limit: null
-openshift_prometheus_memory_limit: null
-openshift_prometheus_cpu_requests: null
-openshift_prometheus_memory_requests: null
-openshift_prometheus_alertmanager_cpu_limit: null
-openshift_prometheus_alertmanager_memory_limit: null
-openshift_prometheus_alertmanager_cpu_requests: null
-openshift_prometheus_alertmanager_memory_requests: null
-openshift_prometheus_alertbuffer_cpu_limit: null
-openshift_prometheus_alertbuffer_memory_limit: null
-openshift_prometheus_alertbuffer_cpu_requests: null
-openshift_prometheus_alertbuffer_memory_requests: null
-openshift_prometheus_oauth_proxy_cpu_limit: null
-openshift_prometheus_oauth_proxy_memory_limit: null
-openshift_prometheus_oauth_proxy_cpu_requests: null
-openshift_prometheus_oauth_proxy_memory_requests: null
-openshift_prometheus_node_exporter_cpu_limit: 200m
-openshift_prometheus_node_exporter_memory_limit: 50Mi
-openshift_prometheus_node_exporter_cpu_requests: 100m
-openshift_prometheus_node_exporter_memory_requests: 30Mi
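
The defaults above derive each image pull spec with two regex_replace passes over a
shared registry URL: the first substitutes the literal ${version} token, the second
swaps in the component name. A minimal runnable sketch of that chain, where the
registry URL, search token, and version are hypothetical stand-ins for the l_os_*
variables defined elsewhere in the role defaults:

---
- hosts: localhost
  gather_facts: false
  vars:
    # Hypothetical stand-ins for l_os_non_standard_reg_url,
    # l_os_logging_non_standard_reg_search and the component version.
    reg_url: "registry.example.com/openshift3/ose-logging-component:${version}"
    reg_search: "logging-component"
    component_version: "v0.16.0"
  tasks:
    - name: Resolve an image pull spec the way the removed defaults did
      debug:
        msg: "{{ reg_url
                 | regex_replace('${version}' | regex_escape, component_version)
                 | regex_replace(reg_search | regex_escape, 'prometheus-node-exporter') }}"
      # Prints registry.example.com/openshift3/ose-prometheus-node-exporter:v0.16.0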

+ 0 - 94
roles/openshift_prometheus/files/node-exporter-template.yaml

@@ -1,94 +0,0 @@
-apiVersion: template.openshift.io/v1
-kind: Template
-metadata:
-  name: prometheus-node-exporter
-  annotations:
-    openshift.io/display-name: Prometheus Node Exporter
-    description: Prometheus exporter for node host metrics
-    iconClass: fa fa-cogs
-    tags: monitoring,prometheus
-    openshift.io/support-url: https://access.redhat.com
-    openshift.io/provider-display-name: Red Hat, Inc.
-parameters:
-- name: IMAGE
-  value: openshift/prometheus-node-exporter:v0.16.0
-- name: MEMORY_REQUESTS
-  value: 30Mi
-- name: CPU_REQUESTS
-  value: 100m
-- name: MEMORY_LIMITS
-  value: 50Mi
-- name: CPU_LIMITS
-  value: 200m
-objects:
-- apiVersion: v1
-  kind: ServiceAccount
-  metadata:
-    name: prometheus-node-exporter
-- apiVersion: v1
-  kind: Service
-  metadata:
-    annotations:
-      prometheus.io/scrape: "true"
-    labels:
-      app: prometheus-node-exporter
-    name: prometheus-node-exporter
-  spec:
-    clusterIP: None
-    ports:
-    - name: scrape
-      port: 9102
-      protocol: TCP
-      targetPort: 9102
-    selector:
-      app: prometheus-node-exporter
-- apiVersion: extensions/v1beta1
-  kind: DaemonSet
-  metadata:
-    name: prometheus-node-exporter
-    labels:
-      app: prometheus-node-exporter
-      role: monitoring
-  spec:
-    updateStrategy:
-      type: RollingUpdate
-    template:
-      metadata:
-        labels:
-          app: prometheus-node-exporter
-          role: monitoring
-        name: prometheus-exporter
-      spec:
-        serviceAccountName: prometheus-node-exporter
-        hostNetwork: true
-        hostPID: true
-        containers:
-        - image: ${IMAGE}
-          name: node-exporter
-          args:
-          - --no-collector.wifi
-          - --web.listen-address=:9102
-          ports:
-          - containerPort: 9102
-            name: scrape
-          resources:
-            requests:
-              memory: ${MEMORY_REQUESTS}
-              cpu: ${CPU_REQUESTS}
-            limits:
-              memory: ${MEMORY_LIMITS}
-              cpu: ${CPU_LIMITS}
-          volumeMounts:
-          - name: proc
-            readOnly: true
-            mountPath: /host/proc
-          - name: sys
-            readOnly: true
-            mountPath: /host/sys
-        volumes:
-        - name: proc
-          hostPath:
-            path: /proc
-        - name: sys
-          hostPath:
-            path: /sys

+ 0 - 21
roles/openshift_prometheus/meta/main.yaml

@@ -1,21 +0,0 @@
----
-galaxy_info:
-  author: OpenShift Development <dev@lists.openshift.redhat.com>
-  description: Deploy OpenShift prometheus integration for the cluster
-  company: Red Hat, Inc.
-  license: Apache License 2.0
-  min_ansible_version: 2.2
-  platforms:
-  - name: EL
-    versions:
-    - 7
-  - name: Fedora
-    versions:
-    - all
-  categories:
-  - openshift
-dependencies:
-- role: lib_openshift
-- role: openshift_facts
-- role: lib_utils
-- role: openshift_logging_defaults

+ 0 - 10
roles/openshift_prometheus/tasks/facts.yaml

@@ -1,10 +0,0 @@
----
-# The kubernetes version impacts the prometheus scraping endpoint,
-# so gather it before constructing the configmap
-- name: get oc version
-  oc_version:
-  register: oc_version
-
-- set_fact:
-    kubernetes_version: "{{ '%.2f' | format(oc_version.results.kubernetes_short|float) }}"
-    openshift_prometheus_serviceaccount_annotations: "{{ l_openshift_prometheus_serviceaccount_annotations + openshift_prometheus_serviceaccount_annotations|list }}"
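
The '%.2f' formatting above matters because the scrape endpoints templated into the
configmap vary with the Kubernetes release, so the role first normalizes
kubernetes_short to a fixed two-decimal string. A standalone sketch, using a
hypothetical oc_version result shaped like the module output consumed above:

---
- hosts: localhost
  gather_facts: false
  vars:
    # Hypothetical result standing in for the real oc_version module output.
    oc_version:
      results:
        kubernetes_short: "1.11"
  tasks:
    - set_fact:
        kubernetes_version: "{{ '%.2f' | format(oc_version.results.kubernetes_short | float) }}"
    - debug:
        var: kubernetes_version  # -> "1.11"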

+ 0 - 55
roles/openshift_prometheus/tasks/install_node_exporter.yaml

@@ -1,55 +0,0 @@
----
-# set facts
-- include_tasks: facts.yaml
-
-# namespace
-- name: Add prometheus project
-  oc_project:
-    state: present
-    name: "{{ openshift_prometheus_namespace }}"
-    node_selector: ""
-    description: Prometheus
-
-- name: Make temp directory for node exporter template
-  command: mktemp -d /tmp/prometheus-ansible-XXXXXX
-  register: mktemp
-  changed_when: False
-
-- name: Copy admin client config
-  command: >
-    cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
-  changed_when: false
-
-# create clusterrolebinding for prometheus-node-exporter serviceaccount
-- name: Set hostaccess SCC for prometheus-node-exporter
-  oc_adm_policy_user:
-    state: present
-    namespace: "{{ openshift_prometheus_namespace }}"
-    resource_kind: scc
-    resource_name: hostaccess
-    user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus-node-exporter"
-
-- name: Copy node exporter templates to temp directory
-  copy:
-    src: "{{ item }}"
-    dest: "{{ mktemp.stdout }}/{{ item }}"
-  with_items:
-    - "{{ __node_exporter_template_file }}"
-
-- name: Apply the node exporter template file
-  shell: >
-    {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __node_exporter_template_file }}"
-    --param IMAGE="{{ openshift_prometheus_node_exporter_image }}"
-    --param MEMORY_REQUESTS="{{ openshift_prometheus_node_exporter_memory_requests }}"
-    --param CPU_REQUESTS="{{ openshift_prometheus_node_exporter_cpu_requests }}"
-    --param MEMORY_LIMITS="{{ openshift_prometheus_node_exporter_memory_limit }}"
-    --param CPU_LIMITS="{{ openshift_prometheus_node_exporter_cpu_limit }}"
-    --config={{ mktemp.stdout }}/admin.kubeconfig
-    -n "{{ openshift_prometheus_namespace }}"
-    | {{ openshift_client_binary }} apply --config={{ mktemp.stdout }}/admin.kubeconfig -f - -n "{{ openshift_prometheus_namespace }}"
-
-- name: Remove temp directory
-  file:
-    state: absent
-    name: "{{ mktemp.stdout }}"
-  changed_when: False

+ 0 - 271
roles/openshift_prometheus/tasks/install_prometheus.yaml

@@ -1,271 +0,0 @@
----
-# set facts
-- include_tasks: facts.yaml
-
-- name: Ensure that Prometheus has nodes to run on
-  import_role:
-    name: openshift_control_plane
-    tasks_from: ensure_nodes_matching_selector.yml
-  vars:
-    openshift_master_ensure_nodes_selector: "{{ openshift_prometheus_node_selector | map_to_pairs }}"
-    openshift_master_ensure_nodes_service: Prometheus
-
-# namespace
-- name: Add prometheus project
-  oc_project:
-    state: present
-    name: "{{ openshift_prometheus_namespace }}"
-    node_selector: ""
-    description: Prometheus
-
-# secrets
-- name: Set alert, alertmanager and prometheus secrets
-  oc_secret:
-    state: present
-    name: "{{ item }}-proxy"
-    namespace: "{{ openshift_prometheus_namespace }}"
-    contents:
-    - path: session_secret
-      data: "{{ 43 | lib_utils_oo_random_word }}="
-  with_items:
-  - prometheus
-  - alerts
-  - alertmanager
-
-# serviceaccount
-- name: create prometheus serviceaccount
-  oc_serviceaccount:
-    state: present
-    name: "{{ openshift_prometheus_service_name }}"
-    namespace: "{{ openshift_prometheus_namespace }}"
-  changed_when: no
-
-# serviceaccount reader
-- name: create prometheus reader serviceaccount
-  oc_serviceaccount:
-    state: present
-    name: "{{ openshift_prometheus_reader_serviceaccount_name }}"
-    namespace: "{{ openshift_prometheus_namespace }}"
-  changed_when: no
-
-# TODO remove this when annotations are supported by oc_serviceaccount
-- name: annotate serviceaccount
-  command: >
-    {{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig annotate --overwrite -n {{ openshift_prometheus_namespace }}
-    serviceaccount {{ openshift_prometheus_service_name }} {{ item }}
-  with_items:
-    "{{ openshift_prometheus_serviceaccount_annotations }}"
-
-# add required permissions to prometheus for scraping router metrics
-- name: Create router-metrics cluster role
-  oc_clusterrole:
-    state: present
-    name: router-metrics
-    rules:
-    - apiGroups: ["route.openshift.io"]
-      resources: ["routers/metrics"]
-      verbs: ["get"]
-
-# create clusterrolebinding for prometheus serviceaccount
-- name: Set clusterrole permissions for prometheus
-  oc_adm_policy_user:
-    state: present
-    namespace: "{{ openshift_prometheus_namespace }}"
-    resource_kind: cluster-role
-    resource_name: "{{ item }}"
-    user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:{{ openshift_prometheus_service_name }}"
-  with_items:
-  - cluster-reader
-  - router-metrics
-
-# create view role for prometheus-reader serviceaccount
-- name: Set view permissions for prometheus reader
-  oc_adm_policy_user:
-    state: present
-    namespace: "{{ openshift_prometheus_namespace }}"
-    resource_kind: cluster-role
-    resource_name: view
-    user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:{{ openshift_prometheus_reader_serviceaccount_name }}"
-
-
-- name: create services for prometheus
-  oc_service:
-    name: "{{ openshift_prometheus_service_name }}"
-    namespace: "{{ openshift_prometheus_namespace }}"
-    labels:
-      name: prometheus
-    annotations:
-      prometheus.io/scrape: 'true'
-      prometheus.io/scheme: https
-      service.alpha.openshift.io/serving-cert-secret-name: prometheus-tls
-    ports:
-    - name: prometheus
-      port: "{{ openshift_prometheus_service_port }}"
-      targetPort: "{{ openshift_prometheus_service_targetport }}"
-      protocol: TCP
-    selector:
-      app: prometheus
-
-- name: create services for alert buffer
-  oc_service:
-    name: "{{ openshift_prometheus_alerts_service_name }}"
-    namespace: "{{ openshift_prometheus_namespace }}"
-    labels:
-      name: prometheus
-    annotations:
-      service.alpha.openshift.io/serving-cert-secret-name: alerts-tls
-    ports:
-    - name: prometheus
-      port: "{{ openshift_prometheus_service_port }}"
-      targetPort: "{{ openshift_prometheus_alerts_service_targetport }}"
-      protocol: TCP
-    selector:
-      app: prometheus
-
-- name: create services for alertmanager
-  oc_service:
-    name: "{{ openshift_prometheus_alertmanager_service_name }}"
-    namespace: "{{ openshift_prometheus_namespace }}"
-    labels:
-      name: prometheus
-    annotations:
-      service.alpha.openshift.io/serving-cert-secret-name: alertmanager-tls
-    ports:
-    - name: prometheus
-      port: "{{ openshift_prometheus_service_port }}"
-      targetPort: "{{ openshift_prometheus_alertmanager_service_targetport }}"
-      protocol: TCP
-    selector:
-      app: prometheus
-
-# create prometheus and alerts routes
-# TODO: oc_route module should support insecureEdgeTerminationPolicy: Redirect
-- name: create prometheus and alerts routes
-  oc_route:
-    state: present
-    name: "{{ item.name }}"
-    host: "{{ item.host }}"
-    namespace: "{{ openshift_prometheus_namespace }}"
-    service_name: "{{ item.name }}"
-    tls_termination: reencrypt
-  with_items:
-  - name: prometheus
-    host: "{{ openshift_prometheus_hostname }}"
-  - name: alerts
-    host: "{{ openshift_prometheus_alerts_hostname }}"
-  - name: alertmanager
-    host: "{{ openshift_prometheus_alertmanager_hostname }}"
-
-# Storage
-- name: create prometheus pvc
-  oc_pvc:
-    namespace: "{{ openshift_prometheus_namespace }}"
-    name: "{{ openshift_prometheus_pvc_name }}"
-    access_modes: "{{ openshift_prometheus_pvc_access_modes }}"
-    volume_capacity: "{{ openshift_prometheus_pvc_size }}"
-    selector: "{{ openshift_prometheus_pvc_pv_selector }}"
-    storage_class_name: "{{ openshift_prometheus_sc_name }}"
-  when: openshift_prometheus_storage_type == 'pvc'
-
-- name: create alertmanager pvc
-  oc_pvc:
-    namespace: "{{ openshift_prometheus_namespace }}"
-    name: "{{ openshift_prometheus_alertmanager_pvc_name }}"
-    access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}"
-    volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}"
-    selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}"
-    storage_class_name: "{{ openshift_prometheus_alertmanager_sc_name }}"
-  when: openshift_prometheus_alertmanager_storage_type == 'pvc'
-
-- name: create alertbuffer pvc
-  oc_pvc:
-    namespace: "{{ openshift_prometheus_namespace }}"
-    name: "{{ openshift_prometheus_alertbuffer_pvc_name }}"
-    access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}"
-    volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}"
-    selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}"
-    storage_class_name: "{{ openshift_prometheus_alertbuffer_sc_name }}"
-  when: openshift_prometheus_alertbuffer_storage_type == 'pvc'
-
-# prometheus configmap
-# Copy the additional rules file if it is defined
-- name: Copy additional rules file to host
-  copy:
-    src: "{{ openshift_prometheus_additional_rules_file }}"
-    dest: "{{ tempdir }}/prometheus.additional.rules"
-  when:
-  - openshift_prometheus_additional_rules_file is defined
-  - openshift_prometheus_additional_rules_file is not none
-  - openshift_prometheus_additional_rules_file | trim | length > 0
-
-- stat:
-    path: "{{ tempdir }}/prometheus.additional.rules"
-    get_checksum: false
-    get_attributes: false
-    get_mime: false
-  register: additional_rules_stat
-
-- template:
-    src: prometheus.yml.j2
-    dest: "{{ tempdir }}/prometheus.yml"
-  changed_when: no
-
-- template:
-    src: prometheus.rules.j2
-    dest: "{{ tempdir }}/prometheus.rules"
-  changed_when: no
-
-# Include a "prometheus.additional.rules" entry in the prometheus configmap only if the file exists
-- name: Set prometheus configmap
-  oc_configmap:
-    state: present
-    name: "prometheus"
-    namespace: "{{ openshift_prometheus_namespace }}"
-    from_file:
-      prometheus.rules: "{{ tempdir }}/prometheus.rules"
-      prometheus.additional.rules: "{{ tempdir }}/prometheus.additional.rules"
-      prometheus.yml: "{{ tempdir }}/prometheus.yml"
-  when: additional_rules_stat.stat.exists
-
-- name: Set prometheus configmap
-  oc_configmap:
-    state: present
-    name: "prometheus"
-    namespace: "{{ openshift_prometheus_namespace }}"
-    from_file:
-      prometheus.rules: "{{ tempdir }}/prometheus.rules"
-      prometheus.yml: "{{ tempdir }}/prometheus.yml"
-  when: not additional_rules_stat.stat.exists
-
-# alertmanager configmap
-- template:
-    src: alertmanager.yml.j2
-    dest: "{{ tempdir }}/alertmanager.yml"
-  changed_when: no
-
-- name: Set alertmanager configmap
-  oc_configmap:
-    state: present
-    name: "alertmanager"
-    namespace: "{{ openshift_prometheus_namespace }}"
-    from_file:
-      alertmanager.yml: "{{ tempdir }}/alertmanager.yml"
-
-# create prometheus stateful set
-- name: Set prometheus template
-  template:
-    src: prometheus.j2
-    dest: "{{ tempdir }}/templates/prometheus.yaml"
-  vars:
-    namespace: "{{ openshift_prometheus_namespace }}"
-#    prom_replicas: "{{ openshift_prometheus_replicas }}"
-
-- name: Set prometheus stateful set
-  oc_obj:
-    state: present
-    name: "prometheus"
-    namespace: "{{ openshift_prometheus_namespace }}"
-    kind: statefulset
-    files:
-    - "{{ tempdir }}/templates/prometheus.yaml"
-    delete_after: true
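
The "annotate serviceaccount" task above applies the OAuth redirect annotations from
the role defaults; they let the oauth-proxy sidecars use the prometheus service
account as an OAuth client whose redirect URI is resolved from a Route. For
illustration, the annotated service account would look roughly like this (the
namespace is a hypothetical value of openshift_prometheus_namespace):

apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
  namespace: openshift-metrics  # hypothetical
  annotations:
    serviceaccounts.openshift.io/oauth-redirectreference.prom: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'
    serviceaccounts.openshift.io/oauth-redirectreference.alerts: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}'
    serviceaccounts.openshift.io/oauth-redirectreference.alertmanager: '{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alertmanager"}}'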

+ 0 - 33
roles/openshift_prometheus/tasks/main.yaml

@@ -1,33 +0,0 @@
----
-
-- name: Create temp directory for doing work in on target
-  command: mktemp -td openshift-prometheus-ansible-XXXXXX
-  register: mktemp
-  changed_when: False
-
-- set_fact:
-    tempdir: "{{ mktemp.stdout }}"
-
-- name: Create templates subdirectory
-  file:
-    state: directory
-    path: "{{ tempdir }}/{{ item }}"
-    mode: 0755
-  changed_when: False
-  with_items:
-    - templates
-
-- include_tasks: install_prometheus.yaml
-  when: openshift_prometheus_state == 'present'
-
-- include_tasks: uninstall_prometheus.yaml
-  when: openshift_prometheus_state == 'absent'
-
-- include_tasks: install_node_exporter.yaml
-  when: openshift_prometheus_node_exporter_install | default(true) | bool and openshift_prometheus_state == 'present'
-
-- name: Delete temp directory
-  file:
-    name: "{{ tempdir }}"
-    state: absent
-  changed_when: False
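
Since main.yaml dispatches on openshift_prometheus_state, callers install or remove
the whole stack by flipping a single variable. A minimal sketch of driving the
uninstall path (the host pattern is a placeholder):

---
- hosts: masters[0]
  roles:
    - role: openshift_prometheus
      openshift_prometheus_state: absent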

+ 0 - 107
roles/openshift_prometheus/tasks/uninstall_prometheus.yaml

@@ -1,107 +0,0 @@
----
-
-- name: Remove node_exporter daemon set
-  oc_obj:
-    state: absent
-    name: "prometheus-node-exporter"
-    namespace: "{{ openshift_prometheus_namespace }}"
-    kind: daemonset
-
-- name: Remove node_exporter services
-  oc_service:
-    state: absent
-    name: "prometheus-node-exporter"
-    namespace: "{{ openshift_prometheus_namespace }}"
-
-- name: Remove prometheus stateful set
-  oc_obj:
-    state: absent
-    name: "prometheus"
-    namespace: "{{ openshift_prometheus_namespace }}"
-    kind: statefulset
-
-- name: Remove prometheus configmaps
-  oc_configmap:
-    state: absent
-    name: "{{ item }}"
-    namespace: "{{ openshift_prometheus_namespace }}"
-  with_items:
-    - "prometheus"
-    - "alertmanager"
-
-- name: Remove prometheus PVCs
-  oc_pvc:
-    namespace: "{{ openshift_prometheus_namespace }}"
-    name: "{{ item }}"
-    state: absent
-  with_items:
-    - "{{ openshift_prometheus_pvc_name }}"
-    - "{{ openshift_prometheus_alertmanager_pvc_name }}"
-    - "{{ openshift_prometheus_alertbuffer_pvc_name }}"
-
-- name: Remove prometheus and alerts routes
-  oc_route:
-    state: absent
-    name: "{{ item.name }}"
-    namespace: "{{ openshift_prometheus_namespace }}"
-  with_items:
-    - name: prometheus
-    - name: alerts
-    - name: alertmanager
-
-- name: Remove services for prometheus
-  oc_service:
-    state: absent
-    name: "{{ item }}"
-    namespace: "{{ openshift_prometheus_namespace }}"
-  with_items:
-    - "{{ openshift_prometheus_service_name }}"
-    - "{{ openshift_prometheus_alertmanager_service_name }}"
-    - "{{ openshift_prometheus_alerts_service_name }}"
-
-- name: Remove prometheus secrets
-  oc_secret:
-    state: absent
-    name: "{{ item }}-proxy"
-    namespace: "{{ openshift_prometheus_namespace }}"
-  with_items:
-    - prometheus
-    - alerts
-    - alertmanager
-
-- name: Remove prometheus serviceaccounts
-  oc_serviceaccount:
-    state: absent
-    name: "{{ item }}"
-    namespace: "{{ openshift_prometheus_namespace }}"
-  with_items:
-    - "{{ openshift_prometheus_service_name }}"
-    - "{{ openshift_prometheus_reader_serviceaccount_name }}"
-    - prometheus-node-exporter
-
-# Check for any remaining objects in the namespace
-- name: Get all objects in prometheus namespace
-  oc_obj:
-    state: list
-    kind: all
-    namespace: "{{ openshift_prometheus_namespace }}"
-  register: __prometheus_namespace_objects
-
-- name: Set prometheus objects facts
-  set_fact:
-    num_prometheus_objects: "{{ __prometheus_namespace_objects['results']['results'][0]['items'] | length }}"
-
-# If there are no remaining objects then it should be safe to delete
-# the clusterrole and the project/namespace
-- name: delete router-metrics cluster role
-  oc_obj:
-    state: absent
-    kind: clusterrole
-    name: router-metrics
-  when: num_prometheus_objects | int == 0
-
-- name: Remove prometheus project
-  oc_project:
-    state: absent
-    name: "{{ openshift_prometheus_namespace }}"
-  when: num_prometheus_objects | int == 0

+ 0 - 20
roles/openshift_prometheus/templates/alertmanager.yml.j2

@@ -1,20 +0,0 @@
-global:
-
-# The root route on which each incoming alert enters.
-route:
-  # default route if none match
-  receiver: alert-buffer-wh
-
-  # The labels by which incoming alerts are grouped together. For example,
-  # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
-  # be batched into a single group.
-  # TODO:
-  group_by: []
-
-  # All the above attributes are inherited by all child routes and can be
-  # overwritten on each.
-
-receivers:
-- name: alert-buffer-wh
-  webhook_configs:
-  - url: http://localhost:9099/topics/alerts
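
The removed config routes every alert to the alert-buffer webhook and leaves group_by
empty (the TODO above). For comparison, a hypothetical variant that groups by alert
name and adds one child route, illustrating the inheritance described above (the
child overrides only the receiver and inherits everything else):

route:
  receiver: alert-buffer-wh
  group_by: ['alertname', 'cluster']
  routes:
  - match:
      severity: critical
    receiver: critical-wh  # hypothetical second receiver
receivers:
- name: alert-buffer-wh
  webhook_configs:
  - url: http://localhost:9099/topics/alerts
- name: critical-wh
  webhook_configs:
  - url: http://localhost:9099/topics/critical  # hypothetical endpoint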

+ 0 - 309
roles/openshift_prometheus/templates/prometheus.j2

@@ -1,309 +0,0 @@
-apiVersion: apps/v1beta1
-kind: StatefulSet
-metadata:
-  name: prometheus
-  namespace: {{ namespace }}
-  labels:
-    app: prometheus
-spec:
-  updateStrategy:
-    type: RollingUpdate
-  podManagementPolicy: Parallel
-  selector:
-    matchLabels:
-      app: prometheus
-  template:
-    metadata:
-      name: prometheus
-      labels:
-        app: prometheus
-    spec:
-      serviceAccountName: "{{ openshift_prometheus_service_name }}"
-{% if openshift_prometheus_node_selector is iterable and openshift_prometheus_node_selector | length > 0 %}
-      nodeSelector:
-{% for key, value in openshift_prometheus_node_selector.items() %}
-        {{ key }}: "{{ value }}"
-{% endfor %}
-{% endif %}
-      containers:
-      # Deploy Prometheus behind an oauth proxy
-      - name: prom-proxy
-        image: "{{ openshift_prometheus_proxy_image }}"
-        imagePullPolicy: IfNotPresent
-        resources:
-          requests:
-{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
-            memory: "{{ openshift_prometheus_oauth_proxy_memory_requests }}"
-{% endif %}
-{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
-            cpu: "{{ openshift_prometheus_oauth_proxy_cpu_requests }}"
-{% endif %}
-          limits:
-{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
-            memory: "{{ openshift_prometheus_oauth_proxy_memory_limit }}"
-{% endif %}
-{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
-            cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}"
-{% endif %}
-        ports:
-        - containerPort: {{ openshift_prometheus_service_targetport }}
-          name: web
-        args:
-        - -provider=openshift
-        - -https-address=:{{ openshift_prometheus_service_targetport }}
-        - -http-address=
-        - -email-domain=*
-        - -upstream=http://localhost:9090
-        - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }}
-        - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
-        - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
-        - -tls-cert=/etc/tls/private/tls.crt
-        - -tls-key=/etc/tls/private/tls.key
-        - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
-        - -cookie-secret-file=/etc/proxy/secrets/session_secret
-        - -openshift-ca=/etc/pki/tls/cert.pem
-        - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-        - -skip-auth-regex=^/metrics
-        volumeMounts:
-        - mountPath: /etc/tls/private
-          name: prometheus-tls-secret
-        - mountPath: /etc/proxy/secrets
-          name: prometheus-proxy-secret
-        - mountPath: /prometheus
-          name: prometheus-data
-
-      - name: prometheus
-        args:
-{% for arg in openshift_prometheus_args %}
-        - {{ arg }}
-{% endfor %}
-        - --config.file=/etc/prometheus/prometheus.yml
-        - --web.listen-address=localhost:9090
-        - --web.external-url=https://{{ openshift_prometheus_hostname }}
-        image: "{{ openshift_prometheus_image }}"
-        imagePullPolicy: IfNotPresent
-        livenessProbe:
-          exec:
-            command:
-            - /bin/bash
-            - -c
-            - |-
-              set -euo pipefail;
-              touch /tmp/prometheusconfig.hash;
-              if [[ $(find /etc/prometheus -type f | sort | xargs md5sum | md5sum) != $(cat /tmp/prometheusconfig.hash) ]]; then
-                find /etc/prometheus -type f | sort | xargs md5sum | md5sum > /tmp/prometheusconfig.hash;
-                kill -HUP 1;
-              fi
-          initialDelaySeconds: 60
-          periodSeconds: 60
-        resources:
-          requests:
-{% if openshift_prometheus_memory_requests is defined and openshift_prometheus_memory_requests is not none %}
-            memory: "{{ openshift_prometheus_memory_requests }}"
-{% endif %}
-{% if openshift_prometheus_cpu_requests is defined and openshift_prometheus_cpu_requests is not none %}
-            cpu: "{{ openshift_prometheus_cpu_requests }}"
-{% endif %}
-          limits:
-{% if openshift_prometheus_memory_limit is defined and openshift_prometheus_memory_limit is not none %}
-            memory: "{{ openshift_prometheus_memory_limit }}"
-{% endif %}
-{% if openshift_prometheus_cpu_limit is defined and openshift_prometheus_cpu_limit is not none %}
-            cpu: "{{ openshift_prometheus_cpu_limit }}"
-{% endif %}
-
-        volumeMounts:
-        - mountPath: /etc/prometheus
-          name: prometheus-config
-        - mountPath: /prometheus
-          name: prometheus-data
-
-      # Deploy alert-buffer behind oauth alerts-proxy
-      - name: alerts-proxy
-        image: "{{ openshift_prometheus_proxy_image }}"
-        imagePullPolicy: IfNotPresent
-        resources:
-          requests:
-{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
-            memory: "{{ openshift_prometheus_oauth_proxy_memory_requests }}"
-{% endif %}
-{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
-            cpu: "{{ openshift_prometheus_oauth_proxy_cpu_requests }}"
-{% endif %}
-          limits:
-{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
-            memory: "{{ openshift_prometheus_oauth_proxy_memory_limit }}"
-{% endif %}
-{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
-            cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}"
-{% endif %}
-        ports:
-        - containerPort: {{ openshift_prometheus_alerts_service_targetport }}
-          name: web
-        args:
-        - -provider=openshift
-        - -https-address=:{{ openshift_prometheus_alerts_service_targetport }}
-        - -http-address=
-        - -email-domain=*
-        - -upstream=http://localhost:9099
-        - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }}
-        - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
-        - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
-        - -tls-cert=/etc/tls/private/tls.crt
-        - -tls-key=/etc/tls/private/tls.key
-        - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
-        - -cookie-secret-file=/etc/proxy/secrets/session_secret
-        - -openshift-ca=/etc/pki/tls/cert.pem
-        - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-        - -skip-auth-regex=^/metrics
-        volumeMounts:
-        - mountPath: /etc/tls/private
-          name: alerts-tls-secret
-        - mountPath: /etc/proxy/secrets
-          name: alerts-proxy-secret
-
-      - name: alert-buffer
-        args:
-        - --storage-path=/alert-buffer/messages.db
-        image: "{{ openshift_prometheus_alertbuffer_image }}"
-        imagePullPolicy: IfNotPresent
-        resources:
-          requests:
-{% if openshift_prometheus_alertbuffer_memory_requests is defined and openshift_prometheus_alertbuffer_memory_requests is not none %}
-            memory: "{{ openshift_prometheus_alertbuffer_memory_requests }}"
-{% endif %}
-{% if openshift_prometheus_alertbuffer_cpu_requests is defined and openshift_prometheus_alertbuffer_cpu_requests is not none %}
-            cpu: "{{ openshift_prometheus_alertbuffer_cpu_requests }}"
-{% endif %}
-          limits:
-{% if openshift_prometheus_alertbuffer_memory_limit is defined and openshift_prometheus_alertbuffer_memory_limit is not none %}
-            memory: "{{ openshift_prometheus_alertbuffer_memory_limit }}"
-{% endif %}
-{% if openshift_prometheus_alertbuffer_cpu_limit is defined and openshift_prometheus_alertbuffer_cpu_limit is not none %}
-            cpu: "{{ openshift_prometheus_alertbuffer_cpu_limit }}"
-{% endif %}
-        volumeMounts:
-        - mountPath: /alert-buffer
-          name: alerts-data
-
-      # Deploy alertmanager behind oauth alertmanager-proxy
-      - name: alertmanager-proxy
-        image: "{{ openshift_prometheus_proxy_image }}"
-        imagePullPolicy: IfNotPresent
-        resources:
-          requests:
-{% if openshift_prometheus_oauth_proxy_memory_requests is defined and openshift_prometheus_oauth_proxy_memory_requests is not none %}
-            memory: "{{ openshift_prometheus_oauth_proxy_memory_requests }}"
-{% endif %}
-{% if openshift_prometheus_oauth_proxy_cpu_requests is defined and openshift_prometheus_oauth_proxy_cpu_requests is not none %}
-            cpu: "{{ openshift_prometheus_oauth_proxy_cpu_requests }}"
-{% endif %}
-          limits:
-{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
-            memory: "{{ openshift_prometheus_oauth_proxy_memory_limit }}"
-{% endif %}
-{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
-            cpu: "{{ openshift_prometheus_oauth_proxy_cpu_limit }}"
-{% endif %}
-        ports:
-        - containerPort: {{ openshift_prometheus_alertmanager_service_targetport }}
-          name: web
-        args:
-        - -provider=openshift
-        - -https-address=:{{ openshift_prometheus_alertmanager_service_targetport }}
-        - -http-address=
-        - -email-domain=*
-        - -upstream=http://localhost:9093
-        - -client-id=system:serviceaccount:{{ namespace }}:{{ openshift_prometheus_service_name }}
-        - -openshift-ca=/etc/pki/tls/cert.pem
-        - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-        - '-openshift-sar={"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}'
-        - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get", "resourceName": "{{ namespace }}", "namespace": "{{ namespace }}"}}'
-        - -tls-cert=/etc/tls/private/tls.crt
-        - -tls-key=/etc/tls/private/tls.key
-        - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
-        - -cookie-secret-file=/etc/proxy/secrets/session_secret
-        - -skip-auth-regex=^/metrics
-        volumeMounts:
-        - mountPath: /etc/tls/private
-          name: alertmanager-tls-secret
-        - mountPath: /etc/proxy/secrets
-          name: alertmanager-proxy-secret
-
-      - name: alertmanager
-        args:
-        - --config.file=/etc/alertmanager/alertmanager.yml
-        - --web.external-url=https://{{ openshift_prometheus_alertmanager_hostname }}
-        image: "{{ openshift_prometheus_alertmanager_image }}"
-        imagePullPolicy: IfNotPresent
-        resources:
-          requests:
-{% if openshift_prometheus_alertmanager_memory_requests is defined and openshift_prometheus_alertmanager_memory_requests is not none %}
-            memory: "{{ openshift_prometheus_alertmanager_memory_requests }}"
-{% endif %}
-{% if openshift_prometheus_alertmanager_cpu_requests is defined and openshift_prometheus_alertmanager_cpu_requests is not none %}
-            cpu: "{{ openshift_prometheus_alertmanager_cpu_requests }}"
-{% endif %}
-          limits:
-{% if openshift_prometheus_alertmanager_memory_limit is defined and openshift_prometheus_alertmanager_memory_limit is not none %}
-            memory: "{{ openshift_prometheus_alertmanager_memory_limit }}"
-{% endif %}
-{% if openshift_prometheus_alertmanager_cpu_limit is defined and openshift_prometheus_alertmanager_cpu_limit is not none %}
-            cpu: "{{ openshift_prometheus_alertmanager_cpu_limit }}"
-{% endif %}
-        volumeMounts:
-        - mountPath: /etc/alertmanager
-          name: alertmanager-config
-        - mountPath: /alertmanager
-          name: alertmanager-data
-
-      restartPolicy: Always
-      volumes:
-
-      - name: prometheus-config
-        configMap:
-          defaultMode: 420
-          name: prometheus
-      - name: prometheus-proxy-secret
-        secret:
-          secretName: prometheus-proxy
-      - name: prometheus-tls-secret
-        secret:
-          secretName: prometheus-tls
-      - name: prometheus-data
-{% if openshift_prometheus_storage_type == 'pvc' %}
-        persistentVolumeClaim:
-          claimName: {{ openshift_prometheus_pvc_name }}
-{% else %}
-        emptyDir: {}
-{% endif %}
-      - name: alertmanager-config
-        configMap:
-          defaultMode: 420
-          name: alertmanager
-      - name: alertmanager-proxy-secret
-        secret:
-          secretName: alertmanager-proxy
-      - name: alertmanager-tls-secret
-        secret:
-          secretName: alertmanager-tls
-      - name: alerts-tls-secret
-        secret:
-          secretName: alerts-tls
-      - name: alerts-proxy-secret
-        secret:
-          secretName: alerts-proxy
-      - name: alertmanager-data
-{% if openshift_prometheus_alertmanager_storage_type == 'pvc' %}
-        persistentVolumeClaim:
-          claimName: {{ openshift_prometheus_alertmanager_pvc_name }}
-{% else %}
-        emptyDir: {}
-{% endif %}
-      - name: alerts-data
-{% if openshift_prometheus_alertbuffer_storage_type == 'pvc' %}
-        persistentVolumeClaim:
-          claimName: {{ openshift_prometheus_alertbuffer_pvc_name }}
-{% else %}
-        emptyDir: {}
-{% endif %}

+ 0 - 4
roles/openshift_prometheus/templates/prometheus.rules.j2

@@ -1,4 +0,0 @@
-groups:
-- name: example-rules
-  interval: 30s # defaults to global interval
-  rules:
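
The template above ships an empty example-rules group; entries under rules: follow
the standard Prometheus rule-file format. A hypothetical alerting rule that would be
valid in this group (the kubernetes-nodes job name comes from prometheus.yml.j2):

groups:
- name: example-rules
  interval: 30s # defaults to global interval
  rules:
  - alert: NodeDown
    expr: up{job="kubernetes-nodes"} == 0
    for: 10m
    labels:
      severity: warning
    annotations:
      description: A node has stopped reporting kubelet metrics for 10 minutes.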

+ 0 - 323
roles/openshift_prometheus/templates/prometheus.yml.j2

@@ -1,323 +0,0 @@
-rule_files:
-  - '*.rules'
-
-# A scrape configuration for running Prometheus on a Kubernetes cluster.
-# This uses separate scrape configs for cluster components (i.e. API server, node)
-# and services to allow each to use different authentication configs.
-#
-# Kubernetes labels will be added as Prometheus labels on metrics via the
-# `labelmap` relabeling action.
-
-# Scrape config for API servers.
-#
-# Kubernetes exposes API servers as endpoints to the default/kubernetes
-# service so this uses `endpoints` role and uses relabelling to only keep
-# the endpoints associated with the default/kubernetes service using the
-# default named port `https`. This works for single API server deployments as
-# well as HA API server deployments.
-scrape_configs:
-- job_name: 'kubernetes-apiservers'
-
-  kubernetes_sd_configs:
-  - role: endpoints
-    namespaces:
-      names:
-      - default
-
-  scheme: https
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-  # Keep only the default/kubernetes service endpoints for the https port. This
-  # will add a target for each API server that Kubernetes has registered as an
-  # endpoint of the default/kubernetes service.
-  relabel_configs:
-  - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
-    action: keep
-    regex: kubernetes;https
-
-# Scrape config for controllers.
-#
-# Each master node exposes a /metrics endpoint on :8444 that contains operational metrics for
-# the controllers.
-#
-- job_name: 'kubernetes-controllers'
-
-  scheme: https
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-  kubernetes_sd_configs:
-  - role: endpoints
-    namespaces:
-      names:
-      - default
-
-  # Keep only the default/kubernetes service endpoints for the https port, and then
-  # set the port to 8444. This is the default configuration for the controllers on OpenShift
-  # masters.
-  relabel_configs:
-  - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
-    action: keep
-    regex: kubernetes;https
-  - source_labels: [__address__]
-    action: replace
-    target_label: __address__
-    regex: (.+)(?::\d+)
-    replacement: $1:8444
-
-# Scrape config for nodes.
-#
-# Each node exposes a /metrics endpoint that contains operational metrics for
-# the Kubelet and other components.
-- job_name: 'kubernetes-nodes'
-  scheme: https
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-  kubernetes_sd_configs:
-  - role: node
-  # Drop a very high cardinality metric that is incorrect in 3.7. It will be
-  # fixed in 3.9.
-  metric_relabel_configs:
-  - source_labels: [__name__]
-    action: drop
-    regex: 'openshift_sdn_pod_(setup|teardown)_latency(.*)'
-  relabel_configs:
-  - action: labelmap
-    regex: __meta_kubernetes_node_label_(.+)
-
-# Scrape config for cAdvisor.
-#
-# Beginning in Kube 1.7, each node exposes a /metrics/cadvisor endpoint that
-# reports container metrics for each running pod. Scrape those by default.
-- job_name: 'kubernetes-cadvisor'
-
-  scheme: https
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-  metrics_path: /metrics/cadvisor
-
-  kubernetes_sd_configs:
-  - role: node
-
-  # Exclude a set of high cardinality metrics that can contribute to significant
-  # memory use in large clusters. These can be selectively enabled as necessary
-  # for medium or small clusters.
-  metric_relabel_configs:
-  - source_labels: [__name__]
-    action: drop
-    regex: 'container_(cpu_user_seconds_total|cpu_cfs_periods_total|memory_usage_bytes|memory_swap|memory_cache|last_seen|fs_(read_seconds_total|write_seconds_total|sector_(.*)|io_(.*)|reads_merged_total|writes_merged_total)|tasks_state|memory_failcnt|memory_failures_total|spec_memory_swap_limit_bytes|fs_(.*)_bytes_total|spec_(.*))'
-
-  relabel_configs:
-  - action: labelmap
-    regex: __meta_kubernetes_node_label_(.+)
-
-# Scrape config for service endpoints.
-#
-# The relabeling allows the actual service scrape endpoint to be configured
-# via the following annotations:
-#
-# * `prometheus.io/scrape`: Only scrape services that have a value of `true`
-# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
-# to set this to `https` and most likely set the `tls_config` of the scrape config.
-# * `prometheus.io/path`: If the metrics path is not `/metrics`, override this.
-# * `prometheus.io/port`: If the metrics are exposed on a different port than the
-# service's port, set this appropriately.
-- job_name: 'kubernetes-service-endpoints'
-
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-    # TODO: this should be per target
-    insecure_skip_verify: true
-
-  kubernetes_sd_configs:
-  - role: endpoints
-
-  relabel_configs:
-    # only scrape infrastructure components
-    - source_labels: [__meta_kubernetes_namespace]
-      action: keep
-      regex: 'default|metrics|kube-.+|openshift|openshift-.+'
-    # drop logging components managed by other scrape targets
-    - source_labels: [__meta_kubernetes_namespace]
-      action: drop
-      regex: '{{ openshift_logging_namespace | default('openshift-logging') }}'
-    # drop infrastructure components managed by other scrape targets
-    - source_labels: [__meta_kubernetes_service_name]
-      action: drop
-      regex: 'prometheus-node-exporter'
-    # only those that have requested scraping
-    - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
-      action: keep
-      regex: true
-    - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
-      action: replace
-      target_label: __scheme__
-      regex: (https?)
-    - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
-      action: replace
-      target_label: __metrics_path__
-      regex: (.+)
-    - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
-      action: replace
-      target_label: __address__
-      regex: (.+)(?::\d+);(\d+)
-      replacement: $1:$2
-    - action: labelmap
-      regex: __meta_kubernetes_service_label_(.+)
-    - source_labels: [__meta_kubernetes_namespace]
-      action: replace
-      target_label: kubernetes_namespace
-    - source_labels: [__meta_kubernetes_service_name]
-      action: replace
-      target_label: kubernetes_name
-
-# Scrape logging endpoints.
-#
-# The relabeling allows the actual service scrape endpoint to be configured
-# via the following annotations:
-#
-# * `prometheus.io/scrape`: Only scrape services that have a value of `true`
-# * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
-# to set this to `https` and most likely set the `tls_config` of the scrape config.
-# * `prometheus.io/path`: If the metrics path is not `/metrics`, override this.
-# * `prometheus.io/port`: If the metrics are exposed on a different port than the
-# service's port, set this appropriately.
-- job_name: 'kubernetes-logging-service-endpoints'
-
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-    insecure_skip_verify: true
-  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-  kubernetes_sd_configs:
-  - role: endpoints
-    namespaces:
-      names:
-      - '{{ openshift_logging_namespace | default('openshift-logging') }}'
-
-  relabel_configs:
-    # drop infrastructure components managed by other scrape targets
-    - source_labels: [__meta_kubernetes_service_name]
-      action: drop
-      regex: 'prometheus-node-exporter'
-    # only those that have requested scraping
-    - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
-      action: keep
-      regex: true
-    - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
-      action: replace
-      target_label: __scheme__
-      regex: (https?)
-    - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
-      action: replace
-      target_label: __metrics_path__
-      regex: (.+)
-    - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
-      action: replace
-      target_label: __address__
-      regex: (.+)(?::\d+);(\d+)
-      replacement: $1:$2
-    - action: labelmap
-      regex: __meta_kubernetes_service_label_(.+)
-    - source_labels: [__meta_kubernetes_namespace]
-      action: replace
-      target_label: kubernetes_namespace
-    - source_labels: [__meta_kubernetes_service_name]
-      action: replace
-      target_label: kubernetes_name
-
-# Scrape config for node-exporter, which is expected to be running on port 9102.
-- job_name: 'kubernetes-nodes-exporter'
-
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-
-  kubernetes_sd_configs:
-  - role: node
-
-  metric_relabel_configs:
-  - source_labels: [__name__]
-    action: drop
-    regex: 'node_cpu|node_(disk|scrape_collector)_.+'
-  # preserve a subset of the network, netstat, vmstat, and filesystem series
-  - source_labels: [__name__]
-    action: replace
-    regex: '(node_(netstat_Ip_.+|vmstat_(nr|thp)_.+|filesystem_(free|size|device_error)|network_(transmit|receive)_(drop|errs)))'
-    target_label: __name__
-    replacement: renamed_$1
-  - source_labels: [__name__]
-    action: drop
-    regex: 'node_(netstat|vmstat|filesystem|network)_.+'
-  - source_labels: [__name__]
-    action: replace
-    regex: 'renamed_(.+)'
-    target_label: __name__
-    replacement: $1
-  # drop any partial expensive series
-  - source_labels: [__name__, device]
-    action: drop
-    regex: 'node_network_.+;veth.+'
-  - source_labels: [__name__, mountpoint]
-    action: drop
-    regex: 'node_filesystem_(free|size|device_error);([^/].*|/.+)'
-
-  relabel_configs:
-  - source_labels: [__address__]
-    regex: '(.*):10250'
-    replacement: '${1}:9102'
-    target_label: __address__
-  - source_labels: [__meta_kubernetes_node_label_kubernetes_io_hostname]
-    target_label: __instance__
-  - action: labelmap
-    regex: __meta_kubernetes_node_label_(.+)
-
-# Scrape config for the template service broker
-- job_name: 'openshift-template-service-broker'
-  scheme: https
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
-    server_name: apiserver.openshift-template-service-broker.svc
-  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-  kubernetes_sd_configs:
-  - role: endpoints
-    namespaces:
-      names:
-      - openshift-template-service-broker
-
-  relabel_configs:
-  - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
-    action: keep
-    regex: apiserver;https
-
-# Scrape config for the router
-- job_name: 'openshift-router'
-  scheme: https
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
-    server_name: router.default.svc
-  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-  kubernetes_sd_configs:
-  - role: endpoints
-    namespaces:
-      names:
-      - default
-
-  relabel_configs:
-  - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
-    action: keep
-    regex: router;1936-tcp
-
-alerting:
-  alertmanagers:
-  - scheme: http
-    static_configs:
-    - targets:
-      - "localhost:9093"

+ 0 - 2
roles/openshift_prometheus/tests/inventory

@@ -1,2 +0,0 @@
-localhost
-

+ 0 - 5
roles/openshift_prometheus/tests/test.yaml

@@ -1,5 +0,0 @@
----
-- hosts: localhost
-  remote_user: root
-  roles:
-    - openshift_prometheus

+ 0 - 2
roles/openshift_prometheus/vars/main.yml

@@ -1,2 +0,0 @@
----
-__node_exporter_template_file: "node-exporter-template.yaml"

+ 10 - 0
roles/openshift_sanitize_inventory/tasks/unsupported.yml

@@ -48,3 +48,13 @@
     msg: |-
      Configuring a value for openshift_hosted_registry_storage_kind=glusterfs without any glusterfs option is not allowed.
      Specify either the openshift_hosted_registry_storage_glusterfs_ips variable or the glusterfs/glusterfs_registry host groups.
+
+# Fail if the user is trying to install the deprecated prometheus/grafana stack
+- name: Check for deprecated prometheus/grafana install
+  when:
+  - (openshift_hosted_prometheus_deploy | default(false) | bool) or
+    (openshift_hosted_grafana_deploy | default(false) | bool)
+  fail:
+    msg: |-
+      The inventory variables 'openshift_hosted_prometheus_deploy' and 'openshift_hosted_grafana_deploy' are no longer used.
+      See roles/openshift_cluster_monitoring_operator/README.md for information about installing Prometheus and related monitoring components.
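
With this guard in place, an inventory that still enables the removed stack fails
during inventory sanitization instead of partway through a deploy. A hypothetical
inventory snippet that would now trip the check:

# group_vars/OSEv3/vars.yml: hypothetical leftover from an older inventory
openshift_hosted_prometheus_deploy: true  # rejected by the task above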

+ 0 - 1
test/ci/inventory/group_vars/OSEv3/vars.yml

@@ -12,7 +12,6 @@ openshift_metrics_install_metrics: false
 openshift_metrics_install_logging: false
 openshift_logging_install_logging: false
 openshift_management_install_management: false
-openshift_hosted_prometheus_deploy: false
 template_service_broker_install: false
 ansible_service_broker_install: false
 openshift_enable_service_catalog: false