Browse Source

Management Cleanup and Provider Integration

* Add container provider integration
* General cleanup
* Poll until service fully starts
* Add notes on multiple-provider additions
Tim Bielawa 7 years ago
parent
commit
f3741a0509

+ 69 - 1
filter_plugins/oo_filters.py

@@ -1125,6 +1125,73 @@ of items as ['region=infra', 'zone=primary']
     return selectors
 
 
def oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
    """Return the name of the first Service Account secret whose name
contains `secret_hint`.

Parse the Service Account Secrets list, `sa_secrets` (as from
oc_serviceaccount_secret:state=list). By default this returns the name
of the secret holding the SA bearer token.

Only provide the 'results' object to this filter. This filter expects
to receive a list like this:

    [
        {
            "name": "management-admin-dockercfg-p31s2"
        },
        {
            "name": "management-admin-token-bnqsh"
        }
    ]


Returns:

* `secret_name` [string] - The name of the first secret matching the
  `secret_hint` parameter, or None if no secret name matches. By
  default this is the secret holding the SA's bearer token.

Example playbook usage:

Register the return value from oc_serviceaccount_secret and pass
that result to this filter plugin.

    - name: Get all SA Secrets
      oc_serviceaccount_secret:
        state: list
        service_account: management-admin
        namespace: management-infra
      register: sa

    - name: Save the SA bearer token secret name
      set_fact:
        management_token: "{{ sa.results | oo_filter_sa_secrets }}"

    - name: Get the SA bearer token value
      oc_secret:
        state: list
        name: "{{ management_token }}"
        namespace: management-infra
        decode: true
      register: sa_secret

    - name: Print the bearer token value
      debug:
        var: sa_secret.results.decoded.token

    """
    for secret in sa_secrets:
        # Each secret is a dict with a 'name' key; substring
        # containment is the idiomatic test (str.find() == -1 is not)
        if secret_hint in secret['name']:
            return secret['name']
    # No secret matched the hint
    return None
+
+
 class FilterModule(object):
     """ Custom ansible filter mapping """
 
@@ -1167,5 +1234,6 @@ class FilterModule(object):
             "to_padded_yaml": to_padded_yaml,
             "oo_random_word": oo_random_word,
             "oo_contains_rule": oo_contains_rule,
-            "oo_selector_to_string_list": oo_selector_to_string_list
+            "oo_selector_to_string_list": oo_selector_to_string_list,
+            "oo_filter_sa_secrets": oo_filter_sa_secrets,
         }

+ 31 - 19
inventory/byo/hosts.example

@@ -968,25 +968,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # openshift_upgrade_post_storage_migration_enabled=true
 # openshift_upgrade_post_storage_migration_fatal=false
 
-# host group for masters
-[masters]
-ose3-master[1:3]-ansible.test.example.com
-
-[etcd]
-ose3-etcd[1:3]-ansible.test.example.com
-
-# NOTE: Containerized load balancer hosts are not yet supported, if using a global
-# containerized=true host variable we must set to false.
-[lb]
-ose3-lb-ansible.test.example.com containerized=false
-
-# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False any node that's also a master.
-[nodes]
-ose3-master[1:3]-ansible.test.example.com
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
-
+######################################################################
 # CloudForms/ManageIQ (CFME/MIQ) Configuration
 
 # See the readme for full descriptions and getting started
@@ -1036,6 +1018,17 @@ ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'prima
 # setting this variable. Useful for testing specific task files.
 #openshift_management_storage_nfs_local_hostname: false
 
+# These are the default values for the username and password of the
+# management app. Changing these values in your inventory will not
+# change your username or password. You should only need to change
+# these values in your inventory if you already changed the actual
+# name and password AND are trying to use integration scripts.
+#
+# For example, adding this cluster as a container provider,
+# playbooks/byo/openshift-management/add_container_provider.yml
+#openshift_management_username: admin
+#openshift_management_password: smartvm
+
 # A hash of parameters you want to override or set in the
 # miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
 # your inventory file as a simple hash. Acceptable values are defined
@@ -1044,3 +1037,22 @@ ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'prima
 #
 # openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
 #openshift_management_template_parameters: {}
+
+# host group for masters
+[masters]
+ose3-master[1:3]-ansible.test.example.com
+
+[etcd]
+ose3-etcd[1:3]-ansible.test.example.com
+
+# NOTE: Containerized load balancer hosts are not yet supported, if using a global
+# containerized=true host variable we must set to false.
+[lb]
+ose3-lb-ansible.test.example.com containerized=false
+
+# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
+# However, in order to ensure that your masters are not burdened with running pods you should
+# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
+[nodes]
+ose3-master[1:3]-ansible.test.example.com
+ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"

+ 6 - 0
playbooks/byo/openshift-management/add_container_provider.yml

@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- include: ../../common/openshift-management/add_container_provider.yml

+ 0 - 2
playbooks/byo/openshift-management/uninstall.yml

@@ -1,4 +1,2 @@
 ---
-# - include: ../openshift-cluster/initialize_groups.yml
-
 - include: ../../common/openshift-management/uninstall.yml

+ 8 - 0
playbooks/common/openshift-management/add_container_provider.yml

@@ -0,0 +1,8 @@
+---
+- name: Add Container Provider to Management
+  hosts: oo_first_master
+  tasks:
+  - name: Run the Management Integration Tasks
+    include_role:
+      name: openshift_management
+      tasks_from: add_container_provider

+ 1 - 1
playbooks/common/openshift-management/uninstall.yml

@@ -1,6 +1,6 @@
 ---
 - name: Uninstall CFME
-  hosts: masters
+  hosts: masters[0]
   tasks:
   - name: Run the CFME Uninstall Role Tasks
     include_role:

File diff suppressed because it is too large
+ 139 - 28
roles/openshift_management/README.md


+ 14 - 0
roles/openshift_management/defaults/main.yml

@@ -77,6 +77,20 @@ openshift_management_storage_nfs_base_dir: /exports
 openshift_management_storage_nfs_local_hostname: false
 
 ######################################################################
+# DEFAULT ACCOUNT INFORMATION
+######################################################################
+# These are the default values for the username and password of the
+# management app. Changing these values in your inventory will not
+# change your username or password. You should only need to change
+# these values in your inventory if you already changed the actual
+# name and password AND are trying to use integration scripts.
+#
+# For example, adding this cluster as a container provider,
+# playbooks/byo/openshift-management/add_container_provider.yml
+openshift_management_username: admin
+openshift_management_password: smartvm
+
+######################################################################
 # SCAFFOLDING - These are parameters we pre-seed that a user may or
 # may not set later
 ######################################################################

+ 22 - 0
roles/openshift_management/files/examples/container_providers.yml

@@ -0,0 +1,22 @@
+---
+container_providers:
+  - connection_configurations:
+      - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken}
+        endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+    hostname: "OCP/Origin cluster hostname (providing API access)"
+    name: openshift-management
+    port: 8443
+    type: "ManageIQ::Providers::Openshift::ContainerManager"
+# Copy and update for as many OCP or Origin providers as you want to
+# add to your management service
+  # - connection_configurations:
+  #     - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken}
+  #       endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+  #   hostname: "OCP/Origin cluster hostname (providing API access)"
+  #   name: openshift-management
+  #   port: 8443
+  #   type: "ManageIQ::Providers::Openshift::ContainerManager"
+management_server:
+  hostname: "Management server hostname (providing API access)"
+  user: admin
+  password: smartvm

+ 65 - 0
roles/openshift_management/tasks/add_container_provider.yml

@@ -0,0 +1,65 @@
---
# Register this OpenShift cluster as a Container Provider in the
# CFME/MIQ management app. Intended to run on the first master so the
# openshift facts (cluster_public_hostname, api_port) are available.

- name: Ensure lib_openshift modules are available
  include_role:
    # BUGFIX: include_role's parameter is 'name'; 'role' is not a
    # valid option and fails at runtime.
    name: lib_openshift

- name: Ensure OpenShift facts module is available
  include_role:
    name: openshift_facts

- name: Ensure OpenShift facts are loaded
  openshift_facts:

# The management-admin SA (created by the openshift_manageiq role)
# holds the bearer token used for provider API access.
- name: Ensure the management SA Secrets are read
  oc_serviceaccount_secret:
    state: list
    service_account: management-admin
    namespace: management-infra
  register: sa

- name: Ensure the management SA bearer token is identified
  set_fact:
    management_token: "{{ sa.results | oo_filter_sa_secrets }}"

- name: Ensure the SA bearer token value is read
  oc_secret:
    state: list
    name: "{{ management_token }}"
    namespace: management-infra
    decode: true
  # no_log keeps the decoded token out of the ansible output
  no_log: True
  register: sa_secret

- name: Ensure the SA bearer token value is saved
  set_fact:
    management_bearer_token: "{{ sa_secret.results.decoded.token }}"

- name: Ensure we have the public route to the management service
  oc_route:
    state: list
    name: httpd
    namespace: openshift-management
  register: route

- name: Ensure the management service route is saved
  set_fact:
    management_route: "{{ route.results.0.spec.host }}"

# NOTE(review): this request body contains the bearer token; consider
# no_log on this task as well if the token must never reach logs.
- name: Ensure this cluster is a container provider
  uri:
    url: "https://{{ management_route }}/api/providers"
    body_format: json
    method: POST
    user: "{{ openshift_management_username }}"
    password: "{{ openshift_management_password }}"
    validate_certs: no
    # Docs on formatting the BODY of the POST request:
    # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
    body:
      connection_configurations:
        - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken}
          endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
      hostname: "{{ openshift.master.cluster_public_hostname }}"
      name: "{{ openshift_management_project }}"
      port: "{{ openshift.master.api_port }}"
      type: "ManageIQ::Providers::Openshift::ContainerManager"

+ 27 - 0
roles/openshift_management/tasks/add_many_container_providers.yml

@@ -0,0 +1,27 @@
---
# Add every provider listed in the user-supplied container_providers
# config file to a single management server.
#
# NOTE(review): this file lives under roles/.../tasks/ but contains a
# play ('hosts:'); it can only be executed as a playbook, not included
# as a task file — confirm the intended invocation.
- hosts: "{{ groups['masters'][0] }}"
  tasks:
  - name: Include providers/management configuration
    include_vars:
      file: "{{ openshift_management_many_container_providers_config }}"

  - name: Ensure this cluster is a container provider
    uri:
      url: "https://{{ management_server['hostname'] }}/api/providers"
      body_format: json
      method: POST
      user: "{{ management_server['user'] }}"
      password: "{{ management_server['password'] }}"
      validate_certs: no
      # Docs on formatting the BODY of the POST request:
      # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
      body: "{{ item }}"
    # best-effort: report failures below instead of aborting the loop
    failed_when: false
    with_items: "{{ container_providers }}"
    register: results

  # BUGFIX: replaced leftover placeholder text ("FLOOP ...") with a
  # message that actually reports each provider's result.
  - name: Ensure failed additions are reported for each container provider
    debug:
      msg: "Provider {{ item.item.hostname }}: HTTP status {{ item.status | default('unknown') }}"
    with_items: "{{ results.results }}"

+ 19 - 6
roles/openshift_management/tasks/main.yml

@@ -2,23 +2,29 @@
 ######################################################################
 # Users, projects, and privileges
 
-- name: Run pre-install CFME validation checks
+- name: Run pre-install Management validation checks
   include: validate.yml
 
-- name: "Ensure the CFME '{{ openshift_management_project }}' namespace exists"
+# This creates a service account allowing Container Provider
+# integration (managing OCP/Origin via MIQ/Management)
+- name: Enable Container Provider Integration
+  include_role:
+    role: openshift_manageiq
+
+- name: "Ensure the Management '{{ openshift_management_project }}' namespace exists"
   oc_project:
     state: present
     name: "{{ openshift_management_project }}"
     display_name: "{{ openshift_management_project_description }}"
 
-- name: Create and Authorize CFME Accounts
+- name: Create and Authorize Management Accounts
   include: accounts.yml
 
 ######################################################################
 # STORAGE - Initialize basic storage class
 #---------------------------------------------------------------------
 # * nfs - set up NFS shares on the first master for a proof of concept
-- name: Create required NFS exports for CFME app storage
+- name: Create required NFS exports for Management app storage
   include: storage/nfs.yml
   when: openshift_management_storage_class == 'nfs'
 
@@ -45,7 +51,7 @@
 
 ######################################################################
 # APPLICATION TEMPLATE
-- name: Install the CFME app and PV templates
+- name: Install the Management app and PV templates
   include: template.yml
 
 ######################################################################
@@ -71,9 +77,16 @@
   when:
     - openshift_management_app_template in ['miq-template', 'cfme-template']
 
-- name: Ensure the CFME App is created
+- name: Ensure the Management App is created
   oc_process:
     namespace: "{{ openshift_management_project }}"
     template_name: "{{ openshift_management_template_name }}"
     create: True
     params: "{{ openshift_management_template_parameters }}"
+
+- name: Wait for the app to come up. May take several minutes, 30s check intervals, 10m max
+  command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}"
+  register: app_seeding_logs
+  until: app_seeding_logs.stdout.find('Server starting complete') != -1
+  delay: 30
+  retries: 20

+ 4 - 4
roles/openshift_management/tasks/storage/create_nfs_pvs.yml

@@ -26,7 +26,7 @@
       when:
         - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is not defined
 
-- name: Check if the CFME App PV has been created
+- name: Check if the Management App PV has been created
   oc_obj:
     namespace: "{{ openshift_management_project }}"
     state: list
@@ -34,7 +34,7 @@
     name: "{{ openshift_management_flavor_short }}-app"
   register: miq_app_pv_check
 
-- name: Check if the CFME DB PV has been created
+- name: Check if the Management DB PV has been created
   oc_obj:
     namespace: "{{ openshift_management_project }}"
     state: list
@@ -44,7 +44,7 @@
   when:
     - openshift_management_app_template in ['miq-template', 'cfme-template']
 
-- name: Ensure the CFME App PV is created
+- name: Ensure the Management App PV is created
   oc_process:
     namespace: "{{ openshift_management_project }}"
     template_name: "{{ openshift_management_flavor }}-app-pv"
@@ -55,7 +55,7 @@
       NFS_HOST: "{{ openshift_management_nfs_server }}"
   when: miq_app_pv_check.results.results == [{}]
 
-- name: Ensure the CFME DB PV is created
+- name: Ensure the Management DB PV is created
   oc_process:
     namespace: "{{ openshift_management_project }}"
     template_name: "{{ openshift_management_flavor }}-db-pv"

+ 13 - 13
roles/openshift_management/tasks/template.yml

@@ -15,7 +15,7 @@
 # STANDARD PODIFIED DATABASE TEMPLATE
 - when: openshift_management_app_template in ['miq-template', 'cfme-template']
   block:
-  - name: Check if the CFME Server template has been created already
+  - name: Check if the Management Server template has been created already
     oc_obj:
       namespace: "{{ openshift_management_project }}"
       state: list
@@ -25,12 +25,12 @@
 
   - when: miq_server_check.results.results == [{}]
     block:
-    - name: Copy over CFME Server template
+    - name: Copy over Management Server template
       copy:
         src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml"
         dest: "{{ template_dir }}/"
 
-    - name: Ensure CFME Server Template is created
+    - name: Ensure Management Server Template is created
       oc_obj:
         namespace: "{{ openshift_management_project }}"
         name: "{{ openshift_management_flavor }}"
@@ -41,9 +41,9 @@
 
 ######################################################################
 # EXTERNAL DATABASE TEMPLATE
-- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template']
+- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
   block:
-  - name: Check if the CFME Ext-DB Server template has been created already
+  - name: Check if the Management Ext-DB Server template has been created already
     oc_obj:
       namespace: "{{ openshift_management_project }}"
       state: list
@@ -53,12 +53,12 @@
 
   - when: miq_ext_db_server_check.results.results == [{}]
     block:
-    - name: Copy over CFME Ext-DB Server template
+    - name: Copy over Management Ext-DB Server template
       copy:
         src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml"
         dest: "{{ template_dir }}/"
 
-    - name: Ensure CFME Ext-DB Server Template is created
+    - name: Ensure Management Ext-DB Server Template is created
       oc_obj:
         namespace: "{{ openshift_management_project }}"
         name: "{{ openshift_management_flavor }}-ext-db"
@@ -74,7 +74,7 @@
 # Begin conditional PV template creations
 
 # Required for the application server
-- name: Check if the CFME App PV template has been created already
+- name: Check if the Management App PV template has been created already
   oc_obj:
     namespace: "{{ openshift_management_project }}"
     state: list
@@ -84,12 +84,12 @@
 
 - when: miq_app_pv_check.results.results == [{}]
   block:
-  - name: Copy over CFME App PV template
+  - name: Copy over Management App PV template
     copy:
       src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml"
       dest: "{{ template_dir }}/"
 
-  - name: Ensure CFME App PV Template is created
+  - name: Ensure Management App PV Template is created
     oc_obj:
       namespace: "{{ openshift_management_project }}"
       name: "{{ openshift_management_flavor }}-app-pv"
@@ -103,7 +103,7 @@
 # Required for database if the installation is fully podified
 - when: openshift_management_app_template in ['miq-template', 'cfme-template']
   block:
-  - name: Check if the CFME DB PV template has been created already
+  - name: Check if the Management DB PV template has been created already
     oc_obj:
       namespace: "{{ openshift_management_project }}"
       state: list
@@ -113,12 +113,12 @@
 
   - when: miq_db_pv_check.results.results == [{}]
     block:
-    - name: Copy over CFME DB PV template
+    - name: Copy over Management DB PV template
       copy:
         src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml"
         dest: "{{ template_dir }}/"
 
-    - name: Ensure CFME DB PV Template is created
+    - name: Ensure Management DB PV Template is created
       oc_obj:
         namespace: "{{ openshift_management_project }}"
         name: "{{ openshift_management_flavor }}-db-pv"

+ 1 - 1
roles/openshift_nfs/tasks/create_export.yml

@@ -12,7 +12,7 @@
 #   l_nfs_export_name: Name of sub-directory of the export
 #   l_nfs_options: Mount Options
 
-- name: Ensure CFME App NFS export directory exists
+- name: "Ensure {{ l_nfs_export_name }} NFS export directory exists"
   file:
     path: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }}"
     state: directory